query
stringlengths 8
6.75k
| document
stringlengths 9
1.89M
| negatives
listlengths 19
19
| metadata
dict |
---|---|---|---|
HostUUIDExistsLocally checks if dataDir/host_uuid file exists in local storage. | func HostUUIDExistsLocally(dataDir string) bool {
_, err := ReadHostUUID(dataDir)
return err == nil
} | [
"func (r *Release) localExist() error {\n\tvar (\n\t\tversion string = fmt.Sprintf(\"terraform-%s.zip\", r.Version)\n\t\terr error\n\t)\n\n\tif _, err = os.Stat(filepath.Join(r.Home, PathTmp.toString(), version)); !os.IsNotExist(err) {\n\t\tfmt.Println(\"Already in cache ...\")\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func recoveryFileExists(roleId uint64, filename string) (bool, error) {\n err := idempotentCreateRecoveryDir(roleId)\n if err != nil { return false, err }\n\n path := fmt.Sprintf(RECOVERY_FILE_PATTERN, roleId, filename)\n _, err = os.Stat(path) \n if os.IsNotExist(err) {\n return false, nil\n } else if err != nil {\n return false, err\n } else {\n return true, nil\n }\n}",
"func (f *FileStorage) Exists(dashboardID string) (bool, error) {\n\tif _, err := os.Stat(f.getFilePath(dashboardID)); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, err\n\t}\n\treturn true, nil\n}",
"func (b *Binary) LocalExist() bool {\n\treturn b.file.LocalExist()\n}",
"func IsExist(err error) bool",
"func exists() bool {\r\n\t_, err := ioutil.ReadFile(\"nodestore.json\")\r\n\tif os.IsNotExist(err) {\r\n\t\treturn false\r\n\t}\r\n\treturn true\r\n}",
"func (l *LocalStorage) IsPresent(ctx context.Context, identifier string) bool {\n\t_, err := os.Stat(path.Join(l.path, identifier))\n\treturn err == nil\n}",
"func FilesStorageExists(exec boil.Executor, iD int) (bool, error) {\n\tvar exists bool\n\tsql := \"select exists(select 1 from `files_storages` where `id`=? limit 1)\"\n\n\tif boil.DebugMode {\n\t\tfmt.Fprintln(boil.DebugWriter, sql)\n\t\tfmt.Fprintln(boil.DebugWriter, iD)\n\t}\n\n\trow := exec.QueryRow(sql, iD)\n\n\terr := row.Scan(&exists)\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"models: unable to check if files_storages exists\")\n\t}\n\n\treturn exists, nil\n}",
"func machineExists(id string) bool {\n\tmut.Lock()\n\tlogFile.Seek(0, 0)\n\tdefer logFile.Seek(0, 2)\n\tdefer mut.Unlock()\n\tscanner := bufio.NewScanner(logFile)\n\tfor scanner.Scan() {\n\t\tif strings.Contains(scanner.Text(), id) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (f *FileStorage) IsExist() bool {\n\tif _, err := os.Stat(f.fullPath); os.IsExist(err) {\n\t\treturn true\n\t}\n\treturn false\n}",
"func db_check_user_exists(username string) bool {\n file_path := path.Join(\"db/users\", strings.ToLower(username) + \".json\")\n \n if _, err := os.Stat(file_path); !os.IsNotExist(err) {\n return true\n }\n return false\n}",
"func (z *ZKC) identityExists(id [zkidentity.IdentitySize]byte) bool {\n\t_, err := os.Stat(path.Join(z.settings.Root, inboundDir,\n\t\thex.EncodeToString(id[:]), identityFilename))\n\tif err == nil {\n\t\tids := hex.EncodeToString(id[:])\n\t\tfullPath := path.Join(z.settings.Root, inboundDir, ids)\n\t\t_, err1 := os.Stat(path.Join(fullPath, ratchetFilename))\n\t\t_, err2 := os.Stat(path.Join(fullPath, halfRatchetFilename))\n\t\tif err1 == nil || err2 == nil {\n\t\t\treturn true\n\t\t}\n\n\t\t// this happens during reset condiftion\n\t\tz.Dbg(idZKC, \"identityExists: reset condition\")\n\t\treturn false\n\t}\n\n\treturn false\n}",
"func (_UsersData *UsersDataCaller) IsUuidExist(opts *bind.CallOpts, uuid [16]byte) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _UsersData.contract.Call(opts, out, \"isUuidExist\", uuid)\n\treturn *ret0, err\n}",
"func MasterFileExists(path string) bool {\n\t_, err := os.Stat(path)\n\treturn err == nil\n}",
"func (_UsersData *UsersDataCallerSession) IsUuidExist(uuid [16]byte) (bool, error) {\n\treturn _UsersData.Contract.IsUuidExist(&_UsersData.CallOpts, uuid)\n}",
"func Exists(uid int, address string) bool {\n\tnowTime := time.Now().Unix()\n\n\tif uCache, ok := localCache.UIDCache[uid]; ok {\n\t\t// cache未过期\n\t\tif uCache.lastModifyTime+CacheExpireTime> nowTime {\n\t\t\treturn true\n\t\t}\n\t}\n\n\tif addrCache, ok := localCache.AddressCache[address]; ok {\n\t\t// cache未过期\n\t\tif addrCache.lastModifyTime+CacheExpireTime> nowTime {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func existDevice() bool {\n\tcacheData, _ := db.Redis.Keys(\"*\").Result()\n\n\tfor _, key := range cacheData {\n\t\treturn strings.Contains(key, \"device\")\n\t}\n\n\treturn false\n}",
"func (d *Driver) Exists(id string) bool {\n\t_, err := os.Stat(d.dir(id))\n\treturn err == nil\n}",
"func (gcs *StorageConnection) CheckFileExists(ctx context.Context, fileName string) (exists bool) {\n\tvar (\n\t\terr error\n\t\tattrs *storage.ObjectAttrs\n\t)\n\tit := gcs.bucket.Objects(ctx, nil)\n\tfor {\n\t\tattrs, err = it.Next()\n\t\tif err == iterator.Done {\n\t\t\tbreak\n\t\t}\n\t\tif attrs.Name == fileName {\n\t\t\texists = true\n\t\t\treturn\n\t\t}\n\t}\n\texists = false\n\treturn\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ReadHostUUID reads host UUID from the file in the data dir | func ReadHostUUID(dataDir string) (string, error) {
out, err := ReadPath(filepath.Join(dataDir, HostUUIDFile))
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return "", err
}
return "", trace.ConvertSystemError(err)
}
id := strings.TrimSpace(string(out))
if id == "" {
return "", trace.NotFound("host uuid is empty")
}
return id, nil
} | [
"func ReadOrMakeHostUUID(dataDir string) (string, error) {\n\tid, err := ReadHostUUID(dataDir)\n\tif err == nil {\n\t\treturn id, nil\n\t}\n\tif !trace.IsNotFound(err) {\n\t\treturn \"\", trace.Wrap(err)\n\t}\n\t// Checking error instead of the usual uuid.New() in case uuid generation\n\t// fails due to not enough randomness. It's been known to happen happen when\n\t// Teleport starts very early in the node initialization cycle and /dev/urandom\n\t// isn't ready yet.\n\trawID, err := uuid.NewRandom()\n\tif err != nil {\n\t\treturn \"\", trace.BadParameter(\"\" +\n\t\t\t\"Teleport failed to generate host UUID. \" +\n\t\t\t\"This may happen if randomness source is not fully initialized when the node is starting up. \" +\n\t\t\t\"Please try restarting Teleport again.\")\n\t}\n\tid = rawID.String()\n\tif err = WriteHostUUID(dataDir, id); err != nil {\n\t\treturn \"\", trace.Wrap(err)\n\t}\n\treturn id, nil\n}",
"func UUIDFile(fpath string) (string, error) {\n\n\t_, err := os.Stat(fpath)\n\tif err != nil && !os.IsExist(err) {\n\t\tkey := uuid.New().String()\n\t\tif err := ioutil.WriteFile(fpath, []byte(key), 0777); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn key, nil\n\t}\n\n\tfp, err := os.Open(fpath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer fp.Close()\n\tdata, err := ioutil.ReadAll(fp)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tkey := string(data)\n\tif _, err := uuid.Parse(key); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn key, nil\n}",
"func Read(args ...string) (*UUID, error) {\n\tfpath := sfFilePath(args)\n\tf, err := os.Open(fpath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\tdata := make([]byte, UUIDHexLen+8)\n\tn, err := f.Read(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif n < UUIDHexLen {\n\t\treturn nil, fmt.Errorf(\"File '%s' is too small\", fpath)\n\t}\n\tdata = data[:n]\n\tuuid, err := Decode(string(data))\n\tif err == nil {\n\t\tnc := &cache{uuid: *uuid, filePath: fpath, validationTime: time.Now().Add(ValidationTimePeriod)}\n\t\tatomic.StorePointer(¤t, unsafe.Pointer(nc))\n\t}\n\treturn uuid, err\n}",
"func ReadUUID(buffer []byte, offset int) UUID {\n bytes := ReadBytes(buffer, offset, 16)\n return UUIDFromBytes(bytes)\n}",
"func (b *Broker) readIDFromFile(home, filepath string) (id string, err error) {\n\t_filepath := fmt.Sprintf(\"%v%v%v\", home, string(os.PathSeparator), filepath)\n\t_bytes, err := ioutil.ReadFile(_filepath)\n\tif err != nil {\n\t\treturn\n\t}\n\tid = string(_bytes)\n\treturn\n}",
"func readMachineID() []byte {\n\tid := make([]byte, 3)\n\thostname, err1 := os.Hostname()\n\tif err1 != nil {\n\t\t_, err2 := io.ReadFull(rand.Reader, id)\n\t\tif err2 != nil {\n\t\t\tpanic(fmt.Errorf(\"cannot get hostname: %v; %v\", err1, err2))\n\t\t}\n\t\treturn id\n\t}\n\thw := md5.New()\n\thw.Write([]byte(hostname))\n\tcopy(id, hw.Sum(nil))\n\treturn id\n}",
"func readMachineID() []byte {\n\tid := make([]byte, 3)\n\tif hostname, err := os.Hostname(); err == nil {\n\t\thw := md5.New()\n\t\thw.Write([]byte(hostname))\n\t\tcopy(id, hw.Sum(nil))\n\t} else {\n\t\t// Fallback to rand number if machine id can't be gathered\n\t\tif _, randErr := rand.Reader.Read(id); randErr != nil {\n\t\t\tpanic(fmt.Errorf(\"Cannot get hostname nor generate a random number: %v; %v\", err, randErr))\n\t\t}\n\t}\n\treturn id\n}",
"func readID(path string) (string, error) {\n\traw, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn \"\", maskAny(errgo.WithCausef(nil, InvalidArgumentError, \"%s does not exist\", path))\n\t\t}\n\t\treturn \"\", maskAny(err)\n\t}\n\treturn strings.TrimSpace(string(raw)), nil\n}",
"func (packet *Packet) ReadUUID() string {\n\tlength := packet.ReadUint16()\n\tif length != 36 {\n\t\tpanic(fmt.Sprintf(\"Wrong UUID length! Expecting 36, got %d\", length))\n\t}\n\treturn string(packet.ReadBytes(uint32(length)))\n}",
"func WriteHostUUID(dataDir string, id string) error {\n\terr := os.WriteFile(filepath.Join(dataDir, HostUUIDFile), []byte(id), os.ModeExclusive|0400)\n\tif err != nil {\n\t\tif errors.Is(err, fs.ErrPermission) {\n\t\t\t//do not convert to system error as this loses the ability to compare that it is a permission error\n\t\t\treturn err\n\t\t}\n\t\treturn trace.ConvertSystemError(err)\n\t}\n\treturn nil\n}",
"func path2uuid(fpath string) string {\n\n\text := path.Ext(fpath) // identify extension\n\tfilename := strings.TrimSuffix(fpath, ext) // find filename\n\tuuid := path.Base(filename) // implement basename cmd\n\n\treturn uuid\n}",
"func readMachineId() []byte {\n\tvar sum [3]byte\n\tid := sum[:]\n\thostname, err1 := os.Hostname()\n\tif err1 != nil {\n\t\tn := uint32(time.Now().UnixNano())\n\t\tsum[0] = byte(n >> 0)\n\t\tsum[1] = byte(n >> 8)\n\t\tsum[2] = byte(n >> 16)\n\t\treturn id\n\t}\n\thw := md5.New()\n\thw.Write([]byte(hostname))\n\tcopy(id, hw.Sum(nil))\n\treturn id\n}",
"func readInstanceID() string {\n\tconst instanceIDFile = \"/var/lib/cloud/data/instance-id\"\n\tidBytes, err := ioutil.ReadFile(instanceIDFile)\n\tif err != nil {\n\t\tglog.Infof(\"Failed to get instance id from file: %v\", err)\n\t\treturn \"\"\n\t} else {\n\t\tinstanceID := string(idBytes)\n\t\tinstanceID = strings.TrimSpace(instanceID)\n\t\tglog.Infof(\"Get instance id from file: %s\", instanceID)\n\t\treturn instanceID\n\t}\n}",
"func removeUuidFromFilepath(path string) string {\n\t// UUID has 4 hyphens, so we split into 6 parts. \n\treturn strings.SplitN(filepath.Base(path), \"-\", 6)[5]\n}",
"func parseUUID(device, output string) (string, error) {\n\n\t// find the line with the uuid\n\tlines := strings.Split(output, \"\\n\")\n\tfor _, line := range lines {\n\t\tif strings.Index(line, \"Disk identifier (GUID)\") != -1 {\n\t\t\twords := strings.Split(line, \" \")\n\t\t\tfor _, word := range words {\n\t\t\t\t// we expect most words in the line not to be a uuid, but will return the first one that is\n\t\t\t\tresult, err := uuid.Parse(word)\n\t\t\t\tif err == nil {\n\t\t\t\t\treturn result.String(), nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn \"\", fmt.Errorf(\"uuid not found for device %s. output=%s\", device, output)\n}",
"func (b *BinaryReader) ReadUUID(off int64) (*common.UUID, error) {\n\tif _, err := b.readToBuffer(16, off); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn common.NewUUID(b.buffer)\n}",
"func HostUUIDExistsLocally(dataDir string) bool {\n\t_, err := ReadHostUUID(dataDir)\n\treturn err == nil\n}",
"func (client *XenClient) HostGetUuid(self string) (result string, err error) {\n\tobj, err := client.APICall(\"host.get_uuid\", self)\n\tif err != nil {\n\t\treturn\n\t}\n\tresult = obj.(string)\n\treturn\n}",
"func (s *Store) readID() error {\n\tb, err := ioutil.ReadFile(s.IDPath())\n\tif os.IsNotExist(err) {\n\t\ts.id = 0\n\t\treturn nil\n\t} else if err != nil {\n\t\treturn fmt.Errorf(\"read file: %s\", err)\n\t}\n\n\tid, err := strconv.ParseUint(string(b), 10, 64)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"parse id: %s\", err)\n\t}\n\ts.id = id\n\n\ts.Logger.Printf(\"read local node id: %d\", s.id)\n\n\treturn nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
WriteHostUUID writes host UUID into a file | func WriteHostUUID(dataDir string, id string) error {
err := os.WriteFile(filepath.Join(dataDir, HostUUIDFile), []byte(id), os.ModeExclusive|0400)
if err != nil {
if errors.Is(err, fs.ErrPermission) {
//do not convert to system error as this loses the ability to compare that it is a permission error
return err
}
return trace.ConvertSystemError(err)
}
return nil
} | [
"func WriteUUID(buffer []byte, offset int, value UUID) {\n bytes, _ := value.MarshalBinary()\n WriteBytes(buffer, offset, bytes)\n}",
"func (packet *Packet) WriteUUIDString(data string) {\n\tpacket.WriteString(data)\n}",
"func ReadOrMakeHostUUID(dataDir string) (string, error) {\n\tid, err := ReadHostUUID(dataDir)\n\tif err == nil {\n\t\treturn id, nil\n\t}\n\tif !trace.IsNotFound(err) {\n\t\treturn \"\", trace.Wrap(err)\n\t}\n\t// Checking error instead of the usual uuid.New() in case uuid generation\n\t// fails due to not enough randomness. It's been known to happen happen when\n\t// Teleport starts very early in the node initialization cycle and /dev/urandom\n\t// isn't ready yet.\n\trawID, err := uuid.NewRandom()\n\tif err != nil {\n\t\treturn \"\", trace.BadParameter(\"\" +\n\t\t\t\"Teleport failed to generate host UUID. \" +\n\t\t\t\"This may happen if randomness source is not fully initialized when the node is starting up. \" +\n\t\t\t\"Please try restarting Teleport again.\")\n\t}\n\tid = rawID.String()\n\tif err = WriteHostUUID(dataDir, id); err != nil {\n\t\treturn \"\", trace.Wrap(err)\n\t}\n\treturn id, nil\n}",
"func UUIDFile(fpath string) (string, error) {\n\n\t_, err := os.Stat(fpath)\n\tif err != nil && !os.IsExist(err) {\n\t\tkey := uuid.New().String()\n\t\tif err := ioutil.WriteFile(fpath, []byte(key), 0777); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn key, nil\n\t}\n\n\tfp, err := os.Open(fpath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer fp.Close()\n\tdata, err := ioutil.ReadAll(fp)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tkey := string(data)\n\tif _, err := uuid.Parse(key); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn key, nil\n}",
"func (cmd *Command) writePIDFile(path string) error {\n\t// Ignore if path is not set.\n\tif path == \"\" {\n\t\treturn nil\n\t}\n\n\t// Ensure the required directory structure exists.\n\tif err := os.MkdirAll(filepath.Dir(path), 0777); err != nil {\n\t\treturn fmt.Errorf(\"mkdir: %s\", err)\n\t}\n\n\t// Retrieve the PID and write it.\n\tpid := strconv.Itoa(os.Getpid())\n\tif err := ioutil.WriteFile(path, []byte(pid), 0666); err != nil {\n\t\treturn fmt.Errorf(\"write file: %s\", err)\n\t}\n\n\treturn nil\n}",
"func ReadHostUUID(dataDir string) (string, error) {\n\tout, err := ReadPath(filepath.Join(dataDir, HostUUIDFile))\n\tif err != nil {\n\t\tif errors.Is(err, fs.ErrPermission) {\n\t\t\t//do not convert to system error as this loses the ability to compare that it is a permission error\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn \"\", trace.ConvertSystemError(err)\n\t}\n\tid := strings.TrimSpace(string(out))\n\tif id == \"\" {\n\t\treturn \"\", trace.NotFound(\"host uuid is empty\")\n\t}\n\treturn id, nil\n}",
"func post(w http.ResponseWriter,r *http.Request) {\n\toutput, _ := exec.Command(\"dbus-uuidgen\").Output()\n\tuuid := strings.TrimSuffix(string(output), \"\\n\") //注意生成的uuid包含\\n后缀,而在url中该字符别翻译为%OA,造成无法删除临时问题\n\tname := strings.Split(r.URL.EscapedPath(), \"/\")[2]\n\tsize, e := strconv.ParseInt(r.Header.Get(\"size\"), 0, 64)\n\tif e != nil{\n\t\tlog.Errorf(\"Temp/<hash> post parse_size error %v\",e)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\tt := tempinfo{Uuid:uuid,Name:name,Size:size}\n\te = t.writeToFile()\n\tif e!= nil{\n\t\tlog.Errorf(\"Temp/<hash> post write to file error %v\",e)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\tos.Create(os.Getenv(\"STORAGE_ROOT\")+\"/temp/\"+t.Uuid+\".dat\")\n\tw.Write([]byte(t.Uuid))\n}",
"func (device *IO4V2Bricklet) WriteUID(uid uint32) (err error) {\n\tvar buf bytes.Buffer\n\tbinary.Write(&buf, binary.LittleEndian, uid)\n\n\tresultBytes, err := device.device.Set(uint8(FunctionWriteUID), buf.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(resultBytes) > 0 {\n\t\tvar header PacketHeader\n\n\t\theader.FillFromBytes(resultBytes)\n\n\t\tif header.Length != 8 {\n\t\t\treturn fmt.Errorf(\"Received packet of unexpected size %d, instead of %d\", header.Length, 8)\n\t\t}\n\n\t\tif header.ErrorCode != 0 {\n\t\t\treturn DeviceError(header.ErrorCode)\n\t\t}\n\n\t\tbytes.NewBuffer(resultBytes[8:])\n\n\t}\n\n\treturn nil\n}",
"func (d *Daemon) WritePid(file string, pid int) error {\n\treturn ioutil.WriteFile(file, []byte(fmt.Sprintf(\"%d\", pid)), 0644)\n}",
"func writeHostMap(hostMap map[string]string) {\n\tif host_list_file == \"\" {\n\t\treturn\n\t}\n\tf, err := os.Create(host_list_file)\n\tif err != nil {\n\t\tlogr.LogLine(logr.Lerror, ltagsrc, err.Error())\n\t}\n\tdefer f.Close()\n\n\tfor host := range hostMap {\n\t\tf.WriteString(fmt.Sprintf(\"%s\\n\", host))\n\t}\n}",
"func (b *Broker) createIDFile(home string, filepath string, id string) (err error) {\n\t_filepath := fmt.Sprintf(\"%v%v%v\", home, string(os.PathSeparator), filepath)\n\terr = ioutil.WriteFile(_filepath, []byte(id), 0644)\n\n\treturn\n}",
"func WritePidFile(path string, pid int) error {\n\tlog.WithField(\"pid\", pid).Debug(\"writing pid file\")\n\tpidFile, err := os.Create(path)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to write pid file: %v\", err)\n\t}\n\tdefer pidFile.Close()\n\tpidFile.WriteString(strconv.Itoa(pid))\n\treturn nil\n}",
"func (device *AirQualityBricklet) WriteUID(uid uint32) (err error) {\n\tvar buf bytes.Buffer\n\tbinary.Write(&buf, binary.LittleEndian, uid)\n\n\tresultBytes, err := device.device.Set(uint8(FunctionWriteUID), buf.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(resultBytes) > 0 {\n\t\tvar header PacketHeader\n\n\t\theader.FillFromBytes(resultBytes)\n\n\t\tif header.Length != 8 {\n\t\t\treturn fmt.Errorf(\"Received packet of unexpected size %d, instead of %d\", header.Length, 8)\n\t\t}\n\n\t\tif header.ErrorCode != 0 {\n\t\t\treturn DeviceError(header.ErrorCode)\n\t\t}\n\n\t\tbytes.NewBuffer(resultBytes[8:])\n\n\t}\n\n\treturn nil\n}",
"func (device *DCV2Bricklet) WriteUID(uid uint32) (err error) {\n\tvar buf bytes.Buffer\n\tbinary.Write(&buf, binary.LittleEndian, uid)\n\n\tresultBytes, err := device.device.Set(uint8(FunctionWriteUID), buf.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(resultBytes) > 0 {\n\t\tvar header PacketHeader\n\n\t\theader.FillFromBytes(resultBytes)\n\n\t\tif header.Length != 8 {\n\t\t\treturn fmt.Errorf(\"Received packet of unexpected size %d, instead of %d\", header.Length, 8)\n\t\t}\n\n\t\tif header.ErrorCode != 0 {\n\t\t\treturn DeviceError(header.ErrorCode)\n\t\t}\n\n\t\tbytes.NewBuffer(resultBytes[8:])\n\n\t}\n\n\treturn nil\n}",
"func createHostFile(ip, password string) (string, error) {\n\thostFile, err := ioutil.TempFile(\"\", \"test\")\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"coud not make temporary file: %s\", err)\n\t}\n\tdefer hostFile.Close()\n\n\t_, err = hostFile.WriteString(fmt.Sprintf(`[win]\n%s ansible_password=%s\n[win:vars]\nansible_user=core\nansible_port=%s\nansible_connection=winrm\nansible_winrm_server_cert_validation=ignore`, ip, password, winRMPort))\n\treturn hostFile.Name(), err\n}",
"func (device *IndustrialDigitalIn4V2Bricklet) WriteUID(uid uint32) (err error) {\n\tvar buf bytes.Buffer\n\tbinary.Write(&buf, binary.LittleEndian, uid)\n\n\tresultBytes, err := device.device.Set(uint8(FunctionWriteUID), buf.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(resultBytes) > 0 {\n\t\tvar header PacketHeader\n\n\t\theader.FillFromBytes(resultBytes)\n\n\t\tif header.Length != 8 {\n\t\t\treturn fmt.Errorf(\"Received packet of unexpected size %d, instead of %d\", header.Length, 8)\n\t\t}\n\n\t\tif header.ErrorCode != 0 {\n\t\t\treturn DeviceError(header.ErrorCode)\n\t\t}\n\n\t\tbytes.NewBuffer(resultBytes[8:])\n\n\t}\n\n\treturn nil\n}",
"func writeUint24(b *bytes.Buffer, value uint24) {\n\tb.WriteByte(byte(value))\n\tb.WriteByte(byte(value >> 8))\n\tb.WriteByte(byte(value >> 16))\n}",
"func encodeUUID(src [16]byte) string {\n\treturn fmt.Sprintf(\"%x-%x-%x-%x-%x\", src[0:4], src[4:6], src[6:8], src[8:10], src[10:16])\n}",
"func (h Handle) SetHostID(id uint32) error {\n\thostid := C.u_int32_t(id)\n\terr := h.ioctl(C.DIOCSETHOSTID, unsafe.Pointer(&hostid))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"DIOCSETHOSTID : %s\", err)\n\t}\n\treturn nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ReadOrMakeHostUUID looks for a hostid file in the data dir. If present, returns the UUID from it, otherwise generates one | func ReadOrMakeHostUUID(dataDir string) (string, error) {
id, err := ReadHostUUID(dataDir)
if err == nil {
return id, nil
}
if !trace.IsNotFound(err) {
return "", trace.Wrap(err)
}
// Checking error instead of the usual uuid.New() in case uuid generation
// fails due to not enough randomness. It's been known to happen happen when
// Teleport starts very early in the node initialization cycle and /dev/urandom
// isn't ready yet.
rawID, err := uuid.NewRandom()
if err != nil {
return "", trace.BadParameter("" +
"Teleport failed to generate host UUID. " +
"This may happen if randomness source is not fully initialized when the node is starting up. " +
"Please try restarting Teleport again.")
}
id = rawID.String()
if err = WriteHostUUID(dataDir, id); err != nil {
return "", trace.Wrap(err)
}
return id, nil
} | [
"func ReadHostUUID(dataDir string) (string, error) {\n\tout, err := ReadPath(filepath.Join(dataDir, HostUUIDFile))\n\tif err != nil {\n\t\tif errors.Is(err, fs.ErrPermission) {\n\t\t\t//do not convert to system error as this loses the ability to compare that it is a permission error\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn \"\", trace.ConvertSystemError(err)\n\t}\n\tid := strings.TrimSpace(string(out))\n\tif id == \"\" {\n\t\treturn \"\", trace.NotFound(\"host uuid is empty\")\n\t}\n\treturn id, nil\n}",
"func UUIDFile(fpath string) (string, error) {\n\n\t_, err := os.Stat(fpath)\n\tif err != nil && !os.IsExist(err) {\n\t\tkey := uuid.New().String()\n\t\tif err := ioutil.WriteFile(fpath, []byte(key), 0777); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn key, nil\n\t}\n\n\tfp, err := os.Open(fpath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer fp.Close()\n\tdata, err := ioutil.ReadAll(fp)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tkey := string(data)\n\tif _, err := uuid.Parse(key); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn key, nil\n}",
"func HostUUIDExistsLocally(dataDir string) bool {\n\t_, err := ReadHostUUID(dataDir)\n\treturn err == nil\n}",
"func GetHostID(fallbackUUID string) (id string) {\n\tshortID := genShortID()\n\tid = fmt.Sprintf(\"unknown_hostname_%s-agent\", shortID)\n\tname, err := os.Hostname()\n\tif err != nil {\n\t\tlog.Printf(\"GetHostID: %v\", err)\n\t\treturn\n\t}\n\tname = fmt.Sprintf(\"%s\\\\%s\", name, GetUsername()) // hostname\\\\username\n\tid = fmt.Sprintf(\"%s_%s-agent\", name, shortID)\n\tproductInfo, err := ghw.Product()\n\tif err != nil {\n\t\tlog.Printf(\"GetHostID: %v\", err)\n\t\treturn\n\t}\n\n\tif productInfo.UUID != \"unknown\" {\n\t\tid = fmt.Sprintf(\"%s_%s-agent-%s\", name, shortID, productInfo.UUID)\n\t} else {\n\t\tid = fmt.Sprintf(\"%s_%s-agent-%s\", name, shortID, fallbackUUID)\n\t}\n\treturn\n}",
"func WriteHostUUID(dataDir string, id string) error {\n\terr := os.WriteFile(filepath.Join(dataDir, HostUUIDFile), []byte(id), os.ModeExclusive|0400)\n\tif err != nil {\n\t\tif errors.Is(err, fs.ErrPermission) {\n\t\t\t//do not convert to system error as this loses the ability to compare that it is a permission error\n\t\t\treturn err\n\t\t}\n\t\treturn trace.ConvertSystemError(err)\n\t}\n\treturn nil\n}",
"func readMachineID() []byte {\n\tid := make([]byte, 3)\n\tif hostname, err := os.Hostname(); err == nil {\n\t\thw := md5.New()\n\t\thw.Write([]byte(hostname))\n\t\tcopy(id, hw.Sum(nil))\n\t} else {\n\t\t// Fallback to rand number if machine id can't be gathered\n\t\tif _, randErr := rand.Reader.Read(id); randErr != nil {\n\t\t\tpanic(fmt.Errorf(\"Cannot get hostname nor generate a random number: %v; %v\", err, randErr))\n\t\t}\n\t}\n\treturn id\n}",
"func readMachineID() []byte {\n\tid := make([]byte, 3)\n\thostname, err1 := os.Hostname()\n\tif err1 != nil {\n\t\t_, err2 := io.ReadFull(rand.Reader, id)\n\t\tif err2 != nil {\n\t\t\tpanic(fmt.Errorf(\"cannot get hostname: %v; %v\", err1, err2))\n\t\t}\n\t\treturn id\n\t}\n\thw := md5.New()\n\thw.Write([]byte(hostname))\n\tcopy(id, hw.Sum(nil))\n\treturn id\n}",
"func getHostFromUUID(id string) (*model.Host, error) {\n\thosts, err := driver.GetHosts()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, host := range *hosts {\n\t\tif host.UUID == id {\n\t\t\t// Host Matches\n\t\t\tlog.Tracef(\"current host matches with id=%s\", id)\n\t\t\treturn host, nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"no host found with id %s\", id)\n}",
"func getUserIdentifier() string {\n\tvar config Configuration\n\tif _, err := os.Stat(\"./lemon_seed\"); os.IsNotExist(err) {\n\t\t// not exist, generate one\n\t\tconfig.ClientID = uuid.NewV4().String()\n\t\tconfigBytes, err := json.Marshal(config)\n\t\tif err != nil {\n\t\t\tlogger.Error(\"Marshal error when generating configBytes: %s\", err.Error())\n\t\t}\n\n\t\terr = ioutil.WriteFile(\"./lemon_seed\", configBytes, 0666)\n\t\tif err != nil {\n\t\t\tlogger.Error(\"Error when write config to seed: %s\", err.Error())\n\t\t}\n\t\treturn config.ClientID\n\t} else {\n\t\t// lemon_config exist\n\t\tbody, err := ioutil.ReadFile(\"./lemon_seed\")\n\t\tif err != nil {\n\t\t\tlogger.Fatal(\"Read exception.\")\n\t\t}\n\t\terr = json.Unmarshal(body, &config)\n\t\tif err != nil {\n\t\t\tlogger.Fatalf(\"Unmarshal error when reading seed: %s\", err.Error())\n\t\t}\n\t\treturn config.ClientID\n\t}\n}",
"func path2uuid(fpath string) string {\n\n\text := path.Ext(fpath) // identify extension\n\tfilename := strings.TrimSuffix(fpath, ext) // find filename\n\tuuid := path.Base(filename) // implement basename cmd\n\n\treturn uuid\n}",
"func readMachineId() []byte {\n\tvar sum [3]byte\n\tid := sum[:]\n\thostname, err1 := os.Hostname()\n\tif err1 != nil {\n\t\tn := uint32(time.Now().UnixNano())\n\t\tsum[0] = byte(n >> 0)\n\t\tsum[1] = byte(n >> 8)\n\t\tsum[2] = byte(n >> 16)\n\t\treturn id\n\t}\n\thw := md5.New()\n\thw.Write([]byte(hostname))\n\tcopy(id, hw.Sum(nil))\n\treturn id\n}",
"func (c *Config) getRandomId() (string, error) {\n\tb, err := ioutil.ReadFile(c.ProcBootId)\n\tif err != nil {\n\t\tglog.Errorf(\"fail to open %s: %q\", c.ProcBootId, err)\n\t\treturn \"\", err\n\t}\n\trandomId := string(b)\n\trandomId = strings.Trim(randomId, \"\\n\")\n\tglog.V(2).Infof(\"RandomId: %q\", randomId)\n\treturn randomId, nil\n\n}",
"func UniqueHwID() (string, error) {\n\tnetconfig, _ := network.GetConfig()\n\tmac := netconfig.HardwareAddress.String()\n\thardDiskId, _ := GetActiveHddSerial()\n\tbiosId, _ := GetBiosId()\n\tif len(mac) == 17 {\n\t\tmac = strings.Replace(mac, \":\", \"\", -1)[6:]\n\t}\n\tif len(biosId) > 6 {\n\t\tbiosId = biosId[len(biosId)-6:]\n\t}\n\tif len(hardDiskId) > 2 && hardDiskId[0:2] == \"0x\" {\n\t\thardDiskId = hardDiskId[2:]\n\t}\n\ta := 0\n\tb := 0\n\tc := 0\n\n\thwid := \"\"\n\tfor {\n\n\t\tif a >= 0 && a < len(mac) {\n\t\t\thwid += string(mac[a])\n\t\t\ta++\n\t\t} else {\n\t\t\ta = -1\n\t\t}\n\t\tif b >= 0 && b < len(hardDiskId) {\n\t\t\thwid += string(hardDiskId[b])\n\t\t\tb++\n\t\t} else {\n\t\t\tb = -1\n\t\t}\n\t\tif c >= 0 && c < len(biosId) {\n\t\t\thwid += string(biosId[c])\n\t\t\tc++\n\t\t} else {\n\t\t\tc = -1\n\t\t}\n\t\tif a == -1 && b == -1 && c == -1 {\n\t\t\tbreak\n\t\t}\n\n\t}\n\n\treg := regexp.MustCompile(\"[^a-zA-Z0-9]+\")\n\thwid = reg.ReplaceAllString(hwid, \"\")\n\n\treturn hwid, nil\n}",
"func (hd *Datapath) GetUUID() (string, error) {\n\n\t//// Get the\n\t//if hd.Kind == \"hal\" {\n\t//\tresp, err := hd.Hal.SystemClient.SystemUUIDGet(context.Background(), &halproto.Empty{})\n\t//\tif err != nil {\n\t//\t\tlog.Errorf(\"Error creating next hop. Err: %v\", err)\n\t//\t\treturn \"\", err\n\t//\t}\n\t//\tif resp.ApiStatus != halproto.ApiStatus_API_STATUS_OK {\n\t//\t\tlog.Errorf(\"HAL returned non OK status. %v\", resp.ApiStatus.String())\n\t//\t\treturn \"\", fmt.Errorf(\"HAL returned non OK status. %v\", resp.ApiStatus.String())\n\t//\t}\n\t//\treturn resp.Uuid, nil\n\t//}\n\n\t// Mock HAL\n\treturn hd.getDefaultUUID()\n}",
"func HardwareUUID() (string, error) {\n\t/*\n\t\tSample output of 'wmic path Win32_ComputerSystemProduct get uuid'\n\n\t\tUUID\n\t\t4219B2F5-C25F-6AF2-573C-35B0DF557236\n\t*/\n\tresult, err := readAndParseFromCommandLine(hardwareUUIDCmd)\n\tif err != nil {\n\t\treturn \"-1\", err\n\t}\n\thardwareUUID := \"\"\n\tif len(result) > 1 {\n\t\t// remove all spaces from the second line as that line consists hardware uuid\n\t\tre := regexp.MustCompile(\"\\\\s|\\\\r\")\n\t\thardwareUUID = re.ReplaceAllString(result[1], \"\")\n\t}\n\treturn hardwareUUID, nil\n}",
"func HostUID() (int, error) {\n\tconst uidMap = \"/proc/self/uid_map\"\n\n\tcurrentUID := os.Getuid()\n\n\tf, err := os.Open(uidMap)\n\tif err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\treturn 0, fmt.Errorf(\"failed to read: %s: %s\", uidMap, err)\n\t\t}\n\t\t// user namespace not supported\n\t\treturn currentUID, nil\n\t}\n\tdefer f.Close()\n\n\tscanner := bufio.NewScanner(f)\n\tfor scanner.Scan() {\n\t\tfields := strings.Fields(scanner.Text())\n\n\t\tsize, err := strconv.ParseUint(fields[2], 10, 32)\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"failed to convert size field %s: %s\", fields[2], err)\n\t\t}\n\t\t// not in a user namespace, use current UID\n\t\tif uint32(size) == ^uint32(0) {\n\t\t\tbreak\n\t\t}\n\n\t\t// we are inside a user namespace\n\t\tcontainerID, err := strconv.ParseUint(fields[0], 10, 32)\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"failed to convert container UID field %s: %s\", fields[0], err)\n\t\t}\n\t\t// we can safely assume that a user won't have two\n\t\t// consequent UID and we look if current UID match\n\t\t// a 1:1 user mapping\n\t\tif size == 1 && uint32(currentUID) == uint32(containerID) {\n\t\t\tuid, err := strconv.ParseUint(fields[1], 10, 32)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, fmt.Errorf(\"failed to convert host UID field %s: %s\", fields[1], err)\n\t\t\t}\n\t\t\treturn int(uid), nil\n\t\t}\n\t}\n\n\t// return current UID by default\n\treturn currentUID, nil\n}",
"func getHostId() (uint64, error) {\n\ta := getLocalIP()\n\tip := (uint64(a[0]) << 24) + (uint64(a[1]) << 16) + (uint64(a[2]) << 8) + uint64(a[3])\n\treturn ip % MaxHostId, nil\n}",
"func (o *NetworkLicenseFile) GetHostId() string {\n\tif o == nil || o.HostId == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.HostId\n}",
"func parseUUID(device, output string) (string, error) {\n\n\t// find the line with the uuid\n\tlines := strings.Split(output, \"\\n\")\n\tfor _, line := range lines {\n\t\tif strings.Index(line, \"Disk identifier (GUID)\") != -1 {\n\t\t\twords := strings.Split(line, \" \")\n\t\t\tfor _, word := range words {\n\t\t\t\t// we expect most words in the line not to be a uuid, but will return the first one that is\n\t\t\t\tresult, err := uuid.Parse(word)\n\t\t\t\tif err == nil {\n\t\t\t\t\treturn result.String(), nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn \"\", fmt.Errorf(\"uuid not found for device %s. output=%s\", device, output)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
StringSliceSubset returns true if b is a subset of a. | func StringSliceSubset(a []string, b []string) error {
aset := make(map[string]bool)
for _, v := range a {
aset[v] = true
}
for _, v := range b {
_, ok := aset[v]
if !ok {
return trace.BadParameter("%v not in set", v)
}
}
return nil
} | [
"func SliceStringIsSubset(larger, smaller []string) (bool, []string) {\n\tlargerSet := make(map[string]struct{}, len(larger))\n\tfor _, l := range larger {\n\t\tlargerSet[l] = struct{}{}\n\t}\n\n\tsubset := true\n\tvar offending []string\n\tfor _, s := range smaller {\n\t\tif _, ok := largerSet[s]; !ok {\n\t\t\tsubset = false\n\t\t\toffending = append(offending, s)\n\t\t}\n\t}\n\n\treturn subset, offending\n}",
"func (s String) IsSubset(other String) bool {\n\tif len(s) > len(other) {\n\t\treturn false\n\t}\n\n\tfor k := range s {\n\t\tif _, ok := other[k]; !ok {\n\t\t\treturn false\n\t\t}\n\n\t}\n\treturn true\n}",
"func (s stringSet) isSubsetOf(t stringSet) bool {\n\tj := 0\n\tfor _, ss := range s {\n\t\tfor j < len(t) && t[j] < ss {\n\t\t\tj++\n\t\t}\n\t\tif j >= len(t) || t[j] != ss {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func predicateIsSubset(a interface{}, b ...interface{}) predicate.BoolPredicate {\n\treturn func() bool {\n\t\t// Populate the set.\n\t\tset := map[string]bool{}\n\t\tfor _, bval := range b {\n\t\t\ts, ok := bval.(string)\n\t\t\tif !ok {\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\tset[s] = true\n\t\t}\n\n\t\tswitch aval := a.(type) {\n\t\tcase string:\n\t\t\treturn set[aval]\n\t\tcase []string:\n\t\t\tfor _, v := range aval {\n\t\t\t\tif !set[v] {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n}",
"func sliceEqual(a, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor _, v := range a {\n\t\tif !stringInSlice(v, b) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func SliceIncludeSlice(a, b []string) bool {\n\tif EqualSlice(a, b) {\n\t\treturn true\n\t}\n\tfor _, item := range b {\n\t\tif !StringsContain(a, item) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func SliceStringPEqual(a, b []*string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tsa := make([]string, len(a))\n\tsb := make([]string, len(a))\n\tfor x, aPtr := range a {\n\t\tsa[x] = *aPtr\n\t\tsb[x] = *b[x]\n\t}\n\tsort.Strings(sa)\n\tsort.Strings(sb)\n\treturn sortedStringSliceEqual(sa, sb)\n}",
"func SliceStringPEqual(a, b []*string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tsa := make([]string, len(a))\n\tsb := make([]string, len(a))\n\tfor x, aPtr := range a {\n\t\tsa[x] = *aPtr\n\t\tsb[x] = *b[x]\n\t}\n\tsort.Strings(sa)\n\tsort.Strings(sb)\n\tfor x, aVal := range sa {\n\t\tbVal := sb[x]\n\t\tif aVal != bVal {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (s StringSet) Subset(t StringSet) bool {\n\tfor k := range s {\n\t\tif _, ok := t[k]; !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func isSliceInString(src string, slice []string) bool {\n\tfor _, dst := range slice {\n\t\tif strings.Contains(src, dst) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func SliceContainsSlice(old, new []string) bool {\n\tfor _, newElement := range new {\n\t\tin := false\n\t\tfor _, oldElement := range old {\n\t\t\tif newElement == oldElement {\n\t\t\t\tin = true\n\t\t\t}\n\t\t}\n\t\tif !in {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func substringContainedInSlice(str string, substrs []string) bool {\n\tfor _, s := range substrs {\n\t\tif strings.Contains(str, s) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func isSubset(setA, setB []string) bool {\n\tset := make(map[string]bool)\n\tfor _, v := range setB {\n\t\tset[v] = true\n\t}\n\tfor _, v := range setA {\n\t\tif !set[v] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func StringSlicesIntersection(a, b []string) (c []string) {\n\tm := make(map[string]bool)\n\n\tfor _, item := range a {\n\t\tm[item] = true\n\t}\n\n\tfor _, item := range b {\n\t\tif _, ok := m[item]; ok {\n\t\t\tc = append(c, item)\n\t\t}\n\t}\n\treturn\n}",
"func equalStringSlice(a, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\n\tfor i := 0; i < len(a); i++ {\n\t\tif a[i] != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func (f *GeneralFunc) inStringSlice(data string, ss []string) bool {\n\tfor _, s := range ss {\n\t\tif data == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func sliceContains(str string, s []string) bool {\n\tfor i := range s {\n\t\tif str == s[i] {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func StringInSlice(a string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == a {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func Subset(first, second []string) bool {\n\tset := make(map[string]bool)\n\tfor _, value := range second {\n\t\tset[value] = true\n\t}\n\n\tfor _, value := range first {\n\t\tif !set[value] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
UintSliceSubset returns true if b is a subset of a. | func UintSliceSubset(a []uint16, b []uint16) error {
aset := make(map[uint16]bool)
for _, v := range a {
aset[v] = true
}
for _, v := range b {
_, ok := aset[v]
if !ok {
return trace.BadParameter("%v not in set", v)
}
}
return nil
} | [
"func StringSliceSubset(a []string, b []string) error {\n\taset := make(map[string]bool)\n\tfor _, v := range a {\n\t\taset[v] = true\n\t}\n\n\tfor _, v := range b {\n\t\t_, ok := aset[v]\n\t\tif !ok {\n\t\t\treturn trace.BadParameter(\"%v not in set\", v)\n\t\t}\n\n\t}\n\treturn nil\n}",
"func SliceSubset(slice1, slice2 interface{}) (bool, error) {\n\n\tswitch x := slice1.(type) {\n\tcase []DRAState:\n\t\tstateSlice1, ok1 := slice1.([]DRAState)\n\t\tstateSlice2, ok2 := slice2.([]DRAState)\n\n\t\tif (!ok1) || (!ok2) {\n\t\t\treturn false, fmt.Errorf(\"Error converting slice1 (%v) or slice2 (%v).\", ok1, ok2)\n\t\t}\n\n\t\t//Iterate through all TransitionSystemState in stateSlice1 and make sure that they are in 2.\n\t\tfor _, stateFrom1 := range stateSlice1 {\n\t\t\tif !(stateFrom1.In(stateSlice2)) {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\t// If all elements of slice1 are in slice2 then return true!\n\t\treturn true, nil\n\tcase []TransitionSystemState:\n\t\tstateSlice1, ok1 := slice1.([]TransitionSystemState)\n\t\tstateSlice2, ok2 := slice2.([]TransitionSystemState)\n\n\t\tif (!ok1) || (!ok2) {\n\t\t\treturn false, fmt.Errorf(\"Error converting slice1 (%v) or slice2 (%v).\", ok1, ok2)\n\t\t}\n\n\t\t//Iterate through all TransitionSystemState in stateSlice1 and make sure that they are in 2.\n\t\tfor _, stateFrom1 := range stateSlice1 {\n\t\t\tif !(stateFrom1.In(stateSlice2)) {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\t// If all elements of slice1 are in slice2 then return true!\n\t\treturn true, nil\n\n\tcase []mc.AtomicProposition:\n\t\tapSlice1, ok1 := slice1.([]mc.AtomicProposition)\n\t\tapSlice2, ok2 := slice2.([]mc.AtomicProposition)\n\n\t\tif (!ok1) || (!ok2) {\n\t\t\treturn false, fmt.Errorf(\"Error converting slice1 (%v) or slice2 (%v).\", ok1, ok2)\n\t\t}\n\n\t\t//Iterate through all TransitionSystemState in stateSlice1 and make sure that they are in 2.\n\t\tfor _, apFrom1 := range apSlice1 {\n\t\t\tif !(apFrom1.In(apSlice2)) {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\t\t// If all elements of slice1 are in slice2 then return true!\n\t\treturn true, nil\n\n\tdefault:\n\t\treturn false, fmt.Errorf(\"Unexpected type given to SliceSubset(): %v\", x)\n\t}\n\n}",
"func (ids IDSlice) IsSubsetOf(o IDSlice) bool {\n\tfor _, id := range ids {\n\t\tif !o.Contains(id) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func sliceContainsSlice(smallSlice []core.VarId, bigSlice [][]core.VarId) bool {\n\tfor _, slice := range bigSlice {\n\t\tif slicesIdentical(slice, smallSlice) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func predicateIsSubset(a interface{}, b ...interface{}) predicate.BoolPredicate {\n\treturn func() bool {\n\t\t// Populate the set.\n\t\tset := map[string]bool{}\n\t\tfor _, bval := range b {\n\t\t\ts, ok := bval.(string)\n\t\t\tif !ok {\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\tset[s] = true\n\t\t}\n\n\t\tswitch aval := a.(type) {\n\t\tcase string:\n\t\t\treturn set[aval]\n\t\tcase []string:\n\t\t\tfor _, v := range aval {\n\t\t\t\tif !set[v] {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n}",
"func (s bitSet) isSubsetOf(o bitSet) bool {\n\treturn s.union(o) == o\n}",
"func isSubset(setA, setB []string) bool {\n\tset := make(map[string]bool)\n\tfor _, v := range setB {\n\t\tset[v] = true\n\t}\n\tfor _, v := range setA {\n\t\tif !set[v] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func isSubset(lhs, rhs ref.Val) ref.Val {\n\ta, ok := lhs.(traits.Lister)\n\tif !ok {\n\t\treturn types.ValOrErr(a, \"no such overload\")\n\t}\n\n\tb, ok := rhs.(traits.Lister)\n\tif !ok {\n\t\treturn types.ValOrErr(b, \"no such overload\")\n\t}\n\n\tm := convertToMap(b)\n\n\tfor ai := a.Iterator(); ai.HasNext() == types.True; {\n\t\tva := ai.Next()\n\t\tif m != nil {\n\t\t\tif _, ok := m[va]; !ok {\n\t\t\t\treturn types.False\n\t\t\t}\n\t\t} else {\n\t\t\tif !find(b.Iterator(), va) {\n\t\t\t\treturn types.False\n\t\t\t}\n\t\t}\n\t}\n\n\treturn types.True\n}",
"func SliceIntersects(a, b interface{}) bool {\n\taValue, bValue := reflect.ValueOf(a), reflect.ValueOf(b)\n\taValueKind, bValueKind := aValue.Kind(), bValue.Kind()\n\n\tif aValueKind != reflect.Slice || bValueKind != reflect.Slice {\n\t\tpanic(fmt.Sprintf(\"one of parameters is not a slice: (%v, %v)\", aValueKind, bValueKind))\n\t}\n\tfor i := 0; i < bValue.Len(); i++ {\n\t\tfor j := 0; j < aValue.Len(); j++ {\n\t\t\tif bValue.Index(i).Interface() == aValue.Index(j).Interface() {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}",
"func IntSliceIntersects(a, b []int) (rb bool) {\n\trb = false\n\tfor _, k := range a {\n\t\tfor _, l := range b {\n\t\t\tif k == l {\n\t\t\t\trb = true\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}",
"func (s stringSet) isSubsetOf(t stringSet) bool {\n\tj := 0\n\tfor _, ss := range s {\n\t\tfor j < len(t) && t[j] < ss {\n\t\t\tj++\n\t\t}\n\t\tif j >= len(t) || t[j] != ss {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func SliceEqual(a, b []interface{}) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\n\tif (a == nil) != (b == nil) {\n\t\treturn false\n\t}\n\n\tfor i, v := range a {\n\t\tif v != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func IsSubset(s, t Interface) bool {\n\tfor _, x := range t.Members() {\n\t\tif !s.Contains(x) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func Subset(first, second []string) bool {\n\tset := make(map[string]bool)\n\tfor _, value := range second {\n\t\tset[value] = true\n\t}\n\n\tfor _, value := range first {\n\t\tif !set[value] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func (set *AppleSet) IsSubset(other *AppleSet) bool {\n\tif set.IsEmpty() {\n\t\treturn !other.IsEmpty()\n\t}\n\n\tif other.IsEmpty() {\n\t\treturn false\n\t}\n\n\tset.s.RLock()\n\tother.s.RLock()\n\tdefer set.s.RUnlock()\n\tdefer other.s.RUnlock()\n\n\tfor v := range set.m {\n\t\tif !other.Contains(v) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (s Slice) Sub(b Slice) Slice {\n\tlut := map[uuid.UUID]struct{}{}\n\tfor _, id := range b {\n\t\tlut[id] = struct{}{}\n\t}\n\n\tsub := []uuid.UUID{}\n\tfor _, id := range s {\n\t\tif _, foundInB := lut[id]; !foundInB {\n\t\t\tsub = append(sub, id)\n\t\t}\n\t}\n\treturn sub\n}",
"func SliceIncludeSlice(a, b []string) bool {\n\tif EqualSlice(a, b) {\n\t\treturn true\n\t}\n\tfor _, item := range b {\n\t\tif !StringsContain(a, item) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (s *IntSet) Subset(y *IntSet) bool {\n\n\tfor _, m := range s.Members() {\n\t\tif !y.Contains(m) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func EqualsSliceOfCharacteristic(a, b []Characteristic) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(a); i++ {\n\t\tif !EqualsCharacteristic(a[i], b[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
RemoveFromSlice makes a copy of the slice and removes the passed in values from the copy. | func RemoveFromSlice(slice []string, values ...string) []string {
output := make([]string, 0, len(slice))
remove := make(map[string]bool)
for _, value := range values {
remove[value] = true
}
for _, s := range slice {
_, ok := remove[s]
if ok {
continue
}
output = append(output, s)
}
return output
} | [
"func RemoveFromSlice(slice []string, item string) []string {\n\tfor i, value := range slice {\n\t\tif value == item {\n\t\t\treturn append(slice[:i], slice[i+1:]...)\n\t\t}\n\t}\n\treturn slice\n}",
"func (v *Data) RemoveSlice(start, end int) {\n\tdv := *v\n\n\t*v = append(dv[:start], dv[end:]...)\n\n\tv.Truncate(len(dv) - (end - start))\n}",
"func (v *IntVec) RemoveSlice(start, end int) {\n\tdv := *v\n\n\t*v = append(dv[:start], dv[end:]...)\n\n\tv.Truncate(len(dv) - (end - start))\n}",
"func (k *MutableKey) RemoveSlice(vals []uint64) {\n\tfor _, val := range vals {\n\t\tdelete(k.vals, val)\n\t\tk.synced = false\n\t}\n}",
"func removeFromSlice(rrs []dns.RR, i int) []dns.RR {\n\tif i >= len(rrs) {\n\t\treturn rrs\n\t}\n\trrs = append(rrs[:i], rrs[i+1:]...)\n\treturn rrs\n}",
"func removeFromSlice(data []byte, indexes []int) []byte {\n\tfor _, i := range indexes {\n\t\tdata = append(data[:i], data[i+1:]...)\n\t}\n\treturn data\n}",
"func removeFromSlice(array []string, item string) []string {\n\tfor ind, val := range array {\n\t\tif val == item {\n\t\t\tarray[ind] = array[len(array)-1]\n\t\t\treturn array[:len(array)-1]\n\t\t}\n\t}\n\treturn array\n}",
"func DeleteInSlice(s interface{}, index int) interface{} {\n\tvalue := reflect.ValueOf(s)\n\tif value.Kind() == reflect.Slice {\n\t\t// || value.Kind() == reflect.Array {\n\t\tresult := reflect.AppendSlice(value.Slice(0, index), value.Slice(index+1, value.Len()))\n\t\treturn result.Interface()\n\t}\n\n\tklog.Errorf(\"Only a slice can be passed into this method for deleting an element of it.\")\n\treturn s\n}",
"func RemoveFromArray(slice []string, input string) []string {\n\tvar output []string\n\tfor i, item := range slice {\n\t\tif item == input {\n\t\t\toutput = append(slice[:i], slice[i+1:]...)\n\t\t\tbreak\n\t\t}\n\t}\n\treturn output\n}",
"func RemoveItemFromSlice() {\n\tslice := []int{0, 1, 2, 3, 4, 5, 6}\n\tslice = append(slice[:2], slice[3:]...)\n\tfor _, val := range slice {\n\t\tfmt.Println(val)\n\t}\n}",
"func StringSliceRemove(list []string, s string) []string {\n\tfor i, v := range list {\n\t\tif v == s {\n\t\t\tlist = append(list[:i], list[i+1:]...)\n\t\t}\n\t}\n\treturn list\n}",
"func deleteIntFromSlice(slice []int, elim int) []int {\n\n\tfor key, value := range slice {\n\t\tif value == elim {\n\t\t\treturn append(slice[:key], slice[key+1:]...)\n\t\t}\n\t}\n\n\treturn slice\n}",
"func RemoveValues[T comparable](slice, values []T) []T {\n\tif len(slice) == 0 {\n\t\treturn slice\n\t}\n\tkeys := make(map[T]struct{}, len(slice))\n\tfor _, v := range values {\n\t\tkeys[v] = struct{}{}\n\t}\n\n\tvar i int\n\tfor _, v := range slice {\n\t\tif _, ok := keys[v]; !ok {\n\t\t\tslice[i] = v\n\t\t\ti++\n\t\t}\n\t}\n\treturn slice[:i]\n}",
"func remove(slice []int, i int) []int{\n\tcopy(slice[i:],slice[i+1:])\n\treturn slice[:len(slice)-1]\n}",
"func RemoveWithKeepOrder(slice []string, s int) []string {\n\treturn append(slice[:s], slice[s+1:]...)\n}",
"func Remove(slice []string, value string) []string {\n\tfor i, s := range slice {\n\t\tif s == value {\n\t\t\tslice = append(slice[:i], slice[i+1:]...)\n\t\t\tbreak\n\t\t}\n\t}\n\treturn slice\n}",
"func SubtractSlice(target []int, values []int) (result []int) {\n\tfor _, item := range target {\n\t\tif !ContainsInt(values, item) {\n\t\t\tresult = append(result, item)\n\t\t}\n\t}\n\n\treturn result\n}",
"func removeFirst(sliceArg []int) []int {\n\t//This modifies the original slice!??\n\treturn append(sliceArg[:0], sliceArg[1:]...)\n}",
"func Without(slice interface{}, values ...interface{}) (interface{}, error) {\n\n\tsliceVal := reflect.ValueOf(slice)\n\tif sliceVal.Type().Kind() != reflect.Slice {\n\t\treturn nil, errors.New(\"godash: invalid parameter type. Without func expects parameter 1 to be a slice\")\n\t}\n\tfor _, v := range values {\n\t\tif sliceVal.Type().Elem() != reflect.TypeOf(v) {\n\t\t\treturn nil, errors.New(\"godash: invalid parameter type. Without func expects additional parameters to match the type of the provided slice\")\n\t\t}\n\t}\n\n\tdest := reflect.MakeSlice(reflect.SliceOf(reflect.TypeOf(slice).Elem()), 0, sliceVal.Len())\n\n\tfor i := 0; i < sliceVal.Len(); i++ {\n\t\tremove := false\n\t\tfor _, v := range values {\n\t\t\tif reflect.DeepEqual(sliceVal.Index(i).Interface(), v) {\n\t\t\t\tremove = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !remove {\n\t\t\tdest = reflect.Append(dest, sliceVal.Index(i))\n\t\t}\n\t}\n\treturn dest.Interface(), nil\n\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ChooseRandomString returns a random string from the given slice. | func ChooseRandomString(slice []string) string {
switch len(slice) {
case 0:
return ""
case 1:
return slice[0]
default:
return slice[rand.Intn(len(slice))]
}
} | [
"func GetRandomStringFromSlice(slice []string) string {\n\trand.Seed(time.Now().UnixNano())\n\n\treturn slice[rand.Intn(len(slice))]\n}",
"func ChooseRandomString(sl []string) string {\n\tif sl == nil {\n\t\treturn \"\"\n\t}\n\treturn sl[rand.Intn(len(sl))]\n}",
"func ChooseString(l []string) string {\n\tif len(l) == 0 {\n\t\treturn \"\"\n\t}\n\trand.Seed(time.Now().UnixNano())\n\treturn l[rand.Intn(len(l))]\n}",
"func (h *Random) StringFromSlice(in []string) string {\n\trandomIndex := rand.Intn(len(in))\n\treturn in[randomIndex]\n}",
"func randChoice(elems []string) string {\n\treturn elems[rand.Intn(len(elems))]\n}",
"func random_choice(words []string) string {\n\treturn words[rand.Intn(len(words))]\n}",
"func (ur UnicodeRanges) randString(r *rand.Rand) string {\n\tn := r.Intn(20)\n\tsb := strings.Builder{}\n\tsb.Grow(n)\n\tfor i := 0; i < n; i++ {\n\t\tsb.WriteRune(ur[r.Intn(len(ur))].choose(r))\n\t}\n\treturn sb.String()\n}",
"func (ur UnicodeRange) randString(r *rand.Rand) string {\n\tn := r.Intn(20)\n\tsb := strings.Builder{}\n\tsb.Grow(n)\n\tfor i := 0; i < n; i++ {\n\t\tsb.WriteRune(ur.choose(r))\n\t}\n\treturn sb.String()\n}",
"func (h *Haikunator) randomString(s []string) string {\n\tsize := len(s)\n\n\tif size <= 0 {\n\t\treturn \"\"\n\t}\n\n\treturn s[h.Random.Intn(size)]\n}",
"func getRandomString(length int) (string, error) {\n\tbuf := make([]byte, length)\n\tif _, err := rand.Read(buf); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor i := 0; i < length; {\n\t\tidx := int(buf[i] & letterIdxMask)\n\t\tif idx < letterSize {\n\t\t\tbuf[i] = letters[idx]\n\t\t\ti++\n\t\t} else {\n\t\t\tif _, err := rand.Read(buf[i : i+1]); err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t}\n\t}\n\treturn string(buf), nil\n}",
"func GetRandomStr(r *rand.Rand, arr []string) string {\n\treturn arr[r.Intn(len(arr))]\n}",
"func GenerateRandomString(stringLen int) string {\n\tb := make([]byte, stringLen)\n\tfor i := range b {\n\t\tb[i] = charset[seededRand.Intn(len(charset))]\n\t}\n\treturn string(b)\n}",
"func StringRand(length int) string {\n\treturn StringRandWithCharset(length, CharsetDefault)\n}",
"func RandomString(n int) string {\n\treturn string(Random(n))\n}",
"func RandomString(length int, strChars string) string {\n\trand.Seed(time.Now().UnixNano())\n\tchars := []rune(strChars)\n\tfmt.Println(chars)\n\tvar b strings.Builder\n\tfor i := 0; i < length; i++ {\n\t\tb.WriteRune(chars[rand.Intn(len(chars))])\n\t}\n\treturn b.String()\n}",
"func RandomString(length int) string {\n\treturn randomstring.String(length)\n}",
"func RandomString(rand *rand.Rand, size int) string {\n\tsb := strings.Builder{}\n\tfor sb.Len() <= size {\n\t\tsb.WriteRune(RandomRune(rand, 2, 5))\n\t}\n\tret := sb.String()\n\t_, lastRuneSize := utf8.DecodeLastRuneInString(ret)\n\treturn ret[0 : len(ret)-lastRuneSize]\n}",
"func RandomString(length int) string {\n\trandomString := \"\"\n\n\tfor len(randomString) < length {\n\t\trandomString += strconv.Itoa(rand.Int())\n\t}\n\n\treturn randomString[:length]\n}",
"func RandString(n int) string {\n\tb := make([]byte, n)\n\n\tfor i := range b {\n\t\tb[i] = rCharSet[rand.Intn(len(rCharSet))]\n\t}\n\n\treturn string(b)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
CheckCertificateFormatFlag checks if the certificate format is valid. | func CheckCertificateFormatFlag(s string) (string, error) {
switch s {
case constants.CertificateFormatStandard, teleport.CertificateFormatOldSSH, teleport.CertificateFormatUnspecified:
return s, nil
default:
return "", trace.BadParameter("invalid certificate format parameter: %q", s)
}
} | [
"func (cd *ChainDoc) IsValidFormat() bool {\n\tif cd.Created == 0 || cd.GetType() != int(ChainDIDType) {\n\t\treturn false\n\t}\n\treturn true\n}",
"func (da *DefaultAuth) CheckFormat() error {\n\treturn nil\n}",
"func (dd *AccountDoc) IsValidFormat() bool {\n\tif dd.Created == 0 || dd.GetType() != int(AccountDIDType) {\n\t\treturn false\n\t}\n\treturn true\n}",
"func (cg *CertGenerator) ValidateCertificate(addr string, authOptions *auth.Options) (bool, error) {\n\ttlsConfig, err := cg.ReadTLSConfig(addr, authOptions)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tdialer := &net.Dialer{\n\t\tTimeout: time.Second * 40,\n\t}\n\n\t_, err = tls.DialWithDialer(dialer, \"tcp\", addr, tlsConfig)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn true, nil\n}",
"func FormatCheck(i Info) bool {\n\treturn (C.sf_format_check((*C.SF_INFO)(unsafe.Pointer(&i))) == C.SF_TRUE)\n}",
"func CheckCertificate(crt string) {\n\t// Read and parse the PEM certificate file\n\tpemData, err := ioutil.ReadFile(crt)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tblock, rest := pem.Decode([]byte(pemData))\n\tif block == nil || len(rest) > 0 {\n\t\tlog.Fatal(\"Certificate decoding error\")\n\t}\n\tcert, err := x509.ParseCertificate(block.Bytes)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Print the certificate\n\tresult, err := certinfo.CertificateText(cert)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Print(result)\n}",
"func validateFormat(format string) bool {\n\tfor _, allowedFormat := range []string{\"xml\", \"json\", \"human-readable\"} {\n\t\tif format == allowedFormat {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func ValidFormat(f string) bool {\n\tfor _, v := range supportedFormats() {\n\t\tif v[0] == f || v[1] == f {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (m *X509Certificate) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateNotAfter(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateNotBefore(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (h *DeviceHandler) CheckDeviceNameFormat(_ context.Context, arg keybase1.CheckDeviceNameFormatArg) (bool, error) {\n\tok := libkb.CheckDeviceName.F(arg.Name)\n\tif ok {\n\t\treturn ok, nil\n\t}\n\treturn false, errors.New(libkb.CheckDeviceName.Hint)\n}",
"func ValidFormat(format string) bool {\n\tfor _, f := range fmtsByStandard {\n\t\tif f == format {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func isValidCertificate(c []byte) bool {\n\tp, _ := pem.Decode(c)\n\tif p == nil {\n\t\treturn false\n\t}\n\tif _, err := x509.ParseCertificates(p.Bytes); err != nil {\n\t\treturn false\n\t}\n\treturn true\n}",
"func (o *Options) checkFormatOptions() error {\n\t// Validate the output format and template\n\tlogrus.Infof(\"Using output format: %s\", o.Format)\n\tif o.Format == FormatMarkdown && o.GoTemplate != GoTemplateDefault {\n\t\tif !strings.HasPrefix(o.GoTemplate, GoTemplatePrefix) {\n\t\t\treturn fmt.Errorf(\"go template has to be prefixed with %q\", GoTemplatePrefix)\n\t\t}\n\n\t\ttemplatePathOrOnline := strings.TrimPrefix(o.GoTemplate, GoTemplatePrefix)\n\t\t// Verify if template file exists\n\t\tif !strings.HasPrefix(templatePathOrOnline, GoTemplatePrefixInline) {\n\t\t\tfileStats, err := os.Stat(templatePathOrOnline)\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\treturn fmt.Errorf(\"could not find template file (%s)\", templatePathOrOnline)\n\t\t\t}\n\t\t\tif fileStats.Size() == 0 {\n\t\t\t\treturn fmt.Errorf(\"template file %s is empty\", templatePathOrOnline)\n\t\t\t}\n\t\t}\n\t}\n\tif o.Format == FormatJSON && o.GoTemplate != GoTemplateDefault {\n\t\treturn errors.New(\"go-template cannot be defined when in JSON mode\")\n\t}\n\tif o.Format != FormatJSON && o.Format != FormatMarkdown {\n\t\treturn fmt.Errorf(\"invalid format: %s\", o.Format)\n\t}\n\treturn nil\n}",
"func ValidateFormatVersion(formatVersion uint32) (bool) {\n if formatVersion == 1 || formatVersion == 2 || formatVersion == 3 || formatVersion == 4 { //format version should still be 1 for now\n return true\n }\n return false\n}",
"func checkFormatServerConfAuth(c ServerConfig) (ok bool) {\n\tok = false\n\tif c.Pass != \"\" || c.Key != \"\" || c.Cert != \"\" {\n\t\tok = true\n\t}\n\n\tif c.AgentAuth == true {\n\t\tok = true\n\t}\n\n\tif c.PKCS11Use == true {\n\t\t_, err := os.Stat(c.PKCS11Provider)\n\t\tif err == nil {\n\t\t\tok = true\n\t\t}\n\t}\n\n\tif len(c.Keys) > 0 || len(c.Passes) > 0 {\n\t\tok = true\n\t}\n\n\treturn\n}",
"func PossibleCertificateFormatValues() []CertificateFormat {\n\treturn []CertificateFormat{CertificateFormatCer, CertificateFormatPfx}\n}",
"func (c *Certificate) Validate() error {\n\tif _, err := time.ParseDuration(c.ValidityDuration); err != nil {\n\t\treturn fmt.Errorf(\"parsing validity duration %q for certificate: %w\", c.ValidityDuration, err)\n\t}\n\n\tfor _, i := range c.IPAddresses {\n\t\tif ip := net.ParseIP(i); ip == nil {\n\t\t\treturn fmt.Errorf(\"parsing IP address %q\", i)\n\t\t}\n\t}\n\n\tif c.RSABits == 0 {\n\t\treturn fmt.Errorf(\"RSA bits can't be 0\")\n\t}\n\n\treturn nil\n}",
"func (recv *Variant) CheckFormatString(formatString string, copyOnly bool) bool {\n\tc_format_string := C.CString(formatString)\n\tdefer C.free(unsafe.Pointer(c_format_string))\n\n\tc_copy_only :=\n\t\tboolToGboolean(copyOnly)\n\n\tretC := C.g_variant_check_format_string((*C.GVariant)(recv.native), c_format_string, c_copy_only)\n\tretGo := retC == C.TRUE\n\n\treturn retGo\n}",
"func (s *CertificatesService) Validate(body *CertificateCreate) error {\n\tenc, err := json.Marshal(body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = s.client.Post(\"/v1/certificates/validate\", enc)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn err\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ReadAtMost reads up to limit bytes from r, and reports an error when limit bytes are read. | func ReadAtMost(r io.Reader, limit int64) ([]byte, error) {
limitedReader := &io.LimitedReader{R: r, N: limit}
data, err := io.ReadAll(limitedReader)
if err != nil {
return data, err
}
if limitedReader.N <= 0 {
return data, ErrLimitReached
}
return data, nil
} | [
"func readAtMost(r io.Reader, limit int64) ([]byte, error) {\n\tlimitedReader := &io.LimitedReader{R: r, N: limit}\n\tdata, err := ioutil.ReadAll(limitedReader)\n\tif err != nil {\n\t\treturn data, err\n\t}\n\tif limitedReader.N <= 0 {\n\t\treturn data, ErrLimitReached\n\t}\n\treturn data, nil\n}",
"func (c Conn) LimitedRead(b []byte) (int, error) {\n\tr := io.LimitReader(c.Conn, c.maxReadBuffer)\n\treturn r.Read(b)\n}",
"func ReadLimited(r io.Reader, limit int64) ([]byte, error) {\n\treturn ioutil.ReadAll(&io.LimitedReader{R: r, N: limit})\n}",
"func tryReadFull(r io.Reader, b []byte) (n int, err error) {\n\tfor len(b) > n && err == nil {\n\t\tvar nn int\n\t\tnn, err = r.Read(b[n:])\n\t\tn += nn\n\t}\n\tif len(b) == n && err == io.EOF {\n\t\terr = nil\n\t}\n\treturn n, err\n}",
"func ReadLimit(l uint) ReadOption {\n\treturn func(r *ReadOptions) {\n\t\tr.Limit = l\n\t}\n}",
"func (rr *Reader) ReadSizeWithLimit(limit uint32) int {\n\tif rr.Err != nil {\n\t\treturn 0\n\t}\n\tvar size32 uint32\n\tsize32, rr.Err = size32Decode(func() (byte, error) {\n\t\treturn rr.ReadByte(), rr.Err\n\t})\n\tif size32 > limit && rr.Err == nil {\n\t\trr.Err = errors.New(\"read size limit overflow\")\n\t\treturn 0\n\t}\n\treturn int(size32)\n}",
"func isReadLimitReached(bytesLoaded int64, linesLoaded int64, logFilePosition string) bool {\n\treturn (logFilePosition == logs.Beginning && bytesLoaded >= byteReadLimit) ||\n\t\t(logFilePosition == logs.End && linesLoaded >= lineReadLimit)\n}",
"func readFull(r io.Reader, buf []byte) (n int, err error) {\n\tfor n < len(buf) && err == nil {\n\t\tvar nn int\n\t\tnn, err = r.Read(buf[n:])\n\t\tn += nn\n\t}\n\tif n == len(buf) {\n\t\terr = nil\n\t}\n\treturn\n}",
"func mustReadFull(r io.Reader, b []byte) (int, error) {\n\tn, err := tryReadFull(r, b)\n\tif err == io.EOF {\n\t\terr = io.ErrUnexpectedEOF\n\t}\n\treturn n, err\n}",
"func limit(n int64) int {\n\tif n < 0 || maxio < n {\n\t\tFatal(\"bad io size:\", n)\n\t}\n\treturn int(n)\n}",
"func readFull(r io.Reader, buf []byte) (int, error) {\n\tvar n int\n\tvar err error\n\tfor n < len(buf) && err == nil {\n\t\tvar nn int\n\t\tnn, err = r.Read(buf[n:])\n\t\tn += nn\n\t}\n\tif n == len(buf) {\n\t\treturn n, nil\n\t}\n\tif err == io.EOF {\n\t\treturn n, io.ErrUnexpectedEOF\n\t}\n\treturn n, err\n}",
"func (r *reader) available(off, max int64) (ret int64) {\n\toff += r.offset\n\tfor max > 0 {\n\t\treq, ok := r.t.offsetRequest(off)\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tif !r.t.haveChunk(req) {\n\t\t\tbreak\n\t\t}\n\t\tlen1 := int64(req.Length) - (off - r.t.requestOffset(req))\n\t\tmax -= len1\n\t\tret += len1\n\t\toff += len1\n\t}\n\t// Ensure that ret hasn't exceeded our original max.\n\tif max < 0 {\n\t\tret += max\n\t}\n\treturn\n}",
"func (c *conn) Read(b []byte) (int, error) {\n\tc.ronce.Do(c.sleepLatency)\n\n\tn, err := c.rb.FillThrottle(func(remaining int64) (int64, error) {\n\t\tmax := remaining\n\t\tif l := int64(len(b)); max > l {\n\t\t\tmax = l\n\t\t}\n\n\t\tn, err := c.Conn.Read(b[:max])\n\t\treturn int64(n), err\n\t})\n\tif err != nil && err != io.EOF {\n\t\tlog.Errorf(\"trafficshape: error on throttled read: %v\", err)\n\t}\n\n\treturn int(n), err\n}",
"func (c *Conn) Read(b []byte) (int, error) {\n\tc.ronce.Do(c.sleepLatency)\n\n\tn, err := c.ReadBucket.FillThrottle(func(remaining int64) (int64, error) {\n\t\tmax := remaining\n\t\tif l := int64(len(b)); max > l {\n\t\t\tmax = l\n\t\t}\n\n\t\tn, err := c.conn.Read(b[:max])\n\t\treturn int64(n), err\n\t})\n\tif err != nil && err != io.EOF {\n\t\tlog.Errorf(\"trafficshape: error on throttled read: %v\", err)\n\t}\n\n\treturn int(n), err\n}",
"func (d *Decoder) readMore() {\n\tif d.complete {\n\t\treturn\n\t}\n\tn := cap(d.buf) - len(d.buf)\n\tif n < minRead {\n\t\t// We need to grow the buffer. Note that we don't have to copy\n\t\t// the unused part of the buffer (d.buf[:d.r0]).\n\t\t// TODO provide a way to limit the maximum size that\n\t\t// the buffer can grow to.\n\t\tused := len(d.buf) - d.r0\n\t\tn1 := cap(d.buf) * 2\n\t\tif n1-used < minGrow {\n\t\t\tn1 = used + minGrow\n\t\t}\n\t\tbuf1 := make([]byte, used, n1)\n\t\tcopy(buf1, d.buf[d.r0:])\n\t\td.buf = buf1\n\t\td.r1 -= d.r0\n\t\td.r0 = 0\n\t}\n\tn, err := d.rd.Read(d.buf[len(d.buf):cap(d.buf)])\n\td.buf = d.buf[:len(d.buf)+n]\n\tif err == nil {\n\t\treturn\n\t}\n\td.complete = true\n\tif err != io.EOF {\n\t\td.err = err\n\t}\n}",
"func (r *reader) readOnceAt(b []byte, pos int64, ctxErr *error) (n int, err error) {\n\tif pos >= r.length {\n\t\terr = io.EOF\n\t\treturn\n\t}\n\tfor {\n\t\tavail := r.waitAvailable(pos, int64(len(b)), ctxErr)\n\t\tif avail == 0 {\n\t\t\tif r.t.closed.IsSet() {\n\t\t\t\terr = errors.New(\"torrent closed\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif *ctxErr != nil {\n\t\t\t\terr = *ctxErr\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tpi := peer_protocol.Integer(r.torrentOffset(pos) / r.t.info.PieceLength)\n\t\tip := r.t.info.Piece(int(pi))\n\t\tpo := r.torrentOffset(pos) % r.t.info.PieceLength\n\t\tb1 := missinggo.LimitLen(b, ip.Length()-po, avail)\n\t\tn, err = r.t.readAt(b1, r.torrentOffset(pos))\n\t\tif n != 0 {\n\t\t\terr = nil\n\t\t\treturn\n\t\t}\n\t\tr.t.cl.lock()\n\t\t// TODO: Just reset pieces in the readahead window. This might help\n\t\t// prevent thrashing with small caches and file and piece priorities.\n\t\tlog.Printf(\"error reading torrent %q piece %d offset %d, %d bytes: %s\", r.t, pi, po, len(b1), err)\n\t\tr.t.updateAllPieceCompletions()\n\t\tr.t.updateAllPiecePriorities()\n\t\tr.t.cl.unlock()\n\t}\n}",
"func (r *Reader) Unlimit() {\n\tr.newLimit <- nil\n}",
"func (c *Conn) setReadRemaining(n int64) error {\n\tif n < 0 {\n\t\treturn ErrReadLimit\n\t}\n\n\tc.readRemaining = n\n\treturn nil\n}",
"func (b *FixedSizeRingBuf) ReadWithoutAdvance(p []byte) (n int, err error) {\n\treturn b.ReadAndMaybeAdvance(p, false)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
HasPrefixAny determines if any of the string values have the given prefix. | func HasPrefixAny(prefix string, values []string) bool {
for _, val := range values {
if strings.HasPrefix(val, prefix) {
return true
}
}
return false
} | [
"func StartsWithAny(str string, prefixes ...string) bool {\n\tfor _, prefix := range prefixes {\n\t\tif internalStartsWith(str, (string)(prefix), false) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func HasAnyPrefix(s string, prefixList []string) bool {\n\tfor _, prefix := range prefixList {\n\t\tif strings.HasPrefix(s, prefix) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func HasAnyPrefix(text string, slice []string) bool {\n\tfor _, s := range slice {\n\t\tif strings.HasPrefix(text, s) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func StartsWithAnyIgnoreCase(str string, prefixes ...string) bool {\n\tfor _, prefix := range prefixes {\n\t\tif internalStartsWith(str, (string)(prefix), true) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func HasPrefixAnyI(s string, prefixes ...string) bool {\n\tfor _, prefix := range prefixes {\n\t\tif stringsutil.HasPrefixI(s, prefix) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (v *StringHasPrefixAny) Validate(e *validator.Errors) {\n\t// if no required prefixes - string is valid\n\tif v.ComparedField == nil || len(v.ComparedField) == 0 {\n\t\treturn\n\t}\n\n\tfor _, s := range v.ComparedField {\n\t\tif strings.HasPrefix(v.Field, s) {\n\t\t\treturn\n\t\t}\n\t}\n\n\te.Add(v.Name, StringHasPrefixAnyError(v))\n}",
"func StringsHasPrefix(s []string, p string) bool {\n\tfor _, x := range s {\n\t\tif !strings.HasPrefix(x, p) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func HasPrefix(s string, p ...string) bool {\n\tfor _, i := range p {\n\t\tif strings.HasPrefix(s, i) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func HasPrefix(s string, prefixes ...string) bool {\n\tfor _, p := range prefixes {\n\t\tif strings.HasPrefix(s, p) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func containsPrefix(needle string, prefixes []string) bool {\n\tfor _, prefix := range prefixes {\n\t\tif strings.HasPrefix(needle, prefix) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (s StringSet) IncludesAny(values []string) bool {\n\tfor _, v := range values {\n\t\tif _, ok := s[v]; ok {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func ContainsPrefix(a []string, prefix string) bool {\n\treturn IndexFunc(a, ValueHasPrefix(prefix)) != -1\n}",
"func HasPrefix(labels map[string]string, prefix string) bool {\n\tfor name, value := range labels {\n\t\tif strings.HasPrefix(name, prefix) && len(value) > 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func HasOnePrefix(s string, subs []string) bool {\n\tfor _, sub := range subs {\n\t\tif strings.HasPrefix(s, sub) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func HasOneOfPrefixesFold(str string, prefixes ...string) bool {\n\tfor _, pre := range prefixes {\n\t\tif HasPrefixFold(str, pre) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func HasPrefix(prefix, operand string) bool { return strings.HasPrefix(operand, prefix) }",
"func PrefixInList(list []string, prefix string) bool {\n\tfor _, s := range list {\n\t\tif strings.HasPrefix(s, prefix) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func AnyPrefixMatcher(strs ...string) MatcherFunc {\n\ttree := ternary_search_tree.New(strs...)\n\treturn func(_ io.Writer, r io.Reader) bool {\n\t\tbuf := make([]byte, tree.Depth())\n\t\tn, _ := io.ReadFull(r, buf)\n\t\t_, _, ok := tree.Follow(string(buf[:n]))\n\t\treturn ok\n\t}\n}",
"func HasPrefix(s, prefix string) bool"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Equals checks two matches for equality | func (m *Match) Equals(other *Match) bool {
if m == nil && other == nil {
return true
} else if m == nil {
return false
} else if other == nil {
return false
}
return m.PC == other.PC &&
m.StartLine == other.StartLine &&
m.StartColumn == other.StartColumn &&
m.EndLine == other.EndLine &&
m.EndColumn == other.EndColumn &&
bytes.Equal(m.Bytes, other.Bytes)
} | [
"func (m *MatchData) IsEqual(m2 *MatchData) bool {\n\tif len(m.result) != len(m2.result) {\n\t\treturn false\n\t}\n\n\tfor i, v := range m.result {\n\t\tif v != m2.result[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn (m.r.String() == m2.r.String()) && (m.s == m2.s)\n}",
"func Eq(obj1 any, obj2 any) bool",
"func TestEquals(t *testing.T) {\n\tt.Parallel()\n\tfor ti, tt := range []struct {\n\t\tm1, m2 MatrixExp\n\t\teq bool\n\t}{\n\t\t{\n\t\t\tm1: GeneralZeros(1, 1),\n\t\t\tm2: GeneralZeros(1, 1),\n\t\t\teq: true,\n\t\t},\n\t\t{\n\t\t\tm1: GeneralZeros(1, 1),\n\t\t\tm2: GeneralZeros(1, 10),\n\t\t\teq: false,\n\t\t},\n\t\t{\n\t\t\tm1: GeneralZeros(10, 1),\n\t\t\tm2: GeneralZeros(1, 1),\n\t\t\teq: false,\n\t\t},\n\t\t{\n\t\t\tm1: GeneralZeros(1, 1),\n\t\t\tm2: GeneralOnes(1, 1),\n\t\t\teq: false,\n\t\t},\n\t} {\n\t\tif v := Equals(tt.m1, tt.m2); v != tt.eq {\n\t\t\tt.Errorf(\"%d: Equals(%v,%v) equals %v, want %v\", ti, tt.m1, tt.m2, v, tt.eq)\n\t\t}\n\t}\n}",
"func (oi ObjectIdentifier) Equal(other ObjectIdentifier) bool {}",
"func MatchEqual(m1, m2 *Match) bool {\n\treturn bytes.Equal(m1.match, m2.match) && m1.pos == m2.pos\n}",
"func eq(args ...interface{}) bool {\n\tif len(args) == 0 {\n\t\treturn false\n\t}\n\tx := args[0]\n\tswitch x := x.(type) {\n\tcase string, int, int64, byte, float32, float64:\n\t\tfor _, y := range args[1:] {\n\t\t\tif x == y {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\tfor _, y := range args[1:] {\n\t\tif reflect.DeepEqual(x, y) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func eq(args ...interface{}) bool {\n\tif len(args) == 0 {\n\t\treturn false\n\t}\n\tx := args[0]\n\tswitch x := x.(type) {\n\tcase string, int, int64, byte, float32, float64:\n\t\tfor _, y := range args[1:] {\n\t\t\tif x == y {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\tfor _, y := range args[1:] {\n\t\tif reflect.DeepEqual(x, y) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func equal(a, b interface{}) bool {\n\treturn a == b\n}",
"func filterChainMatchEqual(first *listener.FilterChainMatch, second *listener.FilterChainMatch) bool {\n\tif first == nil || second == nil {\n\t\treturn first == second\n\t}\n\tif first.TransportProtocol != second.TransportProtocol {\n\t\treturn false\n\t}\n\tif !util.StringSliceEqual(first.ApplicationProtocols, second.ApplicationProtocols) {\n\t\treturn false\n\t}\n\tif first.DestinationPort.GetValue() != second.DestinationPort.GetValue() {\n\t\treturn false\n\t}\n\tif !util.CidrRangeSliceEqual(first.PrefixRanges, second.PrefixRanges) {\n\t\treturn false\n\t}\n\tif !util.CidrRangeSliceEqual(first.SourcePrefixRanges, second.SourcePrefixRanges) {\n\t\treturn false\n\t}\n\tif first.AddressSuffix != second.AddressSuffix {\n\t\treturn false\n\t}\n\tif first.SuffixLen.GetValue() != second.SuffixLen.GetValue() {\n\t\treturn false\n\t}\n\tif first.SourceType != second.SourceType {\n\t\treturn false\n\t}\n\tif !util.UInt32SliceEqual(first.SourcePorts, second.SourcePorts) {\n\t\treturn false\n\t}\n\tif !util.StringSliceEqual(first.ServerNames, second.ServerNames) {\n\t\treturn false\n\t}\n\treturn true\n}",
"func same(x, y groupList) bool {\n\tsx := x.toSet()\n\tsy := y.toSet()\n\treturn reflect.DeepEqual(sx, sy)\n}",
"func (seq SeqEq[S, T]) Equal(a, b S) bool {\n\tseqA := a\n\tseqB := b\n\tfor !seq.Seq.IsVoid(seqA) && !seq.Seq.IsVoid(seqB) {\n\t\theadA := seq.Seq.Head(seqA)\n\t\theadB := seq.Seq.Head(seqB)\n\t\tif headA == nil || headB == nil || !seq.Eq.Equal(*headA, *headB) {\n\t\t\treturn false\n\t\t}\n\n\t\tseqA = seq.Seq.Tail(seqA)\n\t\tseqB = seq.Seq.Tail(seqB)\n\t}\n\n\treturn seq.Seq.IsVoid(seqA) && seq.Seq.IsVoid(seqB)\n}",
"func (t Tags) eq(u Tags) bool {\n\tif len(t) != len(u) {\n\t\treturn false\n\t}\n\tfor i := range t {\n\t\tif t[i] != u[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (t *TokenizedInstruction) Equal(t1 *TokenizedInstruction) bool {\n\tif t.Name != t1.Name {\n\t\treturn false\n\t}\n\tif len(t.Arguments) != len(t1.Arguments) {\n\t\treturn false\n\t}\n\tfor i, arg := range t.Arguments {\n\t\tif *arg != *t1.Arguments[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func (h *HeaderMatch) Equal(o *HeaderMatch) bool {\n\tif h.Mismatch != o.Mismatch ||\n\t\th.Name != o.Name ||\n\t\th.Value != o.Value ||\n\t\t!h.Secret.Equal(o.Secret) {\n\t\treturn false\n\t}\n\treturn true\n}",
"func Equal(g1, g2 InstanceGroup) bool {\n\tif g1 == g2 {\n\t\treturn true\n\t}\n\n\tif g1.App() != g2.App() {\n\t\treturn false\n\t}\n\n\tif g1.Account() != g2.Account() {\n\t\treturn false\n\t}\n\n\tr1, ok1 := g1.Region()\n\tr2, ok2 := g2.Region()\n\tif ok1 != ok2 {\n\t\treturn false\n\t}\n\n\tif ok1 && (r1 != r2) {\n\t\treturn false\n\t}\n\n\ts1, ok1 := g1.Stack()\n\ts2, ok2 := g2.Stack()\n\n\tif ok1 != ok2 {\n\t\treturn false\n\t}\n\n\tif ok1 && (s1 != s2) {\n\t\treturn false\n\t}\n\n\tc1, ok1 := g1.Cluster()\n\tc2, ok2 := g2.Cluster()\n\n\tif ok1 != ok2 {\n\t\treturn false\n\t}\n\n\tif ok1 && (c1 != c2) {\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func (recv *MatchInfo) Equals(other *MatchInfo) bool {\n\treturn other.ToC() == recv.ToC()\n}",
"func Equals(a, b interface{}) bool {\n\treturn neogointernal.Opcode2(\"EQUAL\", a, b).(bool)\n}",
"func (o *Object) equal(e *Object) (eq bool) {\n\teq = bytes.Compare(o.Val, e.Val) == 0 &&\n\t\to.RC == e.RC &&\n\t\to.Access.UnixNano() == e.Access.UnixNano() &&\n\t\to.Create.UnixNano() == e.Create.UnixNano()\n\treturn\n}",
"func eq(args ...interface{}) bool {\n\tif len(args) == 0 {\n\t\treturn false\n\t}\n\tx := args[0]\n\tswitch x := x.(type) {\n\tcase int:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase int:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase int64:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase int64:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase int:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase int64:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase float32:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase float32:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase float64:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase float64:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase float32:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase float64:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase string, byte:\n\t\tfor _, y := range args[1:] {\n\t\t\tif x == y {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\tfor _, y := range args[1:] {\n\t\tif reflect.DeepEqual(x, y) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
LexerEngine does the actual tokenization of the byte slice text using the NFA bytecode in program. If the lexing process fails the Scanner will return an UnconsumedInput error. | func LexerEngine(program inst.Slice, text []byte) Scanner {
done := false
matchPC := -1
matchTC := -1
prevTC := 0
line := 1
col := 1
var scan Scanner
var cqueue, nqueue *queue.Queue = queue.New(len(program)), queue.New(len(program))
scan = func(tc int) (int, *Match, error, Scanner) {
if done && tc == len(text) {
return tc, nil, nil, nil
}
startTC := tc
if tc < matchTC {
// we back-tracked so reset the last matchTC
matchTC = -1
} else if tc == matchTC {
// the caller did not reset the tc, we are where we left
} else if matchTC != -1 && tc > matchTC {
// we skipped text
matchTC = tc
}
cqueue.Clear()
nqueue.Clear()
cqueue.Push(0)
for ; tc <= len(text); tc++ {
if cqueue.Empty() {
break
}
for !cqueue.Empty() {
pc := cqueue.Pop()
i := program[pc]
switch i.Op {
case inst.CHAR:
x := byte(i.X)
y := byte(i.Y)
if tc < len(text) && x <= text[tc] && text[tc] <= y {
nqueue.Push(pc + 1)
}
case inst.MATCH:
if matchTC < tc {
matchPC = int(pc)
matchTC = tc
} else if matchPC > int(pc) {
matchPC = int(pc)
matchTC = tc
}
case inst.JMP:
cqueue.Push(i.X)
case inst.SPLIT:
cqueue.Push(i.X)
cqueue.Push(i.Y)
default:
panic(fmt.Errorf("unexpected instruction %v", i))
}
}
cqueue, nqueue = nqueue, cqueue
if cqueue.Empty() && matchPC > -1 {
line, col = computeLineCol(text, prevTC, startTC, line, col)
eLine, eCol := computeLineCol(text, startTC, matchTC-1, line, col)
match := &Match{
PC: matchPC,
TC: startTC,
StartLine: line,
StartColumn: col,
EndLine: eLine,
EndColumn: eCol,
Bytes: text[startTC:matchTC],
}
prevTC = startTC
matchPC = -1
return tc, match, nil, scan
}
}
if matchTC != len(text) && startTC >= len(text) {
// the user has moved us farther than the text. Assume that was
// the intent and return EOF.
return tc, nil, nil, nil
} else if matchTC != len(text) {
done = true
if matchTC == -1 {
matchTC = 0
}
sline, scol := computeLineCol(text, 0, startTC, 1, 1)
fline, fcol := computeLineCol(text, 0, tc, 1, 1)
err := &UnconsumedInput{
StartTC: startTC,
FailTC: tc,
StartLine: sline,
StartColumn: scol,
FailLine: fline,
FailColumn: fcol,
Text: text,
}
return tc, nil, err, scan
} else {
return tc, nil, nil, nil
}
}
return scan
} | [
"func (l *promlexer) Lex() token {\n\tif l.i >= len(l.b) {\n\t\treturn tEOF\n\t}\n\tc := l.b[l.i]\n\tl.start = l.i\n\nyystate0:\n\n\tswitch yyt := l.state; yyt {\n\tdefault:\n\t\tpanic(fmt.Errorf(`invalid start condition %d`, yyt))\n\tcase 0: // start condition: INITIAL\n\t\tgoto yystart1\n\tcase 1: // start condition: sComment\n\t\tgoto yystart8\n\tcase 2: // start condition: sMeta1\n\t\tgoto yystart19\n\tcase 3: // start condition: sMeta2\n\t\tgoto yystart21\n\tcase 4: // start condition: sLabels\n\t\tgoto yystart24\n\tcase 5: // start condition: sLValue\n\t\tgoto yystart29\n\tcase 6: // start condition: sValue\n\t\tgoto yystart33\n\tcase 7: // start condition: sTimestamp\n\t\tgoto yystart36\n\t}\n\nyystate1:\n\tc = l.next()\nyystart1:\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == '#':\n\t\tgoto yystate5\n\tcase c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':\n\t\tgoto yystate7\n\tcase c == '\\n':\n\t\tgoto yystate4\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate3\n\tcase c == '\\x00':\n\t\tgoto yystate2\n\t}\n\nyystate2:\n\tc = l.next()\n\tgoto yyrule1\n\nyystate3:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule3\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate3\n\t}\n\nyystate4:\n\tc = l.next()\n\tgoto yyrule2\n\nyystate5:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule5\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate6\n\t}\n\nyystate6:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule4\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate6\n\t}\n\nyystate7:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule10\n\tcase c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':\n\t\tgoto yystate7\n\t}\n\nyystate8:\n\tc = l.next()\nyystart8:\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == 'H':\n\t\tgoto yystate9\n\tcase c == 'T':\n\t\tgoto yystate14\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate3\n\t}\n\nyystate9:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto 
yyabort\n\tcase c == 'E':\n\t\tgoto yystate10\n\t}\n\nyystate10:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == 'L':\n\t\tgoto yystate11\n\t}\n\nyystate11:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == 'P':\n\t\tgoto yystate12\n\t}\n\nyystate12:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate13\n\t}\n\nyystate13:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule6\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate13\n\t}\n\nyystate14:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == 'Y':\n\t\tgoto yystate15\n\t}\n\nyystate15:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == 'P':\n\t\tgoto yystate16\n\t}\n\nyystate16:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == 'E':\n\t\tgoto yystate17\n\t}\n\nyystate17:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate18\n\t}\n\nyystate18:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule7\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate18\n\t}\n\nyystate19:\n\tc = l.next()\nyystart19:\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':\n\t\tgoto yystate20\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate3\n\t}\n\nyystate20:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule8\n\tcase c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':\n\t\tgoto yystate20\n\t}\n\nyystate21:\n\tc = l.next()\nyystart21:\n\tswitch {\n\tdefault:\n\t\tgoto yyrule9\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate23\n\tcase c >= '\\x01' && c <= '\\b' || c >= '\\v' && c <= '\\x1f' || c >= '!' 
&& c <= 'ÿ':\n\t\tgoto yystate22\n\t}\n\nyystate22:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule9\n\tcase c >= '\\x01' && c <= '\\t' || c >= '\\v' && c <= 'ÿ':\n\t\tgoto yystate22\n\t}\n\nyystate23:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule3\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate23\n\tcase c >= '\\x01' && c <= '\\b' || c >= '\\v' && c <= '\\x1f' || c >= '!' && c <= 'ÿ':\n\t\tgoto yystate22\n\t}\n\nyystate24:\n\tc = l.next()\nyystart24:\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == ',':\n\t\tgoto yystate25\n\tcase c == '=':\n\t\tgoto yystate26\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate3\n\tcase c == '}':\n\t\tgoto yystate28\n\tcase c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':\n\t\tgoto yystate27\n\t}\n\nyystate25:\n\tc = l.next()\n\tgoto yyrule15\n\nyystate26:\n\tc = l.next()\n\tgoto yyrule14\n\nyystate27:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule12\n\tcase c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':\n\t\tgoto yystate27\n\t}\n\nyystate28:\n\tc = l.next()\n\tgoto yyrule13\n\nyystate29:\n\tc = l.next()\nyystart29:\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == '\"':\n\t\tgoto yystate30\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate3\n\t}\n\nyystate30:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == '\"':\n\t\tgoto yystate31\n\tcase c == '\\\\':\n\t\tgoto yystate32\n\tcase c >= '\\x01' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ':\n\t\tgoto yystate30\n\t}\n\nyystate31:\n\tc = l.next()\n\tgoto yyrule16\n\nyystate32:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c >= '\\x01' && c <= '\\t' || c >= '\\v' && c <= 'ÿ':\n\t\tgoto yystate30\n\t}\n\nyystate33:\n\tc = l.next()\nyystart33:\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate3\n\tcase c == '{':\n\t\tgoto yystate35\n\tcase c >= '\\x01' && c <= '\\b' || c >= '\\v' && c <= '\\x1f' || c >= '!' 
&& c <= 'z' || c >= '|' && c <= 'ÿ':\n\t\tgoto yystate34\n\t}\n\nyystate34:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule17\n\tcase c >= '\\x01' && c <= '\\b' || c >= '\\v' && c <= '\\x1f' || c >= '!' && c <= 'z' || c >= '|' && c <= 'ÿ':\n\t\tgoto yystate34\n\t}\n\nyystate35:\n\tc = l.next()\n\tgoto yyrule11\n\nyystate36:\n\tc = l.next()\nyystart36:\n\tswitch {\n\tdefault:\n\t\tgoto yyabort\n\tcase c == '\\n':\n\t\tgoto yystate37\n\tcase c == '\\t' || c == ' ':\n\t\tgoto yystate3\n\tcase c >= '0' && c <= '9':\n\t\tgoto yystate38\n\t}\n\nyystate37:\n\tc = l.next()\n\tgoto yyrule19\n\nyystate38:\n\tc = l.next()\n\tswitch {\n\tdefault:\n\t\tgoto yyrule18\n\tcase c >= '0' && c <= '9':\n\t\tgoto yystate38\n\t}\n\nyyrule1: // \\0\n\t{\n\t\treturn tEOF\n\t}\nyyrule2: // \\n\n\t{\n\t\tl.state = sInit\n\t\treturn tLinebreak\n\t\tgoto yystate0\n\t}\nyyrule3: // [ \\t]+\n\t{\n\t\treturn tWhitespace\n\t}\nyyrule4: // #[ \\t]+\n\t{\n\t\tl.state = sComment\n\t\tgoto yystate0\n\t}\nyyrule5: // #\n\t{\n\t\treturn l.consumeComment()\n\t}\nyyrule6: // HELP[\\t ]+\n\t{\n\t\tl.state = sMeta1\n\t\treturn tHelp\n\t\tgoto yystate0\n\t}\nyyrule7: // TYPE[\\t ]+\n\t{\n\t\tl.state = sMeta1\n\t\treturn tType\n\t\tgoto yystate0\n\t}\nyyrule8: // {M}({M}|{D})*\n\t{\n\t\tl.state = sMeta2\n\t\treturn tMName\n\t\tgoto yystate0\n\t}\nyyrule9: // {C}*\n\t{\n\t\tl.state = sInit\n\t\treturn tText\n\t\tgoto yystate0\n\t}\nyyrule10: // {M}({M}|{D})*\n\t{\n\t\tl.state = sValue\n\t\treturn tMName\n\t\tgoto yystate0\n\t}\nyyrule11: // \\{\n\t{\n\t\tl.state = sLabels\n\t\treturn tBraceOpen\n\t\tgoto yystate0\n\t}\nyyrule12: // {L}({L}|{D})*\n\t{\n\t\treturn tLName\n\t}\nyyrule13: // \\}\n\t{\n\t\tl.state = sValue\n\t\treturn tBraceClose\n\t\tgoto yystate0\n\t}\nyyrule14: // =\n\t{\n\t\tl.state = sLValue\n\t\treturn tEqual\n\t\tgoto yystate0\n\t}\nyyrule15: // ,\n\t{\n\t\treturn tComma\n\t}\nyyrule16: // \\\"(\\\\.|[^\\\\\"])*\\\"\n\t{\n\t\tl.state = sLabels\n\t\treturn tLValue\n\t\tgoto 
yystate0\n\t}\nyyrule17: // [^{ \\t\\n]+\n\t{\n\t\tl.state = sTimestamp\n\t\treturn tValue\n\t\tgoto yystate0\n\t}\nyyrule18: // {D}+\n\t{\n\t\treturn tTimestamp\n\t}\nyyrule19: // \\n\n\tif true { // avoid go vet determining the below panic will not be reached\n\t\tl.state = sInit\n\t\treturn tLinebreak\n\t\tgoto yystate0\n\t}\n\tpanic(\"unreachable\")\n\nyyabort: // no lexem recognized\n\t//\n\t// silence unused label errors for build and satisfy go vet reachability analysis\n\t//\n\t{\n\t\tif false {\n\t\t\tgoto yyabort\n\t\t}\n\t\tif false {\n\t\t\tgoto yystate0\n\t\t}\n\t\tif false {\n\t\t\tgoto yystate1\n\t\t}\n\t\tif false {\n\t\t\tgoto yystate8\n\t\t}\n\t\tif false {\n\t\t\tgoto yystate19\n\t\t}\n\t\tif false {\n\t\t\tgoto yystate21\n\t\t}\n\t\tif false {\n\t\t\tgoto yystate24\n\t\t}\n\t\tif false {\n\t\t\tgoto yystate29\n\t\t}\n\t\tif false {\n\t\t\tgoto yystate33\n\t\t}\n\t\tif false {\n\t\t\tgoto yystate36\n\t\t}\n\t}\n\n\t// Workaround to gobble up comments that started with a HELP or TYPE\n\t// prefix. We just consume all characters until we reach a newline.\n\t// This saves us from adding disproportionate complexity to the parser.\n\tif l.state == sComment {\n\t\treturn l.consumeComment()\n\t}\n\treturn tInvalid\n}",
"func (sc *Scanner) NextToken(expected []int) (int, interface{}, uint64, uint64) {\n\tif len(sc.lookahead) > 0 {\n\t\tsc.prepareNewRun()\n\t}\n\tfor sc.runeScanner.Scan() {\n\t\trune := sc.runeScanner.Bytes()\n\t\tclz, sz := sc.bidic(rune)\n\t\t//T().Debugf(\"'%s' has class %s\", string(rune), ClassString(clz))\n\t\tif clz != sc.currClz {\n\t\t\tsc.lookahead = sc.lookahead[:0]\n\t\t\tsc.lookahead = append(sc.lookahead, rune...)\n\t\t\tr := sc.currClz // tmp for returning current class\n\t\t\tsc.currClz = clz // change current class to class of LA\n\t\t\tT().Debugf(\"Token '%s' as :%s\", string(sc.buffer), ClassString(r))\n\t\t\treturn int(r), sc.buffer, sc.pos, uint64(len(sc.buffer))\n\t\t}\n\t\tsc.buffer = append(sc.buffer, rune...)\n\t\tsc.length += uint64(sz)\n\t}\n\tif len(sc.lookahead) > 0 {\n\t\t// sc.prepareNewRun()\n\t\tsc.lookahead = sc.lookahead[:0]\n\t\tsc.pos += sc.length // set new input position\n\t\tclz, sz := sc.bidic(sc.buffer) // calculate current bidi class\n\t\tsc.currClz = clz\n\t\tsc.length += uint64(sz) // include len(LA) in run's length\n\t\tT().Debugf(\"Token '%s' as :%s\", string(sc.buffer), ClassString(sc.currClz))\n\t\treturn int(sc.currClz), sc.buffer, sc.pos, uint64(len(sc.buffer))\n\t}\n\tif !sc.done {\n\t\tsc.done = true\n\t\tT().Debugf(\"Token :%s\", ClassString(bidi.PDI))\n\t\treturn int(bidi.PDI), \"\", sc.pos, 0\n\t}\n\treturn scanner.EOF, \"\", sc.pos, 0\n}",
"func NewLexer(br *bufio.Reader) *Lexer {\n\tb, err := br.ReadByte()\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn &Lexer{br, nil, getSingleByteArray(b), UnknownToken, nil}\n}",
"func initLexer() (*lex.Lexer, error) {\n\tlexer := lex.NewLexer()\n\n\tfor _, lit := range literals {\n\t\tr := \"\\\\\" + strings.Join(strings.Split(lit, \"\"), \"\\\\\")\n\t\tlexer.Add([]byte(r), tokenize(lit))\n\t}\n\tfor _, name := range keywords {\n\t\tlexer.Add([]byte(strings.ToLower(name)), tokenize(name))\n\t}\n\n\t// Words we don't want\n\tlexer.Add([]byte(\"([e|E]Book?.)\"), skip)\n\tlexer.Add([]byte(\"([e|E][T|t]ext?.)\"), skip)\n\tlexer.Add([]byte(\"(http?.)\"), skip)\n\tlexer.Add([]byte(\"(tm)\"), skip)\n\tlexer.Add([]byte(\"(HTML)\"), skip)\n\tlexer.Add([]byte(\"(ASCII)\"), skip)\n\t// Our main regex\n\tlexer.Add([]byte(`([a-z]|[A-Z])([a-z]|[A-Z]|[0-9]|_)*`), tokenize(\"WORD\"))\n\t// Skip new lines, carriage returns, etc..\n\tlexer.Add([]byte(\"( |\\t|\\n|\\r)+\"), skip)\n\t// Skip characters we don't care about in regular text\n\tlexer.Add([]byte(\"\\\\*|\\\\<|\\\\>|\\\\@|\\\\/|\\\\~|\\\\[|\\\\]|\\\\{|\\\\}|\\\\||\\\\_|\\\\^|\\\\$|\\\\#|\\\\%|\\\\+|\\\\=\"), skip)\n\t// Skip URLs\n\tlexer.Add([]byte(\"www.([a-z]|[A-Z])([a-z]|[A-Z]|[0-9]|_)*.([a-z]|[A-Z])([a-z]|[A-Z]|[0-9]|_)*\"), skip)\n\t// Ignore numbers\n\tlexer.Add([]byte(\"[0-9]\\\\.\"), skip)\n\tlexer.Add([]byte(\"[0-9]\"), skip)\n\n\terr := lexer.Compile()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn lexer, nil\n}",
"func BenchmarkLexer(b *testing.B) {\n\tvar tracer trace.Trace\n\t//tracer = trace.New(os.Stderr, true) // use stderr to trace\n\ttracer = trace.New(ioutil.Discard, true) // and this to not\n\n\tfor i := 0; i < b.N; i++ {\n\t\txml_lexer.Lex(xmlInput, tracer) // xml\n\t\t//json_lexer.Lex(jsonInput, tracer) // json\n\t\t//csv_lexer.Lex(jsonInput, tracer) // csv\n\t}\n}",
"func TestScanner_Scan(t *testing.T) {\n\tvar tests = []struct {\n\t\ts string\n\t\ttok lang.Token\n\t\tlit string\n\t\tpos int\n\t}{\n\t\t// Special tokens (EOF, ILLEGAL, WS)\n\t\t{s: ``, tok: lang.EOF},\n\t\t{s: `#`, tok: lang.ILLEGAL, lit: `#`},\n\t\t{s: `+`, tok: lang.ILLEGAL, lit: `+`},\n\t\t{s: `-`, tok: lang.ILLEGAL, lit: `-`},\n\t\t{s: `*`, tok: lang.ILLEGAL, lit: `*`},\n\t\t{s: `/`, tok: lang.BADREGEX, lit: ``},\n\t\t{s: `%`, tok: lang.ILLEGAL, lit: `%`},\n\t\t{s: ` `, tok: lang.WS, lit: \" \"},\n\t\t{s: \"\\t\", tok: lang.WS, lit: \"\\t\"},\n\t\t{s: \"\\n\", tok: lang.WS, lit: \"\\n\"},\n\t\t{s: \"\\r\", tok: lang.WS, lit: \"\\n\"},\n\t\t{s: \"\\r\\n\", tok: lang.WS, lit: \"\\n\"},\n\t\t{s: \"\\rX\", tok: lang.WS, lit: \"\\n\"},\n\t\t{s: \"\\n\\r\", tok: lang.WS, lit: \"\\n\\n\"},\n\t\t{s: \" \\n\\t \\r\\n\\t\", tok: lang.WS, lit: \" \\n\\t \\n\\t\"},\n\t\t{s: \" foo\", tok: lang.WS, lit: \" \"},\n\n\t\t// Logical operators\n\t\t{s: `AND`, tok: lang.AND},\n\t\t{s: `and`, tok: lang.AND},\n\t\t{s: `|`, tok: lang.OR},\n\t\t{s: `OR`, tok: lang.OR},\n\t\t{s: `or`, tok: lang.OR},\n\t\t{s: `!`, tok: lang.NOT},\n\t\t{s: `NOT`, tok: lang.NOT},\n\t\t{s: `not`, tok: lang.NOT},\n\n\t\t// Misc. 
tokens\n\t\t{s: `(`, tok: lang.LPAREN},\n\t\t{s: `)`, tok: lang.RPAREN},\n\t\t{s: `,`, tok: lang.COMMA},\n\n\t\t// Identifiers\n\t\t{s: `required`, tok: lang.IDENT, lit: `required`},\n\t\t{s: `required()`, tok: lang.IDENT, lit: `required`},\n\t\t{s: `foo`, tok: lang.IDENT, lit: `foo`},\n\t\t{s: `phone`, tok: lang.IDENT, lit: `phone`},\n\t\t{s: `range(1,2)`, tok: lang.IDENT, lit: `range`},\n\n\t\t// Booleans\n\t\t{s: `true`, tok: lang.TRUE},\n\t\t{s: `false`, tok: lang.FALSE},\n\n\t\t// Strings\n\t\t{s: `'testing 123!'`, tok: lang.STRING, lit: `testing 123!`},\n\t\t{s: `'string'`, tok: lang.STRING, lit: `string`},\n\t\t{s: `'foo\\nbar'`, tok: lang.STRING, lit: \"foo\\nbar\"},\n\n\t\t// Numbers\n\t\t{s: `100`, tok: lang.INTEGER, lit: `100`},\n\t\t{s: `100.23`, tok: lang.NUMBER, lit: `100.23`},\n\t\t{s: `.23`, tok: lang.NUMBER, lit: `.23`},\n\t\t// {s: `.`, tok: lang.ILLEGAL, lit: `.`},\n\t\t{s: `10.3s`, tok: lang.NUMBER, lit: `10.3`},\n\n\t\t// Durations\n\t\t{s: `10u`, tok: lang.DURATION, lit: `10u`},\n\t\t{s: `10µ`, tok: lang.DURATION, lit: `10µ`},\n\t\t{s: `10ms`, tok: lang.DURATION, lit: `10ms`},\n\t\t{s: `1s`, tok: lang.DURATION, lit: `1s`},\n\t\t{s: `10m`, tok: lang.DURATION, lit: `10m`},\n\t\t{s: `10h`, tok: lang.DURATION, lit: `10h`},\n\t\t{s: `10d`, tok: lang.DURATION, lit: `10d`},\n\t\t{s: `10w`, tok: lang.DURATION, lit: `10w`},\n\t\t{s: `10x`, tok: lang.DURATION, lit: `10x`}, // non-duration unit, but scanned as a duration value\n\n\t\t// Keywords\n\t\t{s: `EACH`, tok: lang.EACH},\n\t\t{s: `each(!zero)`, tok: lang.EACH},\n\n\t\t// Bound params\n\t\t{s: `$Title`, tok: lang.BOUNDPARAM, lit: `Title`},\n\t\t{s: `$.Book.Description`, tok: lang.BOUNDPARAM, lit: `Book.Description`},\n\t}\n\n\tfor i, tc := range tests {\n\t\tt.Run(tc.s, func(t *testing.T) {\n\t\t\ts := lang.NewScanner(strings.NewReader(tc.s))\n\t\t\ttok, pos, lit := s.Scan()\n\t\t\tassert.Equal(t, tc.tok, tok, fmt.Sprintf(\"%d. 
%q token mismatch: exp=%q got=%q <%q>\", i, tc.s, tc.tok.String(), tok.String(), lit))\n\t\t\tassert.Equal(t, tc.pos, pos, fmt.Sprintf(\"%d. %q pos mismatch: exp=%#v got=%#v\", i, tc.s, tc.pos, pos))\n\t\t\tassert.Equal(t, tc.lit, lit, fmt.Sprintf(\"%d. %q literal mismatch: exp=%q got=%q\", i, tc.s, tc.lit, lit))\n\t\t})\n\t}\n}",
"func NewLexer(in io.Reader) IdlLexer {\n\tl := Lexer{in: in, tokenChan: make(chan token)}\n\tgo func() {\n\t\ts := bufio.NewScanner(l.in)\n\t\ts.Split(split)\n\t\tfor s.Scan() {\n\t\t\tl.tokenChan <- toToken(s.Text())\n\t\t}\n\t\tclose(l.tokenChan)\n\t}()\n\treturn l\n}",
"func lex(input io.Reader) chan token {\n\tr := &lexer{\n\t\tlineno: 1,\n\t\ttokStream: make(chan token),\n\t\tinput: bufio.NewReader(input),\n\t\tacc: make([]rune, 1),\n\t}\n\tgo r.run()\n\treturn r.tokStream\n}",
"func (l *lexer) run() {\n\tfor state := lexText; state != nil; {\n\t\t//fmt.Printf(\"%s\\n\", runtime.FuncForPC(reflect.ValueOf(state).Pointer()).Name())\n\n\t\tstate = state(l)\n\t}\n\tclose(l.tokens)\n}",
"func BenchmarkStandardLibraryLexer(b *testing.B) {\n\tvar lex scanner.Scanner\n\tsrc := []byte(test.Prog)\n\tfileSet := token.NewFileSet()\n\tfile := fileSet.AddFile(\"\", fileSet.Base(), len(src))\n\tfor i := 0; i < b.N; i++ {\n\t\tlex.Init(file, src, nil, scanner.ScanComments)\n\t\ttok := token.ILLEGAL\n\t\tfor tok != token.EOF {\n\t\t\t_, tok, _ = lex.Scan()\n\t\t}\n\t}\n}",
"func Lex(input string) *Lexer {\n\tl := &Lexer{\n\t\tinput: []rune(input),\n\t\titems: make(chan item),\n\t\tconsumed: true,\n\t}\n\tgo l.run()\n\treturn l\n}",
"func NewLexerRun(name string, r io.Reader, rec Record, runFn RunFn) (l *Lexer, err error) {\n\tif rec.Buflen < 1 {\n\t\terr = fmt.Errorf(\"rec.Buflen must be > 0: %d\", rec.Buflen)\n\t\treturn\n\t}\n\tif rec.ErrorFn == nil {\n\t\terr = fmt.Errorf(\"rec.ErrorFn must not be nil\")\n\t\treturn\n\t}\n\tl = &Lexer{\n\t\tname: name,\n\t\tr: r,\n\t\trec: rec,\n\t\titems: make(chan Item),\n\t\tnext: make([]byte, rec.Buflen),\n\t\teof: false,\n\t}\n\tgo func(l *Lexer, runFn RunFn) {\n\t\tdefer close(l.items)\n\t\trunFn(l)\n\t}(l, runFn)\n\n\treturn\n}",
"func (st *simpleTokenizer) parseSource(source io.Reader, lproc vertigo.LineProcessor) error {\n\tbrd := bufio.NewScanner(source)\n\n\tch := make(chan []interface{})\n\tchunk := make([]interface{}, channelChunkSize)\n\tgo func() {\n\t\ti := 0\n\t\tfor brd.Scan() {\n\t\t\tline := st.parseLine(brd.Text())\n\t\t\tfor _, token := range line {\n\t\t\t\tif token != \"\" {\n\t\t\t\t\tchunk[i] = &vertigo.Token{Word: token}\n\t\t\t\t\ti++\n\t\t\t\t\tif i == channelChunkSize {\n\t\t\t\t\t\ti = 0\n\t\t\t\t\t\tch <- chunk\n\t\t\t\t\t\tchunk = make([]interface{}, channelChunkSize)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif i > 0 {\n\t\t\tch <- chunk[:i]\n\t\t}\n\t\tclose(ch)\n\t}()\n\n\tfor tokens := range ch {\n\t\tfor _, token := range tokens {\n\t\t\tswitch token.(type) {\n\t\t\tcase *vertigo.Token:\n\t\t\t\ttk := token.(*vertigo.Token)\n\t\t\t\tlproc.ProcToken(tk)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func LexParsing(input []rune) (*core.Lex, int) {\n\tvar (\n\t\toff, flag, action int\n\t\tlex lexEngine\n\t)\n\tlp := core.Lex{Source: append(input, []rune(` `)...), // added stop-character\n\t\tLines: make([]int, 0, 10), Strings: []string{``}}\n\tlex.Lex = &lp\n\tlex.Buf = make([]rune, 0, 4096)\n\tlex.Stack = make([]lexStack, 0, 16)\n\n\tnewLine := func(offset int) {\n\t\tif len(lp.Lines) == 0 || lp.Lines[len(lp.Lines)-1] != offset {\n\t\t\tlp.Lines = append(lp.Lines, offset)\n\t\t}\n\t}\n\n\tnewLine(0)\n\tlength := len(lp.Source)\n\tlp.Tokens = make([]core.Token, 0, 32+length/10)\n\t// Skip the first lines with # character\n\tvar hashMode bool\n\tfor lp.Source[off] == '#' || hashMode {\n\t\tstart := off\n\t\tfor ; off < length && lp.Source[off] != 0xa; off++ {\n\t\t}\n\t\tif off >= length {\n\t\t\tbreak\n\t\t}\n\t\toff++\n\t\tline := string(lp.Source[start:off])\n\t\tif strings.TrimSpace(line) == `###` {\n\t\t\thashMode = !hashMode\n\t\t} else if start != 0 || lp.Source[1] != '!' {\n\t\t\tif !hashMode {\n\t\t\t\tline = line[1:]\n\t\t\t}\n\t\t\tlp.Header += line\n\t\t}\n\t\tnewLine(off)\n\t}\n\tstate := lexMain\nmain:\n\tfor off < length {\n\t\tch := lp.Source[off]\n\t\tstart := off\n\t\tif ch > 127 {\n\t\t\tif unicode.IsSpace(ch) {\n\t\t\t\tch = forS\n\t\t\t} else if unicode.IsLetter(ch) {\n\t\t\t\tch = forL\n\t\t\t} else if unicode.IsPrint(ch) {\n\t\t\t\tch = forP\n\t\t\t}\n\t\t}\n\t\tif lp.Source[off] == 0xa {\n\t\t\tnewLine(off + 1)\n\t\t}\n\t\tpLexItem := lexTable[state][ch]\n\t\tlex.State = 0\n\t\tlex.Callback = false\n\t\tflag = pLexItem.Action & 0xffff00\n\t\tif flag&fNewBuf != 0 {\n\t\t\tlex.Buf = lex.Buf[:0]\n\t\t}\n\t\taction = pLexItem.Action & 0xff\n\t\tswitch v := pLexItem.Pattern.(type) {\n\t\tcase string:\n\t\t\ti := 1\n\t\t\tlength := len(v)\n\t\t\tfor ; i < length; i++ {\n\t\t\t\tif lp.Source[off+i] != rune(v[i]) {\n\t\t\t\t\tpLexDef := lexTable[state][forD]\n\t\t\t\t\tif pLexDef.Func != nil {\n\t\t\t\t\t\tpLexDef.Func(&lex, start, 
off)\n\t\t\t\t\t}\n\t\t\t\t\toff++\n\t\t\t\t\tcontinue main\n\t\t\t\t}\n\t\t\t}\n\t\t\toff += length - 1\n\t\tcase []string:\n\t\tpattern:\n\t\t\tfor _, pat := range v {\n\t\t\t\ti := 1\n\t\t\t\tlength := len(pat)\n\t\t\t\tfor ; i < length; i++ {\n\t\t\t\t\tif lp.Source[off+i] != rune(pat[i]) {\n\t\t\t\t\t\tcontinue pattern\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\toff += length - 1\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif pLexItem.Func != nil {\n\t\t\tlex.Off = off\n\t\t\tpLexItem.Func(&lex, start, off)\n\t\t\tif lex.State != 0 {\n\t\t\t\taction = lex.State & 0xff\n\t\t\t\tflag = lex.State & 0xffff00\n\t\t\t\toff = lex.Off\n\t\t\t}\n\t\t}\n\t\tswitch action {\n\t\tcase 0:\n\t\tcase lexError:\n\t\t\tlex.Error = flag\n\t\tcase lexBack, lexBackNext:\n\t\t\tlex.Callback = true\n\t\t\tfor {\n\t\t\t\tprev := lex.Stack[len(lex.Stack)-1]\n\t\t\t\tstate = prev.State\n\t\t\t\tlex.Stack = lex.Stack[:len(lex.Stack)-1]\n\t\t\t\tif prev.Action.Func != nil {\n\t\t\t\t\tlex.Off = off\n\t\t\t\t\tprev.Action.Func(&lex, prev.Offset, off)\n\t\t\t\t\toff = lex.Off\n\t\t\t\t\tif lex.State != 0 {\n\t\t\t\t\t\taction = lex.State\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif prev.Action.Action&fBack == 0 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif action&0xff == lexBack {\n\t\t\t\tcontinue\n\t\t\t}\n\t\tdefault:\n\t\t\tlex.Stack = append(lex.Stack, lexStack{State: state, Offset: off, Action: pLexItem})\n\t\t\tstate = action\n\t\t}\n\t\tif lex.Error != ErrSuccess {\n\t\t\tlp.NewTokens(off, tkError)\n\t\t\treturn &lp, lex.Error\n\t\t}\n\t\tif flag&fSkip != 0 {\n\t\t\toff++\n\t\t}\n\t\toff++\n\t}\n\tif lex.Colon {\n\t\tlp.NewTokens(off, tkRCurly)\n\t}\n\treturn &lp, ErrSuccess\n}",
"func NewLexer(reader io.Reader, capacity int) *Lexer {\n\tl := &Lexer{\n\t\treader: reader,\n\t\tbuffer: make([]byte, capacity),\n\t}\n\n\treturn l\n}",
"func newLexer(src string) *lexer {\n\tl := &lexer{src: src,\n\t\ttokenChan: make(chan token),\n\t}\n\tgo l.run()\n\treturn l\n}",
"func lex(input io.RuneReader, c chan token) {\n\tdefer close(c)\n\tl := &lexer{input, nil, ' ', c}\n\n\tvar err error\n\tf := stateFn(lexWhitespace)\n\tfor {\n\t\tf, err = f(l)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\terr = l.next()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\tif err != io.EOF {\n\t\tfmt.Println(err)\n\t}\n}",
"func lexerPreallocMem(inputSize int, numThreads int) {\n\tlexerInt64Pools = make([]*int64Pool, numThreads)\n\t\n\tavgCharsPerNumber := float64(4)\n\t\n\tpoolSizePerThread := int(math.Ceil((float64(inputSize) / avgCharsPerNumber) / float64(numThreads)))\n\n\tfor i := 0; i < numThreads; i++ {\n\t\tlexerInt64Pools[i] = newInt64Pool(poolSizePerThread)\n\t}\n}",
"func FakeLexer(s string, text func(string), emoji func(string)) {\n\ttext(s)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Test_Add_Read adds random entries to a testdb then tries to read those entries | func Test_Add_Read(t *testing.T) {
var test_shorthand = make([]byte, 20)
var test_fullpath = make([]byte, 20)
prio := -1
rand.Read(test_shorthand)
rand.Read(test_fullpath)
short := make_printable(test_shorthand)
full := make_printable(test_fullpath)
e := Entry{
shorthand: short,
full_path: full,
prio: prio,
extra: "TESTING",
}
AddEntry(&e)
res := GetShort(short)
if len(res) < 1{
t.Fail()
}
} | [
"func readTestData(t *testing.T, r FileSetReader, shard uint32, timestamp time.Time, entries []testEntry) {\n\tfor _, underTest := range readTestTypes {\n\t\terr := r.Open(testNs1ID, 0, timestamp)\n\t\trequire.NoError(t, err)\n\n\t\trequire.Equal(t, len(entries), r.Entries())\n\t\trequire.Equal(t, 0, r.EntriesRead())\n\n\t\tbloomFilter, err := r.ReadBloomFilter()\n\t\tassert.NoError(t, err)\n\t\t// Make sure the bloom filter doesn't always return true\n\t\tassert.False(t, bloomFilter.Test([]byte(\"some_random_data\")))\n\t\texpectedM, expectedK := bloom.EstimateFalsePositiveRate(\n\t\t\tuint(len(entries)), defaultIndexBloomFilterFalsePositivePercent)\n\t\tassert.Equal(t, expectedK, bloomFilter.K())\n\t\t// EstimateFalsePositiveRate always returns at least 1, so skip this check\n\t\t// if len entries is 0\n\t\tif len(entries) > 0 {\n\t\t\tassert.Equal(t, expectedM, bloomFilter.M())\n\t\t}\n\n\t\tfor i := 0; i < r.Entries(); i++ {\n\t\t\tswitch underTest {\n\t\t\tcase readTestTypeData:\n\t\t\t\tid, data, checksum, err := r.Read()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tdata.IncRef()\n\n\t\t\t\tassert.Equal(t, entries[i].id, id.String())\n\t\t\t\tassert.True(t, bytes.Equal(entries[i].data, data.Get()))\n\t\t\t\tassert.Equal(t, digest.Checksum(entries[i].data), checksum)\n\n\t\t\t\tassert.Equal(t, i+1, r.EntriesRead())\n\n\t\t\t\t// Verify that the bloomFilter was bootstrapped properly by making sure it\n\t\t\t\t// at least contains every ID\n\t\t\t\tassert.True(t, bloomFilter.Test(id.Data().Get()))\n\n\t\t\t\tid.Finalize()\n\t\t\t\tdata.DecRef()\n\t\t\t\tdata.Finalize()\n\t\t\tcase readTestTypeMetadata:\n\t\t\t\tid, length, checksum, err := r.ReadMetadata()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tassert.True(t, id.Equal(id))\n\t\t\t\tassert.Equal(t, digest.Checksum(entries[i].data), checksum)\n\t\t\t\tassert.Equal(t, len(entries[i].data), length)\n\n\t\t\t\tassert.Equal(t, i+1, r.MetadataRead())\n\n\t\t\t\t// Verify that the bloomFilter was bootstrapped 
properly by making sure it\n\t\t\t\t// at least contains every ID\n\t\t\t\tassert.True(t, bloomFilter.Test(id.Data().Get()))\n\n\t\t\t\tid.Finalize()\n\t\t\t}\n\t\t}\n\n\t\trequire.NoError(t, r.Close())\n\t}\n}",
"func ReadDatabase(ctx context.Context, tests TestMap) (m TestMap, err error) {\n\n\tm = make(TestMap)\n\n\tclient, err := firestore.NewClient(ctx, \"homepage-961\")\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer client.Close()\n\n\tfor k := range tests {\n\t\tvar doc *firestore.DocumentSnapshot\n\t\tdoc, err = client.Collection(\"monitoring\").Doc(k).Get(ctx)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tif doc.Exists() {\n\t\t\tvar res TestResult\n\t\t\t// Add a new document.\n\t\t\terr = doc.DataTo(&res)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tm[k] = res\n\t\t}\n\t}\n\treturn\n}",
"func TestBasic(t *testing.T){\r\n\tif !TESTBASIC{\r\n\t\treturn\r\n\t}\r\n rafts,_ := makeRafts(5, \"input_spec.json\", \"log\", 220, 300)\t\r\n\tcontents := make([]string, 2)\r\n\tcontents[0] = \"foo\"\r\n\tcontents[1] = \"bar\"\r\n\t//To get one node elected as Leader\r\n\ttime.Sleep(2*time.Second)\r\n\trafts[0].Append([]byte(contents[0]))\r\n\trafts[0].Append([]byte(contents[1]))\r\n\tciarr := []int{0,0,0,0,0}\r\n\tfor cnt:=0;cnt<5;{\r\n\t\tfor idx, node := range rafts {\r\n\t\t\tselect {\r\n\t\t\tcase ci := <-node.CommitChannel():\r\n\t\t\t\tif ci.Err != nil {\r\n\t\t\t\t\tfmt.Fprintln(os.Stderr,ci.Err)\r\n\t\t\t\t}\r\n\t\t\t\texpect(t,contents[ciarr[idx]], string(ci.Data))\r\n\t\t\t\tciarr[idx] += 1\r\n\t\t\t\tif ciarr[idx] == 2{\r\n\t\t\t\t\tcnt += 1\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\tfor _, node := range rafts{\r\n\t\t//Tests LogStore actions\r\n\t\tnode.mainLogLock.RLock()\r\n\t\tdefer node.mainLogLock.RUnlock()\r\n\t\tiface, err := node.mainLog.Get(0)\r\n\t\tcheckError(t, err,fmt.Sprintf(\"NodeId:%v, mainLog.get(0) mainLog.LastIndex:%v\", node.Id(), node.mainLog.GetLastIndex()))\r\n\t\tif iface != nil{\r\n\t\t\tfoo := iface.([]byte)\r\n\t\t\texpect(t, string(foo), \"foo\")\r\n\t\t\tdebugRaftTest(fmt.Sprintf(\"0:%v\", string(foo)))\r\n\t\t}\r\n\t\tiface, err = node.mainLog.Get(1) \r\n\t\tcheckError(t, err, fmt.Sprintf(\"NodeId:%v, mainLog.get(1) mainLog.LastIndex:%v\", node.Id(), node.mainLog.GetLastIndex()))\r\n\t\tif iface != nil{\r\n\t\t\tbar := iface.([]byte)\r\n\t\t\texpect(t, string(bar), \"bar\")\r\n\t\t\tdebugRaftTest(fmt.Sprintf(\"1:%v\", string(bar)))\r\n\t\t}\r\n\r\n\t\t//Tests StateStore actions\r\n\t\tnode.stateLogLock.RLock()\r\n\t\tdefer node.stateLogLock.RUnlock()\r\n\t\tnode.smLock.RLock()\r\n\t\tdefer node.smLock.RUnlock()\r\n\t\tiface, err = node.stateLog.Get(0) \r\n\t\tcheckError(t, err, fmt.Sprintf(\"Id:%v, stateLog.get(0)\", node.Id()))\r\n\t\tif iface != nil{\r\n\t\t\tstate := iface.(StateInfo)\r\n\t\t\texpect(t, 
fmt.Sprintf(\"%v\", state.CurrTerm), fmt.Sprintf(\"%v\", node.sm.currTerm))\r\n\t\t\texpect(t, fmt.Sprintf(\"%v\", state.VotedFor), fmt.Sprintf(\"%v\", node.sm.votedFor))\r\n\t\t\texpect(t, state.Log.String(), node.sm.log.String())\r\n\t\t}\r\n\t}\r\n\r\n\tfor _, node := range rafts {\r\n\t\tnode.Shutdown()\r\n\t}\r\n\ttime.Sleep(1*time.Second)\t\t\t\r\n}",
"func TestIntCreateTwoPersonsAndReadBack(t *testing.T) {\n\tlog.SetPrefix(\"TestCreatePersonAndReadBack\")\n\t// Create a dao containing a session\n\tdbsession, err := dbsession.MakeGorpMysqlDBSession()\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t}\n\tdefer dbsession.Close()\n\n\tdao := MakeRepo(dbsession)\n\n\tclearDown(dao, t)\n\n\t//Create two people\n\tp1 := personModel.MakeInitialisedPerson(0, expectedForename1, expectedSurname1)\n\tperson1, err := dao.Create(p1)\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t}\n\tlog.Printf(\"person1 %s\", person1.String())\n\tp2 := personModel.MakeInitialisedPerson(0, expectedForename2, expectedSurname2)\n\tperson2, err := dao.Create(p2)\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t}\n\n\t// read all the people in the DB - expect just the two we created\n\tpeople, err := dao.FindAll()\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t}\n\n\tif len(people) != 2 {\n\t\tt.Errorf(\"expected 2 rows, actual %d\", len(people))\n\t}\n\n\tmatches := 0\n\tfor _, person := range people {\n\t\tswitch person.Forename() {\n\t\tcase expectedForename1:\n\t\t\tif person.Surname() == expectedSurname1 {\n\t\t\t\tmatches++\n\t\t\t} else {\n\t\t\t\tt.Errorf(\"expected surname to be %s actually %s\", expectedSurname1, person.Surname())\n\t\t\t}\n\t\tcase expectedForename2:\n\t\t\tif person.Surname() == expectedSurname2 {\n\t\t\t\tmatches++\n\t\t\t} else {\n\t\t\t\tt.Errorf(\"expected forename to be %s actually %s\", expectedForename2, person.Forename())\n\t\t\t}\n\t\tdefault:\n\t\t\tt.Errorf(\"unexpected forename - %s\", person.Forename())\n\t\t}\n\t}\n\n\t// We should have just the records we created\n\tif matches != 2 {\n\t\tt.Errorf(\"expected two matches, actual %d\", matches)\n\t}\n\n\t// Find each of the records by ID and check the fields\n\tlog.Printf(\"finding person %d\", person1.ID())\n\tperson1Returned, err := dao.FindByID(person1.ID())\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t}\n\tif person1Returned.Forename() != 
expectedForename1 {\n\t\tt.Errorf(\"expected forename to be %s actually %s\",\n\t\t\texpectedForename1, person1Returned.Forename())\n\t}\n\tif person1Returned.Surname() != expectedSurname1 {\n\t\tt.Errorf(\"expected surname to be %s actually %s\",\n\t\t\texpectedSurname1, person1Returned.Surname())\n\t}\n\n\tvar IDStr = strconv.FormatUint(person2.ID(), 10)\n\tperson2Returned, err := dao.FindByIDStr(IDStr)\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t}\n\n\tlog.Printf(\"found person %s\", person2Returned.String())\n\n\tif person2Returned.Forename() != expectedForename2 {\n\t\tt.Errorf(\"expected forename to be %s actually %s\",\n\t\t\texpectedForename2, person2Returned.Forename())\n\t}\n\tif person2Returned.Surname() != expectedSurname2 {\n\t\tt.Errorf(\"expected surname to be %s actually %s\",\n\t\t\texpectedSurname2, person2Returned.Surname())\n\t}\n\n\tclearDown(dao, t)\n}",
"func InitTestDB() error {\n\tif db == nil {\n\t\treturn errors.New(\"database not initialized\")\n\t}\n\n\tfor i := 0; i < 1000; i++ {\n\t\tkey := intToByteArray(i)\n\t\tvalue := GetByteArray(\"hello from \"+strconv.Itoa(i), \"string\")\n\t\terr := Insert(key, value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func Test_Read(t *testing.T) {\n\tctx := context.Background()\n\tdatabase, err := db.ConnectDB(\"\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tProjectService := NewProjectServiceServer(database)\n\treq := &v1.ReadRequest{\n\t\tApi: apiVersion,\n\t\tId: 2,\n\t}\n\tres, _ := ProjectService.Read(ctx, req)\n\tfmt.Println(res)\n\tt.Log(\"Done\")\n\n}",
"func readsNWrites(t *testing.T, blobs BlobAdmin) {\n\tfor _, testCase := range testData {\n\t\texpectedKey := toKeyOrDie(t, testCase.expectedHash)\n\t\t// 1 read must fail\n\t\t_, err := blobs.Read(expectedKey)\n\t\tassert(err != nil, t, \"Reading %s should had failed!\", testCase.expectedHash)\n\t\tassert(!strings.Contains(err.Error(), \"bytes long hash key\"), t,\n\t\t\t\"Error type when reading %s:%v\", testCase.expectedHash, err)\n\t\t// 2 write must succeed and key must match\n\t\tkey, err := blobs.Write(strings.NewReader(testCase.input))\n\t\tassert(err == nil, t, \"Error writing blob %s:%s\", testCase.expectedHash, err)\n\t\tassert(key.Equals(expectedKey), t, \"Expected blob key to be %s but got %s\", testCase.expectedHash, key)\n\t\t// 3 read must now succeed\n\t\treader, err := blobs.Read(key)\n\t\tassert(err == nil, t, \"Error fetching %s: %v\", key, err)\n\t\tblobBytes, err := ioutil.ReadAll(reader)\n\t\tassert(err == nil, t, \"Error reading %s: %v\", key, err)\n\t\tassert(bytes.Compare(blobBytes, []byte(testCase.input)) == 0, t,\n\t\t\t\"Expected to read '%s' but got '%s'\", testCase.input, blobBytes)\n\t\t// 4 writing again must succeed and key must match all over again\n\t\tkey, err = blobs.Write(strings.NewReader(testCase.input))\n\t\tassert(err == nil, t, \"Error writing blob %s:%s\", testCase.expectedHash, err)\n\t\tassert(key.Equals(expectedKey), t, \"Expected blob key to be %s but got %s\", testCase.expectedHash, key)\n\t\t// 5 remove must succeed\n\t\terr = blobs.Remove(key)\n\t\tassert(err == nil, t, \"Error removing %s: %v\", key, err)\n\t\t// 6 read must now fail\n\t\terr = blobs.Remove(key)\n\t\tassert(err == nil, t, \"Error removing %s: %v\", key, err)\n\t}\n}",
"func loadTestData(filename string) (*mockstore.DB, error) {\n\tinput, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tparsed, err := parser.ParseInsert(parser.InsertFormatTSV, string(input))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdb := mockstore.NewDB()\n\tnextKID := db.SkipIndex()*logread.MaxOffset + 1\n\tresolveOrCreateXID := func(xid string) uint64 {\n\t\tkid := db.Current().ResolveXID(xid)\n\t\tif kid != 0 {\n\t\t\treturn kid\n\t\t}\n\t\tkid = nextKID\n\t\tnextKID++\n\t\tdb.AddSPO(kid, wellknown.HasExternalID, rpc.AString(xid, 0))\n\t\treturn kid\n\t}\n\tfor _, line := range parsed.Facts {\n\t\tspo, err := makeFact(line, resolveOrCreateXID)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Invalid test data line %s: %v\", line, err)\n\t\t}\n\t\tdb.AddSPO(spo.Subject, spo.Predicate, spo.Object)\n\t}\n\treturn db, nil\n}",
"func initTests() int {\n\trows, err := db.Query(\"SELECT idTEST, categories, conditions, params, period, scoreMap, custORacct FROM TXTEST\")\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t// defer rows.Close()\n\ti := 0\n\tfor rows.Next() {\n\t\ttest := new(TxTest)\n\t\terr := rows.Scan(&test.TName, &test.CategoryStr, &test.Conditions, &test.ParamStr, &test.PeriodStr, &test.ScoreMapStr, &test.CustOrAcct)\n\t\tif err != nil {\n\t\t\trows.Close()\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\ttest.Params, test.QueryType = parseParams(strings.Split(test.ParamStr, \",\"), test.CustOrAcct)\n\t\ttest.Period = *parsePeriod(test.PeriodStr)\n\t\ttest.ScoreMap = parseScoreMap(test.ScoreMapStr)\n\n\t\ttxTestCache[test.TName] = test\n\t\ti++\n\t\tfmt.Printf(\"\\ntest %s: %+v\", txTestCache[test.TName].TName, txTestCache[test.TName])\n\t}\n\trows.Close()\n\t//\treturn custs, err\n\treturn i\n}",
"func TestRetrieve(t *testing.T) {\n\t// drop the debug database\n\ttable, err := coreDatabase.TableConnect(true, \"perk\", []string{\"name\", \"brand\", \"value\", \"created\", \"expiry\"})\n\tif err != nil {\n\t\tt.Error(\"TestRetrieve Connection Error: \" + err.Error())\n\t\treturn\n\t}\n\t_ = table.DropCollection()\n\t// Insert test Items\n\tfor i := 0; i < 4; i++ {\n\t\tdateCreated := time.Date(2018, time.Month(i), 4, 0, 0, 0, 0, time.UTC)\n\t\tdateExpiry := time.Date(2018+i, time.Month(i+2), 4, 0, 0, 0, 0, time.UTC)\n\t\titem := bson.M{\"name\": \"Tesco\" + strconv.Itoa(i),\n\t\t\t\"brand\": \"Tesco\",\n\t\t\t\"value\": 1 + i*2,\n\t\t\t\"created\": dateCreated,\n\t\t\t\"expiry\": dateExpiry,\n\t\t}\n\t\terr = table.Insert(&item)\n\t}\n\t// query: filter by name\n\tFullList, err := Retrieve(\"Tesco0\", false, time.Time{}, table)\n\tif err != nil {\n\t\tt.Error(\"TestRetrieve Retrieve Error: \" + err.Error())\n\t\treturn\n\t}\n\tcompareItem := Item{Name: \"Tesco0\", Brand: \"Tesco\", Value: 1,\n\t\tCreated: time.Date(2018, 0, 4, 0, 0, 0, 0, time.UTC),\n\t\tExpiry: time.Date(2018, 2, 4, 0, 0, 0, 0, time.UTC),\n\t}\n\tif FullList[0] != compareItem {\n\t\tt.Error(\"TestRetrieve Retrieve check Item Error\")\n\t}\n\t// query: only active offers\n\tlayout := \"2006-01-02T15:04:05.000Z\"\n\tstr := \"2019-05-25T11:45:26.371Z\"\n\tstartTime, _ := time.Parse(layout, str)\n\tFullList, err = Retrieve(\"*\", true, startTime, table)\n\tif err != nil {\n\t\tt.Error(\"TestRetrieve Retrieve Active Error: \" + err.Error())\n\t\treturn\n\t}\n\tif len(FullList) != 2 {\n\t\tt.Error(\"TestRetrieve Retrieve check Active List\")\n\t}\n\n\t// query: all the offers\n\tFullList, err = Retrieve(\"*\", false, time.Time{}, table)\n\tif err != nil {\n\t\tt.Error(\"TestRetrieve Retrieve Active Error: \" + err.Error())\n\t\treturn\n\t}\n\tif len(FullList) != 4 {\n\t\tt.Error(\"TestRetrieve Retrieve All List\")\n\t}\n\t// cleanup\n\t_ = table.DropCollection()\n}",
"func (wf *OLTPSpanner) multiRandomRead(r *rand.Rand) error {\n\tconst numReads = 5\n\n\treadIDs := []int64{}\n\tfor i := 0; i < numReads; i++ {\n\t\treadID := datagen.RandomGeneratedTransactionID(r)\n\t\treadIDs = append(readIDs, readID)\n\t}\n\n\tstmt := spanner.Statement{\n\t\tSQL: `SELECT t.FromUserId, t.ToUserId\n\t\t\t\tFROM Transactions@{FORCE_INDEX=UniqueId} t\n\t\t\t\tWHERE t.Id IN UNNEST(@keys)`,\n\t\tParams: map[string]interface{}{\n\t\t\t\"keys\": readIDs,\n\t\t},\n\t}\n\titer := wf.client.Single().Query(wf.ctx, stmt)\n\tdefer iter.Stop()\n\tif err := wf.scanIterator(iter); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func Read(id_to_read int32, dbMgr *mgr.DBConn) SimpleDbType {\n\tdb := dbMgr.Open()\n\ttheReturn := SimpleDbType{}\n\n\tsqlRead := fmt.Sprintf(\"select id, name, number from test_table where id = %d\", id_to_read)\n\n\trows, err := db.Query(sqlRead)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\terr := rows.Scan(&theReturn.Id, &theReturn.Name, &theReturn.Number)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\terr = rows.Err()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdbMgr.Close()\n\treturn theReturn\n}",
"func TestReadOneUnreadRecord(t *testing.T) {\n\tconfigureLogp()\n\n\tfileName := \"test-log-single.aaLOG\"\n\n\ttestRecords := createTestLogRecords(fileName, 2, 1)\n\ttestHeader := createTestLogHeader(fileName, \"MyPC\", \"Session01\", \"FileX.aaLOG\", testRecords)\n\n\tdirectory := \"test-files\"\n\tfilePath := filepath.Join(directory, fileName)\n\terr := writeTestLogFile(filePath, &testHeader, testRecords)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer func() {\n\t\terr := deleteFileIfExists(filePath)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}()\n\n\toptions := map[string]interface{}{\n\t\t\"directory\": directory,\n\t\t\"file_pattern\": \"*.aaLOG\",\n\t\t\"batch_size\": 1000,\n\t}\n\t// Act like we already read the first record.\n\taalog, teardown := setupAaLog(t, filePath, 1, 110, options)\n\tdefer teardown()\n\n\trecords, err := aalog.Read()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tassert.Equal(t, fileName, aalog.Name())\n\tstate := aalog.State()\n\tassert.Equal(t, records[0].number, state.RecordNumber, \"state.RecordNumber\")\n\tassert.Equal(t, records[0].offset, state.RecordOffset, \"state.RecordOffset\")\n\n\tassert.Len(t, records, 1)\n\tr := records[0]\n\tassert.Equal(t, uint64(2), r.number)\n\tassertRecordsMatch(t, testRecords[1], r)\n}",
"func TestReadExistingAndNewLogs(t *testing.T) {\n\tt.Parallel()\n\toperator, logReceived, tempDir := newTestFileOperator(t, nil)\n\n\t// Start with a file with an entry in it, and expect that entry\n\t// to come through when we poll for the first time\n\ttemp := openTemp(t, tempDir)\n\twriteString(t, temp, \"testlog1\\n\")\n\n\trequire.NoError(t, operator.Start(testutil.NewMockPersister(\"test\")))\n\tdefer func() {\n\t\trequire.NoError(t, operator.Stop())\n\t}()\n\n\twaitForMessage(t, logReceived, \"testlog1\")\n\n\t// Write a second entry, and expect that entry to come through\n\t// as well\n\twriteString(t, temp, \"testlog2\\n\")\n\twaitForMessage(t, logReceived, \"testlog2\")\n}",
"func TestDbLog_GetList(t *testing.T) {\n\thandler := debug.NewLocalDb()\n\tdb := newDbLog(handler)\n\t_, err := db.Add(0, \"success\",\"\", 0, \"\", time.GetDayTime())\n\tif err == nil {\n\t\tt.Errorf(\"Add check cronId fail\")\n\t\treturn\n\t}\n\tid, err := db.Add(1, \"success\",\"123\", 1000, \"hello\", time.GetDayTime())\n\tif err != nil {\n\t\tt.Errorf(\"Add fail, error=[%v]\", err)\n\t\treturn\n\t}\n\trows, num, _, _, err := db.GetList(1, 0, 0)\n\tif err != nil || num <= 0 {\n\t\tt.Errorf(\"Get GetList, error=[%v], num=[%v]\", err, num)\n\t\treturn\n\t}\n\tvar row *LogEntity = nil\n\tfor _, r := range rows {\n\t\tif r.Id == id {\n\t\t\trow = r\n\t\t}\n\t}\n\tif row == nil {\n\t\tt.Errorf(\"GetList fail\")\n\t\treturn\n\t}\n\tif row.Id <= 0 || row.CronId != 1 || row.Output != \"123\"||\n\t\trow.UseTime != 1000 || row.Remark != \"hello\" {\n\t\tt.Errorf(\"Add check rows fail\")\n\t\treturn\n\t}\n\tdb.Delete(row.Id)\n}",
"func TestDbLog_Get(t *testing.T) {\n\thandler := debug.NewLocalDb()\n\tdb := newDbLog(handler)\n\t_, err := db.Add(0, \"success\", \"\", 0, \"\", time.GetDayTime())\n\tif err == nil {\n\t\tt.Errorf(\"Add check cronId fail\")\n\t\treturn\n\t}\n\tid, err := db.Add(1, \"success\", \"123\", 1000, \"hello\", time.GetDayTime())\n\tif err != nil {\n\t\tt.Errorf(\"Add fail, error=[%v]\", err)\n\t\treturn\n\t}\n\trow, err := db.Get(id)\n\tif err != nil {\n\t\tt.Errorf(\"Get fail, error=[%v]\", err)\n\t\treturn\n\t}\n\tif row.Id <= 0 || row.CronId != 1 || row.Output != \"123\"||\n\t\trow.UseTime != 1000 || row.Remark != \"hello\" {\n\t\tt.Errorf(\"Add check rows fail\")\n\t\treturn\n\t}\n\tdb.Delete(row.Id)\n}",
"func TestSaveAndRead(t *testing.T) {\n\tcrypter := &meowCrypter{}\n\tmetadata, entries, snapshot := makeWALData(1, 1)\n\n\tc := NewWALFactory(crypter, crypter)\n\ttempdir := createWithWAL(t, c, metadata, snapshot, entries)\n\tdefer os.RemoveAll(tempdir)\n\n\twrapped, err := c.Open(tempdir, snapshot)\n\trequire.NoError(t, err)\n\n\tmeta, _, ents, err := wrapped.ReadAll()\n\trequire.NoError(t, wrapped.Close())\n\trequire.NoError(t, err)\n\trequire.Equal(t, metadata, meta)\n\trequire.Equal(t, entries, ents)\n}",
"func TestSimple(t *testing.T) {\n\tt.Logf(\"Running simple table tests\")\n\tdb, err = DialUnix(TEST_SOCK, TEST_USER, TEST_PASSWD, TEST_DBNAME)\n\tif err != nil {\n\t\tt.Logf(\"Error %s\", err)\n\t\tt.Fail()\n\t}\n\t\n\tt.Logf(\"Create table\")\n\terr = db.Query(CREATE_SIMPLE)\n\tif err != nil {\n\t\tt.Logf(\"Error %s\", err)\n\t\tt.Fail()\n\t}\n\t\n\tt.Logf(\"Insert 1000 records\")\n\trowMap := make(map[uint64][]string)\n\tfor i := 0; i < 1000; i++ {\n\t\tnum, str1, str2 := rand.Int(), randString(32), randString(128)\n\t\terr = db.Query(fmt.Sprintf(INSERT_SIMPLE, num, str1, str2))\n\t\tif err != nil {\n\t\t\tt.Logf(\"Error %s\", err)\n\t\t\tt.Fail()\n\t\t}\n\t\trow := []string{fmt.Sprintf(\"%d\", num), str1, str2}\n\t\trowMap[db.LastInsertId] = row\n\t}\n\t\n\tt.Logf(\"Select inserted data\")\n\terr = db.Query(SELECT_SIMPLE)\n\tif err != nil {\n\t\tt.Logf(\"Error %s\", err)\n\t\tt.Fail()\n\t}\n\t\n\tt.Logf(\"Use result\")\n\tres, err := db.UseResult()\n\tif err != nil {\n\t\tt.Logf(\"Error %s\", err)\n\t\tt.Fail()\n\t}\n\t\n\tt.Logf(\"Validate inserted data\")\n\tfor {\n\t\trow := res.FetchRow()\n\t\tif row == nil {\n\t\t\tbreak\n\t\t}\n\t\tid := row[0].(uint64)\n\t\tnum, str1, str2 := strconv.Itoa64(row[1].(int64)), row[2].(string), string(row[3].([]byte))\n\t\tif rowMap[id][0] != num || rowMap[id][1] != str1 || rowMap[id][2] != str2 {\n\t\t\tt.Logf(\"String from database doesn't match local string\")\n\t\t\tt.Fail()\n\t\t}\n\t}\n\t\n\tt.Logf(\"Free result\")\n\terr = res.Free()\n\tif err != nil {\n\t\tt.Logf(\"Error %s\", err)\n\t\tt.Fail()\n\t}\n\t\n\tt.Logf(\"Update some records\")\n\tfor i := uint64(0); i < 1000; i += 5 {\n\t\trowMap[i+1][2] = randString(256)\n\t\terr = db.Query(fmt.Sprintf(UPDATE_SIMPLE, rowMap[i+1][2], i+1))\n\t\tif err != nil {\n\t\t\tt.Logf(\"Error %s\", err)\n\t\t\tt.Fail()\n\t\t}\n\t\tif db.AffectedRows != 1 {\n\t\t\tt.Logf(\"Expected 1 effected row but got %d\", db.AffectedRows)\n\t\t\tt.Fail()\n\t\t}\n\t}\n\t\n\tt.Logf(\"Select 
updated data\")\n\terr = db.Query(SELECT_SIMPLE)\n\tif err != nil {\n\t\tt.Logf(\"Error %s\", err)\n\t\tt.Fail()\n\t}\n\t\n\tt.Logf(\"Store result\")\n\tres, err = db.StoreResult()\n\tif err != nil {\n\t\tt.Logf(\"Error %s\", err)\n\t\tt.Fail()\n\t}\n\t\n\tt.Logf(\"Validate updated data\")\n\tfor {\n\t\trow := res.FetchRow()\n\t\tif row == nil {\n\t\t\tbreak\n\t\t}\n\t\tid := row[0].(uint64)\n\t\tnum, str1, str2 := strconv.Itoa64(row[1].(int64)), row[2].(string), string(row[3].([]byte))\n\t\tif rowMap[id][0] != num || rowMap[id][1] != str1 || rowMap[id][2] != str2 {\n\t\t\tt.Logf(\"%#v %#v\", rowMap[id], row)\n\t\t\tt.Logf(\"String from database doesn't match local string\")\n\t\t\tt.Fail()\n\t\t}\n\t}\n\t\n\tt.Logf(\"Free result\")\n\terr = res.Free()\n\tif err != nil {\n\t\tt.Logf(\"Error %s\", err)\n\t\tt.Fail()\n\t}\n\n\tt.Logf(\"Drop table\")\n\terr = db.Query(DROP_SIMPLE)\n\tif err != nil {\n\t\tt.Logf(\"Error %s\", err)\n\t\tt.Fail()\n\t}\n\t\n\tt.Logf(\"Close connection\")\n\terr = db.Close()\n\tif err != nil {\n\t\tt.Logf(\"Error %s\", err)\n\t\tt.Fail()\n\t}\n}",
"func TestReadMultipleUnreadRecords(t *testing.T) {\n\tconfigureLogp()\n\n\tfileName := \"test-log-multiple.aaLOG\"\n\ttestRecords := createTestLogRecords(fileName, 20, 1001)\n\ttestHeader := createTestLogHeader(fileName, \"PC0001\", \"Session27\", \"SomeFileY.aaLOG\", testRecords)\n\n\tdirectory := \"test-files\"\n\tfilePath := filepath.Join(directory, fileName)\n\terr := writeTestLogFile(filePath, &testHeader, testRecords)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer func() {\n\t\terr := deleteFileIfExists(filePath)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}()\n\n\toptions := map[string]interface{}{\n\t\t\"directory\": directory,\n\t\t\"file_pattern\": \"*.aaLOG\",\n\t\t\"batch_size\": 1000,\n\t}\n\t// Act like we already read the first record.\n\taalog, teardown := setupAaLog(t, filePath, 1001, 122, options)\n\tdefer teardown()\n\n\trecords, err := aalog.Read()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tassert.Equal(t, fileName, aalog.Name())\n\tstate := aalog.State()\n\tlastIndex := len(records) - 1\n\tassert.Equal(t, records[lastIndex].number, state.RecordNumber, \"state.RecordNumber\")\n\tassert.Equal(t, records[lastIndex].offset, state.RecordOffset, \"state.RecordOffset\")\n\n\tassert.Len(t, records, len(testRecords)-1)\n\tfor _, r := range records {\n\t\tlr, found := findMatchingRecord(r.number, testRecords)\n\t\tif !found {\n\t\t\tt.Errorf(\"Unknown record number %d returned by Read()\", r.number)\n\t\t}\n\t\tassertRecordsMatch(t, lr, r)\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetUsersHandler lista todos los usuarios | func GetUsersHandler(w http.ResponseWriter, r *http.Request) {
var users []User
for _, v := range Listusers {
users = append(users, v)
}
w.Header().Set("Content-Type", "application/json")
j, err := json.Marshal(users)
if err != nil {
panic(err)
}
w.WriteHeader(http.StatusOK)
w.Write(j)
} | [
"func getUsersHandler(c *gin.Context) {\n\tuser, _ := c.Get(JwtIdentityKey)\n\n\t// Role check.\n\tif !isAdmin(user) {\n\t\tc.JSON(http.StatusUnauthorized, gin.H{\"message\": \"unauthorized\"})\n\t\treturn\n\t}\n\n\tpage := c.DefaultQuery(\"page\", \"1\")\n\tcount := c.DefaultQuery(\"count\", \"10\")\n\tpageInt, _ := strconv.Atoi(page)\n\tcountInt, _ := strconv.Atoi(count)\n\n\tif page == \"0\" {\n\t\tpageInt = 1\n\t}\n\n\tvar wg sync.WaitGroup\n\tvar users *[]types.User\n\tvar usersCount int\n\n\tdb := data.New()\n\twg.Add(1)\n\tgo func() {\n\t\tusers = db.Users.GetUsers((pageInt-1)*countInt, countInt)\n\t\twg.Done()\n\t}()\n\n\twg.Add(1)\n\tgo func() {\n\t\tusersCount = db.Users.GetUsersCount()\n\t\twg.Done()\n\t}()\n\twg.Wait()\n\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"code\": http.StatusOK,\n\t\t\"users\": users,\n\t\t\"count\": usersCount,\n\t})\n}",
"func (c *Controller) GetUsersHandler(cxt *gin.Context){\r\n\tusers := c.service.GetUsers()\r\n\tcxt.JSON(http.StatusOK, gin.H{\r\n\t\t\"users\": users, \r\n\t})\r\n}",
"func (h *Handler) HandleAllUsersGET(w http.ResponseWriter, r *http.Request) {\n\tcurrentAdmin := h.currentAdmin(w, r)\n\tif currentAdmin == nil {\n\t\treturn\n\t}\n\n\tusers, err := h.rDB.GetUsersAll()\n\tif err != nil {\n\t\tfmt.Printf(\"[ERROR] unable to get list of users, err : %v\", err)\n\t\trenderJSON(w, http.StatusInternalServerError,\n\t\t\tmap[string]string{\"error\": err500},\n\t\t)\n\t\treturn\n\t}\n\n\tviewArgs := map[string]interface{}{\n\t\t\"Header\": map[string]interface{}{\n\t\t\t\"DisplayName\": currentAdmin.Fname,\n\t\t},\n\t\t\"DisplayName\": currentAdmin.Fname,\n\t\t\"Users\": func() (result []interface{}) {\n\t\t\tfor _, user := range users {\n\n\t\t\t\tisLoggedIn := \"false\"\n\t\t\t\tif user.Session != \"\" {\n\t\t\t\t\tisLoggedIn = \"true\"\n\t\t\t\t}\n\t\t\t\tresult = append(result, map[string]interface{}{\n\t\t\t\t\t\"ID\": user.ID,\n\t\t\t\t\t\"Fname\": user.Fname,\n\t\t\t\t\t\"Lname\": user.Lname,\n\t\t\t\t\t\"Email\": user.Email,\n\t\t\t\t\t\"CreatedAt\": user.CreatedAt,\n\t\t\t\t\t\"IsLoggedIn\": isLoggedIn,\n\t\t\t\t})\n\t\t\t}\n\t\t\treturn\n\t\t}(),\n\t}\n\trenderHTML(w, \"adminUsers.html\", viewArgs)\n\treturn\n}",
"func GetUsersHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tj, status := users.GetAllUsers()\n\tw.WriteHeader(status)\n\tw.Write(j)\n}",
"func GetUsers(w http.ResponseWriter, r *http.Request) {\n\tvar users []UsersData\n\terr := model.FindAll(nil, &users)\n\tif err != nil {\n\t\tfmt.Println(\"err\", err)\n\t\tw.Write([]byte(\"Something wen't wrong!!\"))\n\t} else {\n\t\trender.JSON(w, 200, &users)\n\t}\n}",
"func GetUsers(w http.ResponseWriter, r *http.Request) {\n\tloginOrName := strings.ToLower(r.URL.Query().Get(\"user\"))\n\n\tdb, err := database.Connect()\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\trepository := repository.NewRepositoryUser(db)\n\n\tusers, err := repository.SearchByLoginOrName(loginOrName)\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tresponses.JSON(w, http.StatusOK, users)\n\n}",
"func (h *handler) Users(w http.ResponseWriter, r *http.Request) {\n\tapiReq, err := http.NewRequest(\"GET\", h.serverAddress+\"/users\", nil)\n\tif err != nil {\n\t\tserverError(w, err)\n\t\treturn\n\t}\n\n\tclient := &http.Client{}\n\tres, err := client.Do(apiReq)\n\tif err != nil {\n\t\tserverError(w, err)\n\t\treturn\n\t}\n\tdefer res.Body.Close()\n\n\tvar uis []socialnet.UserItem\n\terr = json.NewDecoder(res.Body).Decode(&uis)\n\tif err != nil {\n\t\tserverError(w, err)\n\t\treturn\n\t}\n\n\terr = h.template.ExecuteTemplate(w, \"users.html\", uis)\n\tif err != nil {\n\t\tserverError(w, fmt.Errorf(\"failed to execute template users.html: %s\", err))\n\t\treturn\n\t}\n}",
"func UsersGET(w http.ResponseWriter, r *http.Request) {\n\tqParams := r.URL.Query()\n\n\tvar (\n\t\tpageStr = qParams.Get(\"page\")\n\t\tperPageStr = qParams.Get(\"per_page\")\n\t\tsort = qParams.Get(\"sort\")\n\t\torder = qParams.Get(\"order\")\n\t)\n\n\tpage, err := strconv.ParseUint(pageStr, 10, 64)\n\tif err != nil {\n\t\tpage = 1\n\t}\n\n\tperPage, err := strconv.ParseUint(perPageStr, 10, 64)\n\tif err != nil {\n\t\tperPage = uint64(constants.UsersQueryMaxPageSize)\n\t}\n\n\tpc := helper.PaginationConfig{\n\t\tPage: page,\n\t\tPerPageCount: perPage,\n\t}\n\n\tsc := helper.SortingConfig{\n\t\tSortBy: sort,\n\t\tOrder: order,\n\t}\n\n\tuserRows, err := user.Search(nil, &pc, &sc)\n\tif err != nil {\n\t\tlog.Error(\"Error %s\", err)\n\t\terrorCtrl.Error500(w, r)\n\t\treturn\n\t}\n\n\tusers := make([]*types.User, len(userRows))\n\n\tfor i, ur := range userRows {\n\t\tusers[i] = &types.User{\n\t\t\tUsername: ur.Username,\n\t\t\tID: ur.ID,\n\t\t\tName: ur.Name,\n\t\t\tEmail: misc.TerOpt(ur.IsPublicEmail, ur.Email, \"\").(string),\n\t\t\tJoinedAt: ur.JoinedAt,\n\t\t\tAvatarURL: ur.Avatar,\n\t\t\tBlog: ur.URL,\n\t\t\tOrganization: ur.Organization,\n\t\t\tLocation: ur.Location,\n\t\t\tPackagesCount: ur.PackagesCount,\n\t\t\tSocial: types.UserSocialAccounts{\n\t\t\t\tGithub: ur.Github,\n\t\t\t\tTwitter: ur.Twitter,\n\t\t\t\tStackOverflow: ur.StackOverflow,\n\t\t\t\tLinkedIn: ur.LinkedIn,\n\t\t\t},\n\t\t}\n\t}\n\n\thelper.WriteResponseValueOK(w, r, users)\n}",
"func GetUsers(req *http.Request, render render.Render, account services.Account) {\n qs := req.URL.Query()\n userIDs := qs[\"userId\"]\n var users []models.User\n for _, userID := range userIDs {\n if user, err := account.GetUser(userID); err != nil {\n render.JSON(err.HttpCode, err)\n return\n } else {\n users = append(users, *user)\n }\n }\n render.JSON(http.StatusOK, users)\n}",
"func (srv *UsersService) ListHandler(ctx *gin.Context) {\n\tlogger := srv.logger.New(\"action\", \"ListHandler\")\n\n\tcurrentUser := GetCurrentUser(ctx)\n\n\tlimitQuery := ctx.DefaultQuery(\"limit\", \"10\")\n\tpageQuery := ctx.DefaultQuery(\"page\", \"1\")\n\tparams := ctx.Request.URL.Query()\n\n\tvar adminsRoleIncluded = false\n\n\troles := params[\"filter[role_name]\"]\n\tif len(roles) > 0 {\n\t\tfor key, role := range roles {\n\t\t\t// remove root from role names if user is not root\n\t\t\t// only root can see root users\n\t\t\tif role == models.RoleRoot && currentUser.RoleName != models.RoleRoot {\n\t\t\t\tcopy(roles[key:], roles[key+1:])\n\t\t\t\troles[len(roles)-1] = \"\"\n\t\t\t\troles = roles[:len(roles)-1]\n\t\t\t}\n\t\t\tif role == models.RoleRoot || role == models.RoleAdmin {\n\t\t\t\tadminsRoleIncluded = true\n\t\t\t}\n\t\t}\n\t} else {\n\t\tadminsRoleIncluded = true\n\t}\n\n\tvar hasPerm bool\n\tif adminsRoleIncluded {\n\t\thasPerm = srv.PermissionsService.CanViewAdminProfile(currentUser.UID)\n\t} else {\n\t\thasPerm = srv.PermissionsService.CanViewUserProfile(currentUser.UID)\n\t}\n\n\tif !hasPerm {\n\t\tsrv.ResponseService.Forbidden(ctx)\n\t\treturn\n\t}\n\n\tquery := srv.Repository.GetUsersRepository().Filter(params)\n\n\tpagination, err := srv.Repository.GetUsersRepository().Paginate(query, pageQuery, limitQuery, serializers.NewUsers())\n\tif err != nil {\n\t\tlogger.Error(\"сan't load list of user\", \"error\", err)\n\t\t// Returns a \"400 StatusBadRequest\" response\n\t\tsrv.ResponseService.Error(ctx, responses.CannotRetrieveCollection, \"Can't load list of users\")\n\t\treturn\n\t}\n\n\t// Returns a \"200 OK\" response\n\tsrv.ResponseService.OkResponse(ctx, pagination)\n}",
"func (h *HTTPClientHandler) getAllUsersHandler(w http.ResponseWriter, r *http.Request) {\n\n\tuserid, _ := r.URL.Query()[\"q\"]\n\t// looking for specific user\n\tif len(userid) > 0 {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"userid\": userid[0],\n\t\t}).Info(\"Looking for user..\")\n\n\t\tuser, err := h.db.getUser(userid[0])\n\n\t\tif err == nil {\n\t\t\t// Marshal provided interface into JSON structure\n\t\t\tresponse := UserResource{Data: user}\n\t\t\tuj, _ := json.Marshal(response)\n\n\t\t\t// Write content-type, statuscode, payload\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.WriteHeader(200)\n\t\t\tfmt.Fprintf(w, \"%s\", uj)\n\t\t\treturn\n\t\t} else {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"error\": err.Error(),\n\t\t\t}).Warn(\"Failed to insert..\")\n\n\t\t\tcontent, code := responseDetailsFromMongoError(err)\n\n\t\t\t// Marshal provided interface into JSON structure\n\t\t\tuj, _ := json.Marshal(content)\n\n\t\t\t// Write content-type, statuscode, payload\n\t\t\twriteJsonResponse(w, &uj, code)\n\t\t\treturn\n\n\t\t}\n\t}\n\n\tlog.Warn(len(userid))\n\t// displaying all users\n\tresults, err := h.db.getUsers()\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t}).Error(\"Got error when tried to get all users\")\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"count\": len(results),\n\t}).Info(\"number of users\")\n\n\t// Marshal provided interface into JSON structure\n\tresponse := UsersResource{Data: results}\n\tuj, _ := json.Marshal(response)\n\n\t// Write content-type, statuscode, payload\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(200)\n\tfmt.Fprintf(w, \"%s\", uj)\n}",
"func (a *Api) GetUsers(res http.ResponseWriter, req *http.Request) {\n\tsessionToken := req.Header.Get(TP_SESSION_TOKEN)\n\tif tokenData, err := a.authenticateSessionToken(req.Context(), sessionToken); err != nil {\n\t\ta.sendError(res, http.StatusUnauthorized, STATUS_UNAUTHORIZED, err)\n\n\t} else if !tokenData.IsServer {\n\t\ta.sendError(res, http.StatusUnauthorized, STATUS_UNAUTHORIZED)\n\n\t} else if len(req.URL.Query()) == 0 {\n\t\ta.sendError(res, http.StatusBadRequest, STATUS_NO_QUERY)\n\n\t} else if role := req.URL.Query().Get(\"role\"); role != \"\" && !IsValidRole(role) {\n\t\ta.sendError(res, http.StatusBadRequest, STATUS_INVALID_ROLE)\n\n\t} else if userIds := strings.Split(req.URL.Query().Get(\"id\"), \",\"); len(userIds[0]) > 0 && role != \"\" {\n\t\ta.sendError(res, http.StatusBadRequest, STATUS_ONE_QUERY_PARAM)\n\n\t} else {\n\t\tvar users []*User\n\t\tswitch {\n\t\tcase role != \"\":\n\t\t\tif users, err = a.Store.WithContext(req.Context()).FindUsersByRole(role); err != nil {\n\t\t\t\ta.sendError(res, http.StatusInternalServerError, STATUS_ERR_FINDING_USR, err.Error())\n\t\t\t}\n\t\tcase len(userIds[0]) > 0:\n\t\t\tif users, err = a.Store.WithContext(req.Context()).FindUsersWithIds(userIds); err != nil {\n\t\t\t\ta.sendError(res, http.StatusInternalServerError, STATUS_ERR_FINDING_USR, err.Error())\n\t\t\t}\n\t\tdefault:\n\t\t\ta.sendError(res, http.StatusBadRequest, STATUS_PARAMETER_UNKNOWN)\n\t\t}\n\t\ta.logMetric(\"getusers\", sessionToken, map[string]string{\"server\": strconv.FormatBool(tokenData.IsServer)})\n\t\ta.sendUsers(res, users, tokenData.IsServer)\n\t}\n}",
"func UsersGet(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tlog.Println(\"starting retrieval\")\n\tstart := 0\n\tlimit := 10\n\n\tnext := start + limit\n\n\tw.Header().Set(\"Pragma\", \"no-cache\")\n\tw.Header().Set(\"Link\", \"<http://localhost:8080/api/users?start=\"+string(next)+\"; rel=\\\"next\\\"\")\n\n\trows, _ := database.Query(\"SELECT * FROM users LIMIT 10\")\n\n\tusers := Users{}\n\n\tfor rows.Next() {\n\t\tuser := User{}\n\t\trows.Scan(&user.ID, &user.Username, &user.First, &user.Last, &user.Email)\n\t\tusers.Users = append(users.Users, user)\n\t}\n\n\toutput, err := json.Marshal(users)\n\tif err != nil {\n\t\tfmt.Fprintln(w, \"Something went wrong while processing your request: \", err)\n\t}\n\n\tfmt.Fprintln(w, string(output))\n}",
"func (h userHTTP) List(w http.ResponseWriter, r *http.Request) {\n\tlistRequest := listRequestDecoder(r)\n\tusers, err := h.svc.ListUsers(r.Context(), listRequest)\n\tif err != nil {\n\t\th.Logger.With(r.Context()).Errorf(\"list users error : %s\", err)\n\t\trender.Render(w, r, e.BadRequest(err, \"bad request\"))\n\t\treturn\n\t}\n\trender.Respond(w, r, users)\n}",
"func GetUsers(c *gin.Context) {\n\tvar users []models.User\n\tlog.Println(\"GetUsers from db\")\n\tdb := db.GetDB()\n\tdb.Find(&users)\n\tc.JSON(http.StatusOK, users)\n}",
"func (h *UserHandler) handleGetUsers(w http.ResponseWriter, r *http.Request) {\n\tctx := r.Context()\n\n\treq, err := decodeGetUsersRequest(ctx, r)\n\tif err != nil {\n\t\tEncodeError(ctx, err, w)\n\t\treturn\n\t}\n\n\tusers, _, err := h.UserService.FindUsers(ctx, req.filter)\n\tif err != nil {\n\t\tEncodeError(ctx, err, w)\n\t\treturn\n\t}\n\n\terr = encodeResponse(ctx, w, http.StatusOK, newUsersResponse(users))\n\tif err != nil {\n\t\tEncodeError(ctx, err, w)\n\t\treturn\n\t}\n}",
"func GetUsers(c *gin.Context) {\n\n\tlog := logger.WithFields(logrus.Fields{\"tag\": \"GetUsers\"})\n\tlog.Info(\"Fetching users\")\n\n\torganization := auth.GetCurrentOrganization(c.Request)\n\n\tidParam := c.Param(\"id\")\n\tid, err := strconv.ParseUint(idParam, 10, 32)\n\tif idParam != \"\" && err != nil {\n\t\tmessage := fmt.Sprintf(\"error parsing user id: %s\", err)\n\t\tlog.Info(message)\n\t\tc.JSON(http.StatusBadRequest, components.ErrorResponse{\n\t\t\tCode: http.StatusBadRequest,\n\t\t\tMessage: message,\n\t\t\tError: message,\n\t\t})\n\t\treturn\n\t}\n\n\tvar users []auth.User\n\tdb := model.GetDB()\n\terr = db.Model(organization).Related(&users, \"Users\").Error\n\tif err != nil {\n\t\tmessage := \"failed to fetch users\"\n\t\tlog.Info(message + \": \" + err.Error())\n\t\tc.AbortWithStatusJSON(http.StatusInternalServerError, components.ErrorResponse{\n\t\t\tCode: http.StatusInternalServerError,\n\t\t\tMessage: message,\n\t\t\tError: message,\n\t\t})\n\t} else if id == 0 {\n\t\tc.JSON(http.StatusOK, users)\n\t} else if len(users) == 1 {\n\t\tc.JSON(http.StatusOK, users[0])\n\t} else if len(users) > 1 {\n\t\tmessage := fmt.Sprintf(\"multiple users found with id: %d\", id)\n\t\tlog.Info(message)\n\t\tc.AbortWithStatusJSON(http.StatusConflict, components.ErrorResponse{\n\t\t\tCode: http.StatusConflict,\n\t\t\tMessage: message,\n\t\t\tError: message,\n\t\t})\n\t} else {\n\t\tmessage := fmt.Sprintf(\"user not found with id: %d\", id)\n\t\tlog.Info(message)\n\t\tc.AbortWithStatusJSON(http.StatusNotFound, components.ErrorResponse{\n\t\t\tCode: http.StatusNotFound,\n\t\t\tMessage: message,\n\t\t\tError: message,\n\t\t})\n\t}\n}",
"func GetUsers(c *gin.Context) {\n\tvar users []models.User\n\tdb := db.GetDB()\n\tdb.Find(&users)\n\tc.JSON(200, users)\n}",
"func ListUsersHandle(service iface.Service) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tlimit := 100\n\t\trawLimit := r.URL.Query()[\"limit\"]\n\t\tif len(rawLimit) > 0 {\n\t\t\tvar err error\n\t\t\tlimit, err = strconv.Atoi(rawLimit[0])\n\t\t\tif err != nil || limit <= 0 {\n\t\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\t\tfmt.Fprintf(w, \"invalid limit \\\"%s\\\"\", rawLimit[0])\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tusers, err := service.FilterUsers(r.Context(), iface.FilterUsers{Limit: uint(limit)})\n\t\tif err != nil {\n\t\t\tlog.Log(err)\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tfmt.Fprintf(w, \"service failed\")\n\t\t\treturn\n\t\t}\n\n\t\tJSON(w, r, map[string]interface{}{\n\t\t\t\"users\": users,\n\t\t})\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
PostUserHandler crea un usuario en la base de datos | func PostUserHandler(w http.ResponseWriter, r *http.Request) {
var user User
err := json.NewDecoder(r.Body).Decode(&user)
if err != nil {
panic(err)
}
user.CreateAt = time.Now()
id++
k := strconv.Itoa(id)
Listusers[k] = user
w.Header().Set("Content-Type", "application/json")
j, err := json.Marshal(user)
if err != nil {
panic(err)
}
w.WriteHeader(http.StatusCreated)
w.Write(j)
} | [
"func PostUserHandler(w http.ResponseWriter, r *http.Request) {\n\tvar user models.User\n\terr := json.NewDecoder(r.Body).Decode(&user)\n\tif err != nil {\n\t\tlog.Println(\"Error al parsear usuario\")\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tstatus := users.CreateUser(user)\n\tw.WriteHeader(status)\n}",
"func CreateUserHandler(w http.ResponseWriter, r *http.Request) error {\n\tr.ParseForm()\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\tuser := UserController{}\n\tif err := json.Unmarshal(body, &user); err != nil {\n\t\treturn err\n\t}\n\tif ok := tools.CheckEmail(user.Email); !ok {\n\t\tw.WriteHeader(400)\n\t\treturn nil\n\t}\n\tif ok := tools.CheckUsername(user.Username); !ok {\n\t\tw.WriteHeader(400)\n\t\treturn nil\n\t}\n\tpass, err := tools.HashPassword(user.Password)\n\tif err != nil {\n\t\treturn err\n\t}\n\tuser.User.Email = user.Email\n\tuser.User.Password = pass\n\tuser.User.Username = user.Username\n\tuser.Username = tools.GenerateUserName()\n\n\t// create session to add a user\n\tsession := MysqlEngine.NewSession()\n\taffected, err := user.User.Insert(session)\n\tif err != nil {\n\t\tsession.Rollback()\n\t\treturn err\n\t}\n\t// add home directory\n\terr = user.User.AddUserHome()\n\tif err != nil {\n\t\tsession.Rollback()\n\t\treturn err\n\t}\n\tsession.Commit()\n\tif affected == 0 {\n\t\tw.WriteHeader(400)\n\t\treturn nil\n\t}\n\treturn nil\n}",
"func (h *UserHandler) handlePostUser(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n //Decode request\n var req postUserRequest\n if err := json.NewDecoder(r.Body).Decode(&req.User); err != nil {\n Error(w, ErrInvalidJSON, http.StatusBadRequest, h.Logger)\n return\n }\n u := req.User\n\n //create a new user\n err := h.UserService.CreateUser(u)\n if err != nil {\n Error(w, err, http.StatusBadRequest, h.Logger)\n }\n w.Header().Set(\"Content-Type\", \"application/json\")\n json.NewEncoder(w).Encode(&postUserResponse{User: u})\n}",
"func PostUser(w http.ResponseWriter, req *http.Request, app *App) {\n\tif models.UserCount(app.Db) == 0 {\n\t\temail, password := req.FormValue(\"email\"), req.FormValue(\"password\")\n\t\tuser := models.NewUser(email, password)\n\t\terr := user.Save(app.Db)\n\t\tif err != nil {\n\t\t\thttp.Redirect(w, req, app.Config.General.Prefix+\"/register\", http.StatusFound)\n\t\t} else {\n\t\t\thttp.Redirect(w, req, app.Config.General.Prefix+\"/login\", http.StatusFound)\n\t\t}\n\t}\n}",
"func (h *UserHandler) handlePostUser(w http.ResponseWriter, r *http.Request) {\n\tctx := r.Context()\n\n\treq, err := decodePostUserRequest(ctx, r)\n\tif err != nil {\n\t\tEncodeError(ctx, err, w)\n\t\treturn\n\t}\n\n\tif err := h.UserService.CreateUser(ctx, req.User); err != nil {\n\t\tEncodeError(ctx, err, w)\n\t\treturn\n\t}\n\n\tif err := encodeResponse(ctx, w, http.StatusCreated, newUserResponse(req.User)); err != nil {\n\t\tEncodeError(ctx, err, w)\n\t\treturn\n\t}\n}",
"func CreateUserHandler(w http.ResponseWriter, req *http.Request) {\n // Validate internal token.\n if internalToken := req.Header.Get(app.Config.AuthHeaderName); internalToken != app.Config.RestApiToken {\n respond.Error(w, errmsg.Unauthorized())\n return\n }\n\n // Parse & validate payload.\n var pl payload.CreateUserPayload\n\n if !pl.Validate(req) {\n respond.Error(w, errmsg.InvalidPayload())\n return\n }\n\n // Check if the executor is using the USER_CREATION_HASH to create this user.\n usingUserCreationPw := pl.ExecutorEmail == \"\" && app.Config.UserCreationHash != \"\" &&\n crypt.VerifySha256(pl.ExecutorPassword, app.Config.UserCreationHash)\n\n // If not using USER_CREATION_HASH for auth, verify executor exists using email/pw.\n if !usingUserCreationPw {\n // Get executor user by email.\n executorUser, err := usersvc.FromEmail(pl.ExecutorEmail)\n\n if err != nil {\n app.Log.Errorln(err.Error())\n respond.Error(w, errmsg.UserNotFound())\n return\n }\n\n // Ensure executor user's password is correct.\n if !crypt.VerifyBcrypt(pl.ExecutorPassword, executorUser.HashedPw) {\n app.Log.Errorln(\"error creating new User: invalid executor user password\")\n respond.Error(w, errmsg.Unauthorized())\n return\n }\n\n // Only admin users can create other users.\n if !executorUser.Admin {\n app.Log.Errorln(\"error creating new User: executor user must be an admin\")\n respond.Error(w, errmsg.Unauthorized())\n return\n }\n }\n\n // Hash provided user password.\n hashedPw, err := crypt.BcryptHash(pl.NewPassword)\n\n if err != nil {\n app.Log.Errorf(\"error creating new User: bcrypt password hash failed with %s\\n\", err.Error())\n respond.Error(w, errmsg.ISE())\n return\n }\n\n // Create new User.\n newUser, err := usersvc.Create(pl.NewEmail, hashedPw, pl.Admin)\n\n if err != nil {\n app.Log.Errorln(err.Error())\n pqError, ok := err.(*pq.Error)\n\n if ok && pqError.Code.Name() == \"unique_violation\" {\n respond.Error(w, errmsg.EmailNotAvailable())\n } else {\n 
respond.Error(w, errmsg.UserCreationFailed())\n }\n\n return\n }\n\n // Create response payload and respond.\n respData := successmsg.UserCreationSuccess\n respData[\"uid\"] = newUser.Uid\n\n respond.Created(w, respData)\n}",
"func (e *env) UserSignupPostHandler(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\tcase \"GET\":\n\tcase \"POST\":\n\t\tusername := r.FormValue(\"username\")\n\t\tpassword := r.FormValue(\"password\")\n\n\t\terr := e.authState.NewUser(username, password)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error adding user:\", err)\n\t\t\te.authState.SetFlash(\"Error adding user. Check logs.\", r)\n\t\t\thttp.Redirect(w, r, r.Referer(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\t// Login the recently added user\n\t\tif e.authState.Auth(username, password) {\n\t\t\te.authState.Login(username, r)\n\t\t}\n\n\t\te.authState.SetFlash(\"Successfully added '\"+username+\"' user.\", r)\n\t\thttp.Redirect(w, r, \"/\", http.StatusSeeOther)\n\n\tcase \"PUT\":\n\t\t// Update an existing record.\n\tcase \"DELETE\":\n\t\t// Remove the record.\n\tdefault:\n\t\t// Give an error message.\n\t}\n}",
"func CreateUserHandler(w http.ResponseWriter, r *http.Request) {\n\n\tuser := &models.User{}\n\terr := json.NewDecoder(r.Body).Decode(user) //decode the request body into struct and fail if any error occur\n\tif err != nil {\n\t\tfmt.Println(\"Debug user CreateUserHandler:\", err)\n\t\tutils.Respond(w, utils.Message(false, \"Invalid request\"))\n\t\treturn\n\t}\n\n\tresp := user.Create() //Create user\n\tutils.Respond(w, resp)\n}",
"func UserCreate(w http.ResponseWriter, r *http.Request) {\n\terr := json.NewDecoder(r.Body).Decode(&user)\n\tif err != nil {\n\t\tm.Message = fmt.Sprintf(\"Error al leer el usuario a registrarse: %s\", err)\n\t\tm.Code = http.StatusBadRequest\n\t\tcommons.DisplayMessage(w, m)\n\t\treturn\n\t}\n\tif user.Password != user.ConfirmPassword {\n\t\tm.Message = \"Las contraseña no coinciden\"\n\t\tm.Code = http.StatusBadRequest\n\t\tcommons.DisplayMessage(w, m)\n\t\treturn\n\t}\n\tuser.Password = password\n\tavatarmd5 := md5.Sum([]byte(user.Password))\n\tavatarstr := fmt.Sprintf(\"%x\", avatarmd5)\n\tuser.Avatar = \"https://gravatar.com/avatar/\" + avatarstr + \"?s=100\"\n\tdatabase := configuration.GetConnection()\n\tdefer database.Close()\n\terr = database.Create(&user).Error\n\tif err != nil {\n\t\tm.Message = fmt.Sprintf(\"Error al crear el registro: %s\", err)\n\t\tm.Code = http.StatusBadRequest\n\t\tcommons.DisplayMessage(w, m)\n\t\treturn\n\t}\n\tm.Message = \"Usuario creado con éxito\"\n\tm.Code = http.StatusCreated\n\tcommons.DisplayMessage(w, m)\n}",
"func PostUser(w http.ResponseWriter, req *http.Request) {\n\tID := req.FormValue(\"id\")\n\tnameStr := req.FormValue(\"name\")\n\tname := string(nameStr)\n\n\tuser := db.User{ID: ID, Name: name}\n\n\tdb.Save(user)\n\n\tw.Write([]byte(\"OK\"))\n}",
"func PostUser(w http.ResponseWriter, r *http.Request) {\n\tresponse := services.CreateUser(r)\n\n\trender.Status(r, response.Code)\n\trender.JSON(w, r, response)\n}",
"func createUser(w http.ResponseWriter, r *http.Request) {\n\t//Recieve data from web app\n\tvar newUser User\n\t_ = json.NewDecoder(r.Body).Decode(&newUser)\n\n\t//Make database connection\n\tdb := opendb()\n\tdefer db.Close()\n\n\t//Create and execute SQL statement\n\tsqlStatement := `INSERT INTO \"user\" (user_first_name, user_last_name, user_password) VALUES ($1, $2, $3)`\n\t_, err := db.Exec(sqlStatement, newUser.FirstName, newUser.LastName, newUser.Password)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}",
"func NewUserHandler() *UserHandler {\n h := &UserHandler{\n Router: httprouter.New(),\n Logger: log.New(os.Stderr, \"\", log.LstdFlags),\n }\n h.POST(\"/api/user\", h.handlePostUser)\n return h\n}",
"func (userController *UserController) PostCreateUserPage(w http.ResponseWriter, r *http.Request) {\n\tvar user Pengguna\n\tr.ParseForm()\n\n\t// Prevent csrf attack\n\tcsrfToken := userController.SessionHelper.GetCSRFToken(r)\n\tif csrfToken != r.FormValue(\"csrfToken\") {\n\t\tuserController.SessionHelper.SetValidationMessage(r, w, \"csrf token salah\")\n\t\thttp.Redirect(w, r, \"/user/create\", http.StatusFound)\n\t\treturn\n\t}\n\n\t// Prevent xss attack\n\thashPassword, err := userController.PasswordHashHelper.HashPassword(\n\t\ttemplate.HTMLEscapeString(r.FormValue(\"password\")))\n\tif err != nil {\n\t\tfmt.Println(\"error when hash user password\")\n\t}\n\n\tage, err := strconv.Atoi(template.HTMLEscapeString(r.FormValue(\"age\")))\n\tif err != nil {\n\t\tfmt.Println(\"error when parse user age\")\n\t}\n\n\tuserGroupID, err := strconv.ParseInt(template.HTMLEscapeString(r.FormValue(\"group\")), 10, 64)\n\tif err != nil {\n\t\tfmt.Println(\"error when parse user group id\")\n\t}\n\n\tuser.Email = template.HTMLEscapeString(r.FormValue(\"email\"))\n\tuser.NamaDepan = template.HTMLEscapeString(r.FormValue(\"firstname\"))\n\tuser.NamaBelakang = template.HTMLEscapeString(r.FormValue(\"lastname\"))\n\tuser.KataSandi = hashPassword\n\tuser.Umur = age\n\n\t// Validate user input data\n\tvalidationMessage := userController.UserHelper.ValidateCreateNewUser(user)\n\tif validationMessage == \"Sukses\" {\n\t\tif userController.UserRepository.IsUserEmailExist(user.Email) {\n\t\t\tvalidationMessage = \"Email sudah ada di database\"\n\t\t} else {\n\t\t\tuserID, err := userController.UserRepository.CreateNewUser(user)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"error when create user\")\n\t\t\t}\n\n\t\t\terr = userController.GroupRepository.InsertUserGroup(userID, userGroupID)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"error when insert user group\")\n\t\t\t}\n\n\t\t\tuserController.SessionHelper.SetValidationMessage(r, w, \"\")\n\t\t\thttp.Redirect(w, r, \"/profile\", 
http.StatusFound)\n\t\t\treturn\n\t\t}\n\t}\n\n\tuserController.SessionHelper.SetValidationMessage(r, w, validationMessage)\n\thttp.Redirect(w, r, \"/user/create\", http.StatusFound)\n}",
"func UserRegisterPostHandler(w http.ResponseWriter, r *http.Request) {\n\tb := form.RegistrationForm{}\n\terr := form.NewErrors()\n\tif !captcha.Authenticate(captcha.Extract(r)) {\n\t\terr[\"errors\"] = append(err[\"errors\"], \"Wrong captcha!\")\n\t}\n\tif len(err) == 0 {\n\t\tif len(r.PostFormValue(\"email\")) > 0 {\n\t\t\t_, err = form.EmailValidation(r.PostFormValue(\"email\"), err)\n\t\t}\n\t\t_, err = form.ValidateUsername(r.PostFormValue(\"username\"), err)\n\t\tif len(err) == 0 {\n\t\t\tmodelHelper.BindValueForm(&b, r)\n\t\t\terr = modelHelper.ValidateForm(&b, err)\n\t\t\tif len(err) == 0 {\n\t\t\t\t_, errorUser := userService.CreateUser(w, r)\n\t\t\t\tif errorUser != nil {\n\t\t\t\t\terr[\"errors\"] = append(err[\"errors\"], errorUser.Error())\n\t\t\t\t}\n\t\t\t\tif len(err) == 0 {\n\t\t\t\t\tcommon := NewCommonVariables(r)\n\t\t\t\t\tcommon.User = &model.User{\n\t\t\t\t\t\tEmail: r.PostFormValue(\"email\"), // indicate whether user had email set\n\t\t\t\t\t}\n\t\t\t\t\thtv := UserRegisterTemplateVariables{common, b, err}\n\t\t\t\t\terrorTmpl := viewRegisterSuccessTemplate.ExecuteTemplate(w, \"index.html\", htv)\n\t\t\t\t\tif errorTmpl != nil {\n\t\t\t\t\t\thttp.Error(w, errorTmpl.Error(), http.StatusInternalServerError)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif len(err) > 0 {\n\t\tb.CaptchaID = captcha.GetID()\n\t\thtv := UserRegisterTemplateVariables{NewCommonVariables(r), b, err}\n\t\terrorTmpl := viewRegisterTemplate.ExecuteTemplate(w, \"index.html\", htv)\n\t\tif errorTmpl != nil {\n\t\t\thttp.Error(w, errorTmpl.Error(), http.StatusInternalServerError)\n\t\t}\n\t}\n}",
"func UserRegisterPostHandler(w http.ResponseWriter, r *http.Request) {\n\tb := form.RegistrationForm{}\n\terr := form.NewErrors()\n\tif !captcha.Authenticate(captcha.Extract(r)) {\n\t\terr[\"errors\"] = append(err[\"errors\"], \"Wrong captcha!\")\n\t}\n\tif len(err) == 0 {\n\t\tif len(r.PostFormValue(\"email\")) > 0 {\n\t\t\t_, err = form.EmailValidation(r.PostFormValue(\"email\"), err)\n\t\t}\n\t\t_, err = form.ValidateUsername(r.PostFormValue(\"username\"), err)\n\t\tif len(err) == 0 {\n\t\t\tmodelHelper.BindValueForm(&b, r)\n\t\t\terr = modelHelper.ValidateForm(&b, err)\n\t\t\tif len(err) == 0 {\n\t\t\t\t_, errorUser := userService.CreateUser(w, r)\n\t\t\t\tif errorUser != nil {\n\t\t\t\t\terr[\"errors\"] = append(err[\"errors\"], errorUser.Error())\n\t\t\t\t}\n\t\t\t\tif len(err) == 0 {\n\t\t\t\t\tlanguages.SetTranslationFromRequest(viewRegisterSuccessTemplate, r, \"en-us\")\n\t\t\t\t\tu := model.User{\n\t\t\t\t\t\tEmail: r.PostFormValue(\"email\"), // indicate whether user had email set\n\t\t\t\t\t}\n\t\t\t\t\thtv := UserRegisterTemplateVariables{b, err, NewSearchForm(), Navigation{}, &u, r.URL, mux.CurrentRoute(r)}\n\t\t\t\t\terrorTmpl := viewRegisterSuccessTemplate.ExecuteTemplate(w, \"index.html\", htv)\n\t\t\t\t\tif errorTmpl != nil {\n\t\t\t\t\t\thttp.Error(w, errorTmpl.Error(), http.StatusInternalServerError)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif len(err) > 0 {\n\t\tb.CaptchaID = captcha.GetID()\n\t\tlanguages.SetTranslationFromRequest(viewRegisterTemplate, r, \"en-us\")\n\t\thtv := UserRegisterTemplateVariables{b, err, NewSearchForm(), Navigation{}, GetUser(r), r.URL, mux.CurrentRoute(r)}\n\t\terrorTmpl := viewRegisterTemplate.ExecuteTemplate(w, \"index.html\", htv)\n\t\tif errorTmpl != nil {\n\t\t\thttp.Error(w, errorTmpl.Error(), http.StatusInternalServerError)\n\t\t}\n\t}\n}",
"func AddUserHandler(ctx *gin.Context) {\n\n\tbody := ctx.Request.Body\n\tdefer body.Close()\n\n\tuser := new(model.User)\n\n\terr := json.NewDecoder(body).Decode(user)\n\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\tctx.JSON(400, \"Malformed request body\")\n\t\treturn\n\t}\n\n\tidStr := ctx.Param(\"id\")\n\n\tpassword := user.Password\n\tfmt.Println(password)\n\n\tif idStr != \"\" {\n\t\tid, _ := strconv.ParseInt(idStr, 10, 64)\n\t\tuser.ID = &id\n\t} else {\n\t\tpassByte, _ := bcrypt.GenerateFromPassword([]byte(\"admin\"), 0)\n\t\tuser.Password = string(passByte)\n\t}\n\n\terr = repository.AddUserOrUpdate(user)\n\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\tctx.JSON(500, err.Error())\n\t\treturn\n\t}\n\n\tif idStr == \"\" {\n\t\tgo sendWelcomeMail(*user, password)\n\t}\n\n\tctx.JSON(200, \"Ok\")\n\n}",
"func NewUserCreateHandler(db *gorm.DB) echo.HandlerFunc {\n\treturn func(ctx echo.Context) error {\n\t\tuser := &model.User{}\n\t\tif err := ctx.Bind(user); err != nil {\n\t\t\treturn echo.NewHTTPError(http.StatusInternalServerError, err)\n\t\t}\n\n\t\tif err := user.Validate(); err != nil {\n\t\t\treturn echo.NewHTTPError(http.StatusUnprocessableEntity, err)\n\t\t}\n\n\t\thashBytes, err := bcrypt.GenerateFromPassword([]byte(user.Password), 10)\n\t\tif err != nil {\n\t\t\treturn echo.NewHTTPError(http.StatusInternalServerError, err)\n\t\t}\n\n\t\tuser.Password = \"\"\n\t\tuser.PasswordDigest = hashBytes\n\t\tuser.ResetJWTToken()\n\n\t\tif err := db.Create(user).Error; err != nil {\n\t\t\treturn echo.NewHTTPError(http.StatusInternalServerError, err.Error())\n\t\t}\n\n\t\treturn ctx.JSON(http.StatusCreated, user)\n\t}\n}",
"func UserRegisterPost(w http.ResponseWriter, r *http.Request) {\n\t// Get session\n\tsess := session.Instance(r)\n\n\t// Prevent brute force login attempts by not hitting MySQL and pretending like it was invalid :-)\n\tif sess.Values[\"register_attempt\"] != nil && sess.Values[\"register_attempt\"].(int) >= 5 {\n\t\tlog.Println(\"Brute force register prevented\")\n\t\thttp.Redirect(w, r, \"/not_found\", http.StatusFound)\n\t\treturn\n\t}\n\n\tbody, readErr := ioutil.ReadAll(r.Body)\n\tif readErr != nil {\n\t\tlog.Println(readErr)\n\t\tReturnError(w, readErr)\n\t\treturn\n\t}\n\n\tvar regResp webpojo.UserCreateResp\n\tif len(body) == 0 {\n\t\tlog.Println(\"Empty json payload\")\n\t\tRecordRegisterAttempt(sess)\n\t\tsess.Save(r, w)\n\t\tregResp = webpojo.UserCreateResp{constants.StatusCode_400, constants.Msg_400}\n\t\tbs, err := json.Marshal(regResp)\n\t\tif err != nil {\n\t\t\tReturnError(w, err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprint(w, string(bs))\n\t\treturn\n\t}\n\n\t//log.Println(\"r.Body\", string(body))\n\tregReq := webpojo.UserCreateReq{}\n\tjsonErr := json.Unmarshal(body, ®Req)\n\tif jsonErr != nil {\n\t\tlog.Println(jsonErr)\n\t\tReturnError(w, jsonErr)\n\t\treturn\n\t}\n\tlog.Println(regReq.Email)\n\n\t// Validate with required fields\n\tif validate, _ := validateRegisterInfo(r, ®Req, constants.DefaultRole); !validate {\n\t\tlog.Println(\"Invalid reg request! 
Missing field\")\n\t\tRecordRegisterAttempt(sess)\n\t\tsess.Save(r, w)\n\t\tregResp = webpojo.UserCreateResp{constants.StatusCode_400, constants.Msg_400}\n\t\tbs, err := json.Marshal(regResp)\n\t\tif err != nil {\n\t\t\tReturnError(w, err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprint(w, string(bs))\n\t\treturn\n\t}\n\n\tpassword, errp := passhash.HashString(regReq.Password)\n\n\t// If password hashing failed\n\tif errp != nil {\n\t\tlog.Println(errp)\n\t\tRecordRegisterAttempt(sess)\n\t\tsess.Save(r, w)\n\t\tregResp = webpojo.UserCreateResp{constants.StatusCode_500, constants.Msg_500}\n\t\tbs, err := json.Marshal(regResp)\n\t\tif err != nil {\n\t\t\tReturnError(w, err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprint(w, string(bs))\n\t\treturn\n\t}\n\n\t// Get database result\n\t_, err := model.UserByEmail(regReq.Email)\n\n\tif err == model.ErrNoResult { // If success (no user exists with that email)\n\t\tex := model.UserCreate(regReq.FirstName, regReq.LastName, regReq.Email, password)\n\t\t// Will only error if there is a problem with the query\n\t\tif ex != nil {\n\t\t\tlog.Println(ex)\n\t\t\tRecordRegisterAttempt(sess)\n\t\t\tsess.Save(r, w)\n\t\t\tregResp = webpojo.UserCreateResp{constants.StatusCode_500, constants.Msg_500}\n\t\t\tbs, err := json.Marshal(regResp)\n\t\t\tif err != nil {\n\t\t\t\tReturnError(w, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfmt.Fprint(w, string(bs))\n\t\t} else {\n\t\t\tlog.Println(\"Account created successfully for: \" + regReq.Email)\n\t\t\tRecordRegisterAttempt(sess)\n\t\t\tsess.Save(r, w)\n\t\t\tregResp = webpojo.UserCreateResp{constants.StatusCode_200, constants.Msg_200}\n\t\t\tbs, err := json.Marshal(regResp)\n\t\t\tif err != nil {\n\t\t\t\tReturnError(w, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfmt.Fprint(w, string(bs))\n\t\t}\n\t} else if err != nil { // Catch all other errors\n\t\tlog.Println(err)\n\t\tRecordRegisterAttempt(sess)\n\t\tsess.Save(r, w)\n\t\tregResp = webpojo.UserCreateResp{constants.StatusCode_500, constants.Msg_500}\n\t\tbs, err := 
json.Marshal(regResp)\n\t\tif err != nil {\n\t\t\tReturnError(w, err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprint(w, string(bs))\n\t} else { // Else the user already exists\n\t\tlog.Println(\"User already existed!!!\")\n\t\tRecordRegisterAttempt(sess)\n\t\tsess.Save(r, w)\n\t\tregResp = webpojo.UserCreateResp{constants.StatusCode_400, constants.Msg_400}\n\t\tbs, err := json.Marshal(regResp)\n\t\tif err != nil {\n\t\t\tReturnError(w, err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprint(w, string(bs))\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
PutUserHandler Actualiza un usuario en base al id | func PutUserHandler(w http.ResponseWriter, r *http.Request) {
params := mux.Vars(r)
k := params["id"]
var userupdate User
err := json.NewDecoder(r.Body).Decode(&userupdate)
if err != nil {
panic(err)
}
if user, ok := Listusers[k]; ok {
userupdate.CreateAt = user.CreateAt
delete(Listusers, k)
Listusers[k] = userupdate
} else {
log.Printf("No encontramos el id %s", k)
}
w.WriteHeader(http.StatusNoContent)
} | [
"func PutUserHandler(w http.ResponseWriter, r *http.Request) {\n\tid := mux.Vars(r)[\"id\"]\n\tvar userUpdate models.User\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\terr := json.NewDecoder(r.Body).Decode(&userUpdate)\n\tif err != nil {\n\t\tlog.Printf(\"Error al parsear usuario con el id %s\", id)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\tstatus := users.PutUser(id, userUpdate)\n\tw.WriteHeader(status)\n}",
"func (handler *UserHandler) handlePutUser(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\n\tuserID, err := strconv.Atoi(id)\n\tif err != nil {\n\t\tError(w, err, http.StatusBadRequest, handler.Logger)\n\t\treturn\n\t}\n\n\ttokenData, err := extractTokenDataFromRequestContext(r)\n\tif err != nil {\n\t\tError(w, err, http.StatusInternalServerError, handler.Logger)\n\t}\n\n\tif tokenData.Role != portainer.AdministratorRole && tokenData.ID != portainer.UserID(userID) {\n\t\tError(w, portainer.ErrUnauthorized, http.StatusForbidden, handler.Logger)\n\t\treturn\n\t}\n\n\tvar req putUserRequest\n\tif err = json.NewDecoder(r.Body).Decode(&req); err != nil {\n\t\tError(w, ErrInvalidJSON, http.StatusBadRequest, handler.Logger)\n\t\treturn\n\t}\n\n\t_, err = govalidator.ValidateStruct(req)\n\tif err != nil {\n\t\tError(w, ErrInvalidRequestFormat, http.StatusBadRequest, handler.Logger)\n\t\treturn\n\t}\n\n\tif req.Password == \"\" && req.Role == 0 {\n\t\tError(w, ErrInvalidRequestFormat, http.StatusBadRequest, handler.Logger)\n\t\treturn\n\t}\n\n\tuser, err := handler.UserService.User(portainer.UserID(userID))\n\tif err == portainer.ErrUserNotFound {\n\t\tError(w, err, http.StatusNotFound, handler.Logger)\n\t\treturn\n\t} else if err != nil {\n\t\tError(w, err, http.StatusInternalServerError, handler.Logger)\n\t\treturn\n\t}\n\n\tif req.Password != \"\" {\n\t\tuser.Password, err = handler.CryptoService.Hash(req.Password)\n\t\tif err != nil {\n\t\t\tError(w, portainer.ErrCryptoHashFailure, http.StatusBadRequest, handler.Logger)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif req.Role != 0 {\n\t\tif tokenData.Role != portainer.AdministratorRole {\n\t\t\tError(w, portainer.ErrUnauthorized, http.StatusForbidden, handler.Logger)\n\t\t\treturn\n\t\t}\n\t\tif req.Role == 1 {\n\t\t\tuser.Role = portainer.AdministratorRole\n\t\t} else {\n\t\t\tuser.Role = portainer.StandardUserRole\n\t\t}\n\t}\n\n\terr = handler.UserService.UpdateUser(user.ID, user)\n\tif 
err != nil {\n\t\tError(w, err, http.StatusInternalServerError, handler.Logger)\n\t\treturn\n\t}\n}",
"func PutUserByID(w http.ResponseWriter, r *http.Request) {\n\t// Get Parameters From URI\n\tparamID := chi.URLParam(r, \"id\")\n\n\t// Get ID Parameters From URI Then Convert it to Integer\n\tuserID, err := strconv.Atoi(paramID)\n\tif err != nil {\n\t\trouter.ResponseInternalError(w, err.Error())\n\t\treturn\n\t}\n\n\t// Check if Requested Data in User Array Range\n\tif userID <= 0 || userID > len(model.Users) {\n\t\trouter.ResponseBadRequest(w, \"invalid array index\")\n\t\treturn\n\t}\n\n\tvar user model.User\n\n\t// Decode JSON from Request Body to User Data\n\t// Use _ As Temporary Variable\n\t_ = json.NewDecoder(r.Body).Decode(&user)\n\n\t// Update User to Users Array\n\tmodel.Users[userID-1].Name = user.Name\n\tmodel.Users[userID-1].Email = user.Email\n\n\trouter.ResponseUpdated(w)\n}",
"func (h *handler) updateUser(w http.ResponseWriter, r *http.Request) {\n\tid, err := getID(r)\n\tif err != nil {\n\t\tlogRespond(w, r, err)\n\t\treturn\n\t}\n\n\tvar user models.User\n\n\terr = json.NewDecoder(r.Body).Decode(&user)\n\tif err != nil {\n\t\terr = models.NewReqErr(err, \"invalid request body\")\n\t\tlogRespond(w, r, err)\n\t\treturn\n\t}\n\n\tuser.ID = id\n\n\t// ignoring fields the user should not be allowed to update manually\n\tuser.Games = nil\n\tuser.TotalGameTime = 0\n\n\terr = h.SetUser(&user)\n\tif err != nil {\n\t\tlogRespond(w, r, err)\n\t\treturn\n\t}\n\n\trespondPlain(w, r, \"Success\")\n}",
"func (h *UserHandler) handlePutUserPassword(w http.ResponseWriter, r *http.Request) {\n\tctx := r.Context()\n\tusername, err := h.putPassword(ctx, w, r)\n\tif err != nil {\n\t\tEncodeError(ctx, err, w)\n\t\treturn\n\t}\n\tfilter := platform.UserFilter{\n\t\tName: &username,\n\t}\n\tb, err := h.UserService.FindUser(ctx, filter)\n\tif err != nil {\n\t\tEncodeError(ctx, err, w)\n\t\treturn\n\t}\n\n\tif err := encodeResponse(ctx, w, http.StatusOK, newUserResponse(b)); err != nil {\n\t\tEncodeError(ctx, err, w)\n\t\treturn\n\t}\n}",
"func updateUserByIDHandler(c *gin.Context) {\n\tid, _ := strconv.Atoi(c.Param(\"id\"))\n\tuser, _ := c.Get(JwtIdentityKey)\n\n\t// Role check.\n\tif !isAdmin(user) {\n\t\tc.JSON(http.StatusUnauthorized, gin.H{\"message\": \"unauthorized\"})\n\t\treturn\n\t}\n\n\t// Decode json.\n\tvar json userUpdateRequest\n\tif err := c.ShouldBindJSON(&json); err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error()})\n\t\treturn\n\t}\n\n\tdb := data.New()\n\tu, err := db.Users.GetUserByID(id)\n\tif err != nil {\n\t\tc.JSON(http.StatusNotFound, gin.H{\n\t\t\t\"status\": http.StatusNotFound,\n\t\t\t\"message\": \"User does not exist\",\n\t\t})\n\t\treturn\n\t}\n\n\t// Disallow updates on master user.\n\tif id != 1 {\n\t\t// Set role.\n\t\tif json.Role != \"\" {\n\t\t\tu.Role = json.Role\n\t\t}\n\n\t\t// Set active status.\n\t\tu.Active = json.Active\n\t}\n\n\tupdatedUser, _ := db.Users.UpdateUserByID(id, u)\n\tc.JSON(http.StatusOK, updatedUser)\n}",
"func UpdateUserHandler(connection *sql.DB, cnf config.Config) negroni.HandlerFunc {\n\treturn negroni.HandlerFunc(func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\t\tvar queryToken = r.URL.Query().Get(\"token\")\n\n\t\tif len(queryToken) < 1 {\n\t\t\tqueryToken = r.Header.Get(\"token\")\n\t\t}\n\n\t\tif len(queryToken) < 1 {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tw.Write([]byte(string(\"token is mandatory\")))\n\t\t\treturn\n\t\t}\n\n\t\tuser := &models.UserResponse{}\n\t\terr := util.RequestToJSON(r, user)\n\t\tif err != nil {\n\t\t\tutil.SendBadRequest(w, errors.New(\"bad json\"))\n\t\t\treturn\n\t\t}\n\n\t\tsecretKey := cnf.SecretKey\n\t\ttok, err := jwt.Parse(queryToken, func(t *jwt.Token) (interface{}, error) {\n\t\t\treturn []byte(secretKey), nil\n\t\t})\n\t\tif err != nil {\n\t\t\tutil.SendBadRequest(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tclaims := tok.Claims.(jwt.MapClaims)\n\t\tvar ID = claims[\"sub\"].(float64) // gets the ID\n\n\t\tif int(ID) != user.ID {\n\t\t\tutil.SendBadRequest(w, errors.New(\"you can only change your own user object\"))\n\t\t\treturn\n\t\t}\n\n\t\tif err := user.Validate(); err == nil {\n\n\t\t\tdb.UpdateUser(connection, user)\n\n\t\t\tutil.SendOK(w, user)\n\n\t\t} else {\n\t\t\tutil.SendBadRequest(w, err)\n\t\t}\n\t})\n}",
"func (c *Controller) UpdateUserByIDHandler(cxt *gin.Context){\r\n\tID := cxt.Param(\"id\")\r\n\tFirstname := cxt.PostForm(\"firstname\")\r\n\tLastname := cxt.PostForm(\"lastname\")\r\n\tUsername := cxt.PostForm(\"username\")\r\n\tEmail := cxt.PostForm(\"email\")\r\n\tnewUser := models.User {\r\n\t\tFirstname: Firstname,\r\n\t\tLastname: Lastname,\r\n\t\tUsername: Username, \r\n\t\tEmail: Email,\r\n\t}\r\n\tresult := c.service.UpdateUser(ID, &newUser)\r\n\tcxt.JSON(http.StatusOK, gin.H{\r\n\t\t\"result\": result, \r\n\t})\r\n}",
"func (s *server) updateUserHandler() http.HandlerFunc {\n\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tid := r.Context().Value(\"userID\")\n\n\t\t// get the target record\n\t\tu, err := s.store.UserByID(id.(string))\n\t\tif err != nil {\n\t\t\trespondJSON(w, http.StatusNotFound, nil, err)\n\t\t\treturn\n\t\t}\n\n\t\t// decode the body of the request to get fields to update\n\t\tvar body bson.M\n\t\terr = json.NewDecoder(r.Body).Decode(&body)\n\t\tif err != nil {\n\t\t\trespondJSON(w, http.StatusBadRequest, nil, err)\n\t\t\treturn\n\t\t}\n\n\t\terr = u.SavePartial(body)\n\t\tif err != nil {\n\t\t\trespondJSON(w, http.StatusInternalServerError, nil, err)\n\t\t\treturn\n\t\t}\n\n\t\tu.Password = datastore.PasswordMask\n\t\trespondJSON(w, http.StatusOK, u, err)\n\t}\n}",
"func UpdateUser(w http.ResponseWriter, r *http.Request) {\n\tparams := mux.Vars(r)\n\tuserID, err := strconv.ParseInt(params[\"id\"], 10, 64)\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tuserIDToken, err := authentication.ExtractUserId(r)\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusUnauthorized, err)\n\t\treturn\n\t}\n\n\tif userIDToken != userID {\n\t\tresponses.Error(w, http.StatusForbidden, errors.New(\"não é possível manipular usuário de terceiros\"))\n\t\treturn\n\t}\n\n\tbodyRequest, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusUnprocessableEntity, err)\n\t\treturn\n\t}\n\n\tvar user models.User\n\tif err := json.Unmarshal(bodyRequest, &user); err != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tif err := user.Prepare(false); err != nil {\n\t\tresponses.Error(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\tuser.Id = userID\n\tif err := validateUniqueDataUser(user, false); err != nil {\n\t\tresponses.Error(w, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\tdb, err := database.Connect()\n\tif err != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\trepository := repository.NewRepositoryUser(db)\n\tif err = repository.UpdateUser(userID, user); err != nil {\n\t\tresponses.Error(w, http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\n\tresponses.JSON(w, http.StatusNoContent, nil)\n\n}",
"func (h *Handler) EditUserByID(w http.ResponseWriter, r *http.Request, param httprouter.Params) {\n\tuserID := param.ByName(\"userID\")\n\t// read json body\n\tbody, err := ioutil.ReadAll(r.Body)\n\tdefer r.Body.Close()\n\tif err != nil {\n\t\trenderJSON(w, []byte(`\n\t\t\tmessage: \"Failed to read body\"\n\t\t`), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// parse json body\n\tvar user User\n\terr = json.Unmarshal(body, &user)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\tquery := fmt.Sprintf(\"UPDATE users SET name = '%s' WHERE id = %s\", user.Name, userID)\n\t_, err = h.DB.Query(query)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\trenderJSON(w, []byte(`\n\t{\n\t\tstatus: \"success\",\n\t\tmessage: \"Update user success!\"\n\t}\n\t`), http.StatusOK)\n}",
"func AddUserHandler(ctx *gin.Context) {\n\n\tbody := ctx.Request.Body\n\tdefer body.Close()\n\n\tuser := new(model.User)\n\n\terr := json.NewDecoder(body).Decode(user)\n\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\tctx.JSON(400, \"Malformed request body\")\n\t\treturn\n\t}\n\n\tidStr := ctx.Param(\"id\")\n\n\tpassword := user.Password\n\tfmt.Println(password)\n\n\tif idStr != \"\" {\n\t\tid, _ := strconv.ParseInt(idStr, 10, 64)\n\t\tuser.ID = &id\n\t} else {\n\t\tpassByte, _ := bcrypt.GenerateFromPassword([]byte(\"admin\"), 0)\n\t\tuser.Password = string(passByte)\n\t}\n\n\terr = repository.AddUserOrUpdate(user)\n\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\tctx.JSON(500, err.Error())\n\t\treturn\n\t}\n\n\tif idStr == \"\" {\n\t\tgo sendWelcomeMail(*user, password)\n\t}\n\n\tctx.JSON(200, \"Ok\")\n\n}",
"func UserIdHandler(w http.ResponseWriter, r *http.Request, c *Configuration, d *DaoWrapper) {\n\t// request method check\n\tif HandleHttpMethodError(r.Method, []string{\"GET\", \"DELETE\", \"PATCH\"}, w) {\n\t\treturn\n\t}\n\n\t// 0. Check header if it is valid\n\tappid := r.Header.Get(const_type_authorization_header_key)\n\tif appid == \"\" || !IsValidAppId(appid) {\n\t\tHandleHttpError(http.StatusUnauthorized, nil, w)\n\t\treturn\n\t}\n\n\t// get enterprise_id\n\tuser_id := r.URL.Path[len(const_endpoint_user_id):]\n\tif IsValidUserId(user_id) == false {\n\t\tHandleHttpError(http.StatusBadRequest, errors.New(\"invalid parameters\"), w)\n\t\treturn\n\t}\n\tLogInfo.Printf(\"user_id: %s\", user_id)\n\n\t// apply get/delte/patch\n\tif r.Method == \"GET\" {\n\t\t// enterprise_service.get_enterprise(enterprise_id)\n\t\t// select all from enterprise_list join user_list on enterprise_id\n\t\tent, err := UserGetById(user_id, appid, d)\n\t\tif err != nil {\n\t\t\tHandleError(-1, err, w)\n\t\t\treturn\n\t\t}\n\t\tHandleSuccess(w, ent)\n\t} else if r.Method == \"DELETE\" {\n\t\terr := UserDeleteById(user_id, appid, d)\n\t\tif err != nil {\n\t\t\tHandleError(-1, err, w)\n\t\t\treturn\n\t\t}\n\t\tClearUserPriv(appid, user_id)\n\t\tHandleSuccess(w, nil)\n\t} else if r.Method == \"PATCH\" {\n\t\tuser := getUserFromForm(r, false)\n\t\tpatchedUser, err := UserPatchById(user_id, appid, user, d)\n\t\tif err != nil {\n\t\t\tHandleError(-1, err, w)\n\t\t\treturn\n\t\t}\n\t\tClearUserPriv(appid, user_id)\n\t\tHandleSuccess(w, patchedUser)\n\t} else {\n\t\tHandleHttpError(http.StatusMethodNotAllowed, errors.New(\"Method not allowed\"), w)\n\t}\n}",
"func (h *Handler) EditUser(c *fiber.Ctx) error {\n\tservice := services.NewUserService()\n\tid, err := strconv.ParseInt(c.Params(\"id\"), 10, 32)\n\n\tif err != nil {\n\t\treturn c.Status(400).JSON(fiber.Map{\"status\": \"error\", \"message\": err.Error()})\n\t}\n\n\tvar usr user.User\n\tif err := c.BodyParser(&usr); err != nil {\n\t\treturn c.Status(422).JSON(fiber.Map{\"status\": \"error\", \"message\": \"Invalid fields\"})\n\t}\n\n\terr = service.UpdateUser(&usr, int(id))\n\n\tif err != nil {\n\t\treturn c.Status(500).JSON(fiber.Map{\"status\": \"error\", \"message\": err.Error()})\n\t}\n\n\treturn c.JSON(fiber.Map{\"status\": \"success\", \"message\": \"UpdatedUser\", \"data\": usr})\n}",
"func EditUser(w http.ResponseWriter, r *http.Request) {\n\n\tif !UserAuthorized(r) {\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tuserId, _ := strconv.Atoi(mux.Vars(r)[\"userId\"])\n\n\tvar user models.User\n\n\tdecoder := json.NewDecoder(r.Body)\n\tif err := decoder.Decode(&user); err != nil {\n\t\tpanic(err)\n\t}\n\n\tif err := env.db.UpdateUser(userId, &user); err != nil {\n\t\tpanic(err)\n\t}\n\n\tupdatedUser, err := env.db.FindUserById(userId)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tw.Header().Set(\"Content-Type\", JSON)\n\tw.WriteHeader(http.StatusOK)\n\tif err := json.NewEncoder(w).Encode(updatedUser); err != nil {\n\t\tpanic(err)\n\t}\n}",
"func syncUserToIdentity(usr *user.User, id *authn.Identity) {\n\tid.ID = fmt.Sprintf(\"user:%d\", usr.ID)\n\tid.Login = usr.Login\n\tid.Email = usr.Email\n\tid.Name = usr.Name\n\tid.IsGrafanaAdmin = &usr.IsAdmin\n}",
"func (ctl *controller) APIUserPutAction(ctx *gin.Context) {\n\tctl.logger.Info(\"[PUT] UserPutAction\")\n\n\tvar userRequest UserRequest\n\tuserID, err := validateUserRequestUpdate(ctx, &userRequest)\n\tif err != nil {\n\t\tctx.AbortWithError(http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\taffected, err := ctl.updateUser(&userRequest, userID)\n\tif err != nil {\n\t\tctx.AbortWithError(http.StatusInternalServerError, err)\n\t\treturn\n\t}\n\tif affected == 0 {\n\t\tctl.logger.Debug(\"nothing updated\")\n\t}\n\n\t// json response\n\tctx.JSON(http.StatusOK, jsonresp.CreateUserJSON(userID))\n}",
"func PutUser(request []string, params map[string][]string) (answer Answer) {\n\tvar err error\n\tdefer answer.make(&err, nil)\n\n\t// Parse user request parameters\n\tvar rp RequestParams = RequestParams{\n\t\t\"login\": {Optional: false, Type: String},\n\t\t\"name\": {Optional: true, Type: String},\n\t\t\"phone\": {Optional: true, Type: String},\n\t\t\"position\": {Optional: true, Type: String},\n\t\t\"comment\": {Optional: true, Type: String},\n\t}\n\n\terr = rp.Parse(params)\n\tif err != nil {\n\t\tanswer.Code = BadRequest\n\t\treturn\n\t}\n\n\t// Insert into [user]\n\tsqlText, sqlParams := rp.MakeSQLInsert(\"user\", []string{\"login\", \"name\", \"phone\", \"position\", \"comment\"})\n\tvar res sql.Result\n\tres, err = db.DB.Exec(sqlText, sqlParams...)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tanswer.ID, err = res.LastInsertId()\n\n\treturn\n}",
"func (app *application) EditUser(w http.ResponseWriter, r *http.Request) {\n\tid := chi.URLParam(r, \"id\")\n\tuserID, _ := strconv.Atoi(id)\n\n\tvar user models.User\n\n\terr := app.readJSON(w, r, &user)\n\tif err != nil {\n\t\tapp.badRequest(w, r, err)\n\t\treturn\n\t}\n\n\tif userID > 0 { // For an existing user, update the user record\n\t\terr = app.DB.EditUser(user)\n\t\tif err != nil {\n\t\t\tapp.badRequest(w, r, err)\n\t\t\treturn\n\t\t}\n\n\t\tif user.Password != \"\" {\n\t\t\tnewHash, err := bcrypt.GenerateFromPassword([]byte(user.Password), 12)\n\t\t\tif err != nil {\n\t\t\t\tapp.badRequest(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\terr = app.DB.UpdatePasswordForUser(user, string(newHash))\n\t\t\tif err != nil {\n\t\t\t\tapp.badRequest(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t} else { // For a new user, simply add the user to the users table\n\t\tnewHash, err := bcrypt.GenerateFromPassword([]byte(user.Password), 12)\n\t\tif err != nil {\n\t\t\tapp.badRequest(w, r, err)\n\t\t\treturn\n\t\t}\n\t\terr = app.DB.AddUser(user, string(newHash))\n\t\tif err != nil {\n\t\t\tapp.badRequest(w, r, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tvar resp struct {\n\t\tError bool `json:\"error\"`\n\t\tMessage string `json:\"message\"`\n\t}\n\n\tresp.Error = false\n\tapp.writeJSON(w, http.StatusOK, resp)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
DeleteUserHandler elimina un usuario en base al id | func DeleteUserHandler(w http.ResponseWriter, r *http.Request) {
params := mux.Vars(r)
k := params["id"]
if _, ok := Listusers[k]; ok {
delete(Listusers, k)
} else {
log.Printf("No encontramos el id %s", k)
}
w.WriteHeader(http.StatusNoContent)
} | [
"func DeleteUserHandler(connection *sql.DB, cnf config.Config) negroni.HandlerFunc {\n\treturn negroni.HandlerFunc(func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {\n\t\tvar queryToken = r.URL.Query().Get(\"token\")\n\n\t\tif len(queryToken) < 1 {\n\t\t\tqueryToken = r.Header.Get(\"token\")\n\t\t}\n\n\t\tif len(queryToken) < 1 {\n\t\t\tutil.SendBadRequest(w, errors.New(\"token is mandatory\"))\n\t\t\treturn\n\t\t}\n\n\t\tuser := &models.UserResponse{}\n\t\terr := util.RequestToJSON(r, user)\n\t\tif err != nil {\n\t\t\tutil.SendBadRequest(w, errors.New(\"Bad json\"))\n\t\t\treturn\n\t\t}\n\n\t\tsecretKey := cnf.SecretKey\n\t\ttok, err := jwt.Parse(queryToken, func(t *jwt.Token) (interface{}, error) {\n\t\t\treturn []byte(secretKey), nil\n\t\t})\n\n\t\tclaims := tok.Claims.(jwt.MapClaims)\n\t\tvar ID = claims[\"sub\"].(float64)\n\n\t\tif int(ID) != user.ID {\n\t\t\tutil.SendBadRequest(w, errors.New(\"you can only delete your own user object\"))\n\t\t\treturn\n\t\t}\n\n\t\tdb.DeleteUser(connection, user)\n\t\tif err != nil {\n\t\t\tutil.SendBadRequest(w, err)\n\t\t\treturn\n\t\t}\n\t\tutil.SendOK(w, string(\"\"))\n\n\t})\n}",
"func (usr *userHandler) Delete(c *fiber.Ctx) error {\n\tclaims := c.Locals(mjwt.CLAIMS).(*mjwt.CustomClaim)\n\tuserIDParams := c.Params(\"user_id\")\n\n\tif claims.Identity == userIDParams {\n\t\tapiErr := resterr.NewBadRequestError(\"Tidak dapat menghapus akun terkait (diri sendiri)!\")\n\t\treturn c.Status(apiErr.Status()).JSON(fiber.Map{\"error\": apiErr, \"data\": nil})\n\t}\n\n\tapiErr := usr.service.DeleteUser(userIDParams)\n\tif apiErr != nil {\n\t\treturn c.Status(apiErr.Status()).JSON(fiber.Map{\"error\": apiErr, \"data\": nil})\n\t}\n\n\treturn c.JSON(fiber.Map{\"error\": nil, \"data\": fmt.Sprintf(\"user %s berhasil dihapus\", userIDParams)})\n}",
"func (c *Controller) DeleteUserHandler(cxt *gin.Context){\r\n\tID := cxt.Param(\"id\")\r\n\tresult := c.service.DeleteUser(ID)\r\n\tcxt.JSON(http.StatusOK, gin.H{\r\n\t\t\"result\": result, \r\n\t})\r\n}",
"func DeleteUserHandler(w http.ResponseWriter, r *http.Request, db *gorp.DbMap, c *businesscontext.Ctx) error {\n\tvars := mux.Vars(r)\n\tusername := vars[\"username\"]\n\n\tif !c.User.Admin && username != c.User.Username {\n\t\treturn WriteJSON(w, r, nil, http.StatusForbidden)\n\t}\n\n\tu, errLoad := user.LoadUserWithoutAuth(db, username)\n\tif errLoad != nil {\n\t\treturn sdk.WrapError(errLoad, \"deleteUserHandler> Cannot load user from db\")\n\t}\n\n\ttx, errb := db.Begin()\n\tif errb != nil {\n\t\treturn sdk.WrapError(errb, \"deleteUserHandler> cannot start transaction\")\n\t}\n\tdefer tx.Rollback()\n\n\tif err := user.DeleteUserWithDependencies(tx, u); err != nil {\n\t\treturn sdk.WrapError(err, \"deleteUserHandler> cannot delete user\")\n\t}\n\n\tif err := tx.Commit(); err != nil {\n\t\treturn sdk.WrapError(err, \"deleteUserHandler> cannot commit transaction\")\n\t}\n\n\treturn nil\n}",
"func DeleteUser(w http.ResponseWriter, req *http.Request) {\n\tvars := mux.Vars(req)\n\tid := vars[\"id\"]\n\n\tif err := db.Remove(id); err != nil {\n\t\thandleError(err, \"Failed to remove User: %v\", w)\n\t\treturn\n\t}\n\n\tw.Write([]byte(\"OK\"))\n}",
"func DeleteUserHandler(w http.ResponseWriter, req *http.Request) {\n\n\t// Get session values or redirect to Login\n\tsession, err := sessions.Store.Get(req, \"session\")\n\n\tif err != nil {\n\t\tlog.Println(\"error identifying session\")\n\t\thttp.Redirect(w, req, \"/login/\", 302)\n\t\treturn\n\t\t// in case of error\n\t}\n\n\t// Prep for user authentication\n\tsessionMap := getUserSessionValues(session)\n\n\tusername := sessionMap[\"username\"]\n\tloggedIn := sessionMap[\"loggedin\"]\n\tisAdmin := sessionMap[\"isAdmin\"]\n\n\tvars := mux.Vars(req)\n\tidString := vars[\"id\"]\n\n\tpk, err := strconv.Atoi(idString)\n\tif err != nil {\n\t\tpk = 0\n\t\tlog.Println(err)\n\t}\n\n\tfmt.Println(session)\n\n\tif isAdmin != \"true\" {\n\t\thttp.Redirect(w, req, \"/\", 302)\n\t\treturn\n\t}\n\n\tuser, err := database.PKLoadUser(db, int64(pk))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\tfmt.Println(\"Unable to load User\")\n\t\thttp.Redirect(w, req, \"/\", http.StatusSeeOther)\n\t}\n\n\tuser.IsAdmin = true\n\n\terr = database.UpdateUser(db, user)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\twv := WebView{\n\t\tUser: user,\n\t\tIsLoggedIn: loggedIn,\n\t\tSessionUser: username,\n\t\tIsAdmin: isAdmin,\n\t\tUserFrame: false,\n\t\tArchitecture: baseArchitecture,\n\t}\n\n\tif req.Method == \"GET\" {\n\t\tRender(w, \"templates/delete_user.html\", wv)\n\t}\n\n\tif req.Method == \"POST\" {\n\n\t\terr := database.DeleteUser(db, user.ID)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\turl := \"/user_index/\"\n\n\t\thttp.Redirect(w, req, url, http.StatusFound)\n\t}\n\n}",
"func (pc UserController) Delete(c *gin.Context) {\n\tid := c.Params.ByName(\"id\")\n\tvar u repository.UserRepository\n\tidInt, _ := strconv.Atoi(id)\n\tif err := u.DeleteByID(idInt); err != nil {\n\t\tc.AbortWithStatus(403)\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error()})\n\t\treturn\n\t}\n\n\tc.JSON(200, gin.H{\"success\": \"ID\" + id + \"のユーザーを削除しました\"})\n\treturn\n}",
"func HandleUserDelete(c *gin.Context) {\n\tuid := c.Param(\"uid\")\n\n\tvar u User\n\taffected, err := u.Delete(uid)\n\tif err != nil {\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\n\t\t\t\"code\": 1,\n\t\t\t\"msg\": err.Error(),\n\t\t\t\"data\": gin.H{},\n\t\t})\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"code\": 0,\n\t\t\"msg\": \"ok\",\n\t\t\"data\": gin.H{\n\t\t\t\"affected\": affected,\n\t\t},\n\t})\n}",
"func (handler *UserHandler) handleDeleteUser(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tid := vars[\"id\"]\n\n\tuserID, err := strconv.Atoi(id)\n\tif err != nil {\n\t\tError(w, err, http.StatusBadRequest, handler.Logger)\n\t\treturn\n\t}\n\n\t_, err = handler.UserService.User(portainer.UserID(userID))\n\n\tif err == portainer.ErrUserNotFound {\n\t\tError(w, err, http.StatusNotFound, handler.Logger)\n\t\treturn\n\t} else if err != nil {\n\t\tError(w, err, http.StatusInternalServerError, handler.Logger)\n\t\treturn\n\t}\n\n\terr = handler.UserService.DeleteUser(portainer.UserID(userID))\n\tif err != nil {\n\t\tError(w, err, http.StatusInternalServerError, handler.Logger)\n\t\treturn\n\t}\n}",
"func DeleteUserHandler(w http.ResponseWriter, r *http.Request) {\n\tid := mux.Vars(r)[\"id\"]\n\tstatus := users.DeleteUser(id)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(status)\n}",
"func DeleteUserByID(c *gin.Context) {\n\tdb := database.DBConn()\n\n\tresult, err := db.Exec(\"DELETE FROM user WHERE id = ?\", c.Param(\"id\"))\n\tif err != nil {\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\n\t\t\t\"error\": err.Error(),\n\t\t})\n\t} else {\n\t\tnum, err := result.RowsAffected()\n\t\tif err != nil {\n\t\t\tc.JSON(http.StatusInternalServerError, gin.H{\n\t\t\t\t\"error\": err.Error(),\n\t\t\t})\n\t\t} else {\n\t\t\tc.JSON(http.StatusOK, gin.H{\n\t\t\t\t\"nums row affected\": num,\n\t\t\t})\n\t\t}\n\t}\n\n\tdb.Close()\n}",
"func UserDelete(w http.ResponseWriter, r *http.Request) {\n\ttmpl := shared.Template(r)\n\n\tif r.Method == \"POST\" {\n\n\t\tuser, err := models.GetUser(r.PostFormValue(\"id\"))\n\t\tif err != nil {\n\t\t\tlog.Printf(\"ERROR: %s\\n\", err)\n\t\t\tw.WriteHeader(404)\n\t\t\ttmpl.Lookup(\"errors/404\").Execute(w, shared.ErrorData(err))\n\t\t}\n\n\t\tif err := user.Delete(); err != nil {\n\t\t\tlog.Printf(\"ERROR: %s\\n\", err)\n\t\t\tw.WriteHeader(500)\n\t\t\ttmpl.Lookup(\"errors/500\").Execute(w, shared.ErrorData(err))\n\t\t\treturn\n\t\t}\n\t\thttp.Redirect(w, r, \"/admin/users\", 303)\n\n\t} else {\n\t\terr := fmt.Errorf(\"Method %q not allowed\", r.Method)\n\t\tlog.Printf(\"ERROR: %s\\n\", err)\n\t\tw.WriteHeader(405)\n\t\ttmpl.Lookup(\"errors/405\").Execute(w, shared.ErrorData(err))\n\t}\n}",
"func (s *peerRESTServer) DeleteUserHandler(w http.ResponseWriter, r *http.Request) {\n\tif !s.IsValid(w, r) {\n\t\ts.writeErrorResponse(w, errors.New(\"Invalid request\"))\n\t\treturn\n\t}\n\n\tobjAPI := newObjectLayerFn()\n\tif objAPI == nil {\n\t\ts.writeErrorResponse(w, errServerNotInitialized)\n\t\treturn\n\t}\n\n\tvars := mux.Vars(r)\n\taccessKey := vars[peerRESTUser]\n\tif accessKey == \"\" {\n\t\ts.writeErrorResponse(w, errors.New(\"username is missing\"))\n\t\treturn\n\t}\n\n\tif err := globalIAMSys.DeleteUser(accessKey); err != nil {\n\t\ts.writeErrorResponse(w, err)\n\t\treturn\n\t}\n\n\tw.(http.Flusher).Flush()\n}",
"func (uv *userValidator) Delete(id uint) error{\n\tvar user User\n\tuser.ID = id\n\terr := runUserValidatorFunction(&user, uv.idGreaterThan(0))\n\tif err != nil{\n\t\treturn err\n\t}\n\treturn uv.UserDB.Delete(id)\n}",
"func DeleteUser(w http.ResponseWriter, r *http.Request) {\n\n\tif !UserAuthorized(r) {\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tuserId, _ := strconv.Atoi(mux.Vars(r)[\"userId\"])\n\n\tif err := env.db.DeleteUser(userId); err != nil {\n\t\tpanic(err)\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n}",
"func DeleteByUserIdHandler(w http.ResponseWriter, req *http.Request) {\n\n}",
"func (h *Handler) DeleteUserByID(w http.ResponseWriter, r *http.Request, param httprouter.Params) {\n\tuserID := param.ByName(\"userID\")\n\n\tquery := fmt.Sprintf(\"DELETE FROM users WHERE id = %s\", userID)\n\t_, err := h.DB.Exec(query)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\trenderJSON(w, []byte(`\n\t{\n\t\tstatus: \"success\",\n\t\tmessage: \"Delete user success!\"\n\t}\n\t`), http.StatusOK)\n}",
"func (h *Handlers) Delete(c *gin.Context) {\n\tid := c.Param(\"id\")\n\n\tif !bson.IsObjectIdHex(id) {\n\t\tc.Status(404)\n\t\treturn\n\t}\n\n\tuser := &models.User{}\n\tif err := h.conn.Collection(\"users\").FindById(bson.ObjectIdHex(id), user); err != nil {\n\t\tc.Status(404)\n\t\treturn\n\t}\n\n\tif err := h.conn.Collection(\"users\").DeleteDocument(user); err != nil {\n\t\tc.Status(400)\n\t\treturn\n\t}\n\tc.Status(204)\n}",
"func DeleteHandler(w http.ResponseWriter, r *http.Request) {\n\t_, _, ok := r.BasicAuth()\n\tif !ok {\n\t\tw.Header().Set(\"WWW-Authenticate\", fmt.Sprintf(`Basic realm=\"%s\"`, BasicAuthRealm))\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\tw.Write([]byte(http.StatusText(http.StatusUnauthorized) + \"\\n\"))\n\t\treturn\n\t}\n\tif !reqIsAdmin(r) {\n\t\thttp.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)\n\t\treturn\n\t}\n\tu := &User{\n\t\tUsername: strings.ToLower(r.FormValue(\"username\")),\n\t}\n\terr := u.Delete()\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusUnauthorized)\n\t\treturn\n\t}\n\tfmt.Fprintf(w, \"User Deleted\\n\")\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
UnmarshalEasyJSON supports easyjson.Unmarshaler interface | func (v *BlitzedItemResponse) UnmarshalEasyJSON(l *jlexer.Lexer) {
easyjson6a975c40DecodeJsonBenchmark4(l, v)
} | [
"func (v *Fruit) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjsonD2b7633eDecodeBackendInternalModels11(l, v)\n}",
"func (v *Entities) UnmarshalEasyJSON(l *jlexer.Lexer) {\n\teasyjson794297d0DecodeGithubComMailruEasyjsonBenchmark10(l, v)\n}",
"func (c *Context) UnmarshalEasyJSON(in *jlexer.Lexer) {\n\tContextSerialization.UnmarshalFromEasyJSONLexer(in, c)\n}",
"func (c *EventOutputContext) UnmarshalEasyJSON(in *jlexer.Lexer) {\n\tContextSerialization.UnmarshalFromEasyJSONLexerEventOutput(in, c)\n}",
"func deJSONify(i interface{}) (interface{}, error) {\n\tvar data []byte\n\tswitch t := i.(type) {\n\tcase string:\n\t\tdata = []byte(t)\n\tcase []byte:\n\t\tdata = t\n\tcase json.RawMessage:\n\t\tdata = []byte(t)\n\tdefault:\n\t\treturn i, nil\n\t}\n\tvar x interface{}\n\tif err := json.Unmarshal(data, &x); err != nil {\n\t\treturn nil, &kivik.Error{HTTPStatus: http.StatusBadRequest, Err: err}\n\t}\n\treturn x, nil\n}",
"func unmarshalJSON(j extv1.JSON, output *any) error {\n\tif len(j.Raw) == 0 {\n\t\treturn nil\n\t}\n\treturn json.Unmarshal(j.Raw, output)\n}",
"func (this *ExternalSimple) UnmarshalJSON(b []byte) error {\n\treturn ExternalUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}",
"func (this *Simple) UnmarshalJSON(b []byte) error {\n\treturn TypesUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}",
"func (m *gohaiMarshaler) UnmarshalJSON(bytes []byte) error {\n\tfirstUnmarshall := \"\"\n\terr := json.Unmarshal(bytes, &firstUnmarshall)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal([]byte(firstUnmarshall), &(m.gohai))\n\treturn err\n}",
"func (e *EnumSimple) UnmarshalJSON(buffer []byte) error {\n var result int32\n err := fbe.Json.Unmarshal(buffer, &result)\n if err != nil {\n return err\n }\n *e = EnumSimple(result)\n return nil\n}",
"func FromJSON(bytes []byte, i interface{}) {\n\terr := json.Unmarshal(bytes, i)\n\tPanicError(err)\n}",
"func Easyjson() error {\n\tmg.Deps(Tool{}.EasyJSON)\n\treturn sh.Run(\"easyjson\", \"./pkg/module/serialize/types.go\")\n}",
"func (tt *Meta) UnmarshalJSON(b []byte) error {\n return json.Unmarshal(b, &tt.meta_)\n}",
"func (manager *Manager) Unmarshal(reader io.Reader, refs references.Store) error {\n\tif manager.specs == nil {\n\t\treturn nil\n\t}\n\n\tbb, err := ioutil.ReadAll(reader)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(bb) == 0 {\n\t\treturn nil\n\t}\n\n\tobject := NewObject(manager.resource, manager.specs.Message, refs)\n\terr = gojay.UnmarshalJSONObject(bb, object)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error {\n\td := json.NewDecoder(r)\n\tfor _, opt := range opts {\n\t\td = opt(d)\n\t}\n\tif err := d.Decode(&o); err != nil {\n\t\treturn fmt.Errorf(\"while decoding JSON: %v\", err)\n\t}\n\treturn nil\n}",
"func (a *Addon) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &a.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"kind\":\n\t\t\terr = unpopulate(val, \"Kind\", &a.Kind)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &a.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"systemData\":\n\t\t\terr = unpopulate(val, \"SystemData\", &a.SystemData)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &a.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", a, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func JSONDecode(ctx context.Context, data []byte, obj interface{}) error {\n\treturn json.Unmarshal(data, obj)\n}",
"func (this *ExternalSimple_ExternalNested) UnmarshalJSON(b []byte) error {\n\treturn ExternalUnmarshaler.Unmarshal(bytes.NewReader(b), this)\n}",
"func UnmarshalFromJSON(data []byte, target interface{}) error {\n\tvar ctx map[string]interface{}\n\terr := json.Unmarshal(data, &ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn Unmarshal(ctx, target)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
People ... Create a router group to rs/crud/persons and relative routes | func People(engine *gin.Engine, midlewares ...gin.HandlerFunc) {
personGroup := engine.Group("rs/crud/person")
personGroup.GET("/:id", controllers.GetPerson)
personGroup.GET("/", controllers.GetPagePerson)
personGroup.PUT("/:id", controllers.PutPerson)
personGroup.DELETE("/:id", controllers.DeletePerson)
} | [
"func MakePersonHandlers(r *mux.Router, n negroni.Negroni, service person.UseCase) {\n\tr.Handle(\"/person\", n.With(\n\t\tnegroni.Wrap(findAllPersons(service)),\n\t)).Methods(\"GET\", \"OPTIONS\").Name(\"findAllPersons\")\n\n\tr.Handle(\"/person/{key}\", n.With(\n\t\tnegroni.Wrap(findPersonByKey(service)),\n\t)).Methods(\"GET\", \"OPTIONS\").Name(\"findPersonByKey\")\n\n\tr.Handle(\"/person\", n.With(\n\t\tnegroni.Wrap(personAdd(service)),\n\t)).Methods(\"POST\", \"OPTIONS\").Name(\"personAdd\")\n\n\tr.Handle(\"/persons\", n.With(\n\t\tnegroni.Wrap(personMultiAdd(service)),\n\t)).Methods(\"POST\", \"OPTIONS\").Name(\"personMultiAdd\")\n\n\tr.Handle(\"/person/{key}\", n.With(\n\t\tnegroni.Wrap(deletePerson(service)),\n\t)).Methods(\"DELETE\", \"OPTIONS\").Name(\"deletePerson\")\n\n}",
"func RegisterRoutesPersons(mux *mux.Router, person interfaces.PersonDao) {\n\thandler := handlers.NewPersonHandler(person)\n\tmux.HandleFunc(\"/persons\", handler.CreatePerson).Methods(http.MethodPost)\n\tmux.HandleFunc(\"/persons/{id}\", handler.GetOne).Methods(http.MethodGet)\n\tmux.HandleFunc(\"/persons/signin\", handler.SignIn).Methods(http.MethodPost)\n\tmux.Handle(\"/persons/update\", middlewares.Authenticate(http.HandlerFunc(handler.Update))).Methods(http.MethodPut)\n\tmux.Handle(\"/persons/new-professor\", middlewares.Authenticate(middlewares.PersonRole(http.HandlerFunc(handler.CreateProfessor), 0))).Methods(http.MethodPost)\n\tmux.HandleFunc(\"/persons/section/{id}/{startDate}/{endDate}\", handler.GetAllBySectionIDAndDateRange).Methods(http.MethodGet)\n}",
"func AddRESTRoutes(router *mux.Router) {\n\trouter.HandleFunc(\"/api/v1/user\", usersGet).Methods(\"GET\")\n\trouter.HandleFunc(\"/api/v1/user\", userPost).Methods(\"POST\")\n\trouter.HandleFunc(\"/api/v1/user/{userID}\", userGet).Methods(\"GET\")\n\trouter.HandleFunc(\"/api/v1/user/{userID}\", userPut).Methods(\"PUT\")\n\trouter.HandleFunc(\"/api/v1/user/{userID}\", userDelete).Methods(\"DELETE\")\n}",
"func RegisterPersons(party iris.Party) {\n\tcrs := cors.New(cors.Options{\n\t\tAllowedOrigins: []string{\"*\"}, // allows everything, use that to change the hosts.\n\t\tAllowCredentials: true,\n\t\tAllowedMethods: []string{iris.MethodGet, iris.MethodPost, iris.MethodPut, iris.MethodDelete},\n\t})\n\tpersons := party.Party(\"/persons\", crs).AllowMethods(iris.MethodOptions)\n\t{\n\t\tpersons.Get(\"\", GetPersons)\n\t\tpersons.Get(\"/{id:int}\", GetPersonByID)\n\t\tpersons.Post(\"\", AddPerson)\n\t\tpersons.Put(\"/{id:int}\", UpdatePerson)\n\t\tpersons.Delete(\"/{id:int}\", DeletePerson)\n\t}\n}",
"func createRoutes(app *iris.Application, xorm *xorm.Engine) {\n\n\tmvc.Configure(app.Party(\"/security\", LanguageHandler), func(app *mvc.Application) {\n\t\tapp.Register(app, xorm, &config.GralConf).Handle(new(securityctrl.Definition))\n\t})\n\n\tmvc.Configure(app.Party(\"/users\", LanguageHandler), func(app *mvc.Application) {\n\t\tapp.Register(app, xorm, &config.GralConf).Handle(new(usersctrl.Definition))\n\t})\n}",
"func Routes(DB *gorm.DB, group echo.Group) {\n\t/*\n\t\tPasamos al controlador de la entidad el grupo de las rutas y le inyectamos la configuracion de base de datos\n\n\t\tWe pass to the heandler of the entity the group of the routes and we inject the database configuration\n\t*/\n\n\tv1.NewUserController(group, DB)\n\tv1.NewCharacterController(group, DB)\n\tv1.NewAwardController(group, DB)\n\tv1.NewTaskController(group, DB)\n\n}",
"func RouteToV1(r *router.MyRouter) {\n\t// User\n\tr.HandlerFunc(\"POST\", \"/members/\", \"CreateMemberUser\", handler.CreateMemberUser)\n\tr.HandlerFunc(\"GET\", \"/members/\", \"ListMemberUsers\", handler.ListMemberUsers)\n\tr.HandlerFunc(\"GET\", \"/members/:mid/\", \"GetMemberUser\", handler.GetMemberUser)\n\tr.HandlerFunc(\"DELETE\", \"/members/:mid/\", \"DeleteMemberUser\", handler.DeleteMemberUser)\n\n\t// Token\n\tr.HandlerFunc(\"POST\", \"/oauth2/tokens/\", \"IssueToken\", handler.IssueToken)\n\tr.HandlerFunc(\"GET\", \"/oauth2/tokens/:tk/\", \"ValidateToken\", handler.ValidateToken)\n\tr.HandlerFunc(\"DELETE\", \"/oauth2/tokens/:tk/\", \"RevolkToken\", handler.RevolkToken)\n\n\t// Project\n\tr.HandlerFunc(\"POST\", \"/projects/\", \"CreateProject\", handler.CreateProject)\n\tr.HandlerFunc(\"GET\", \"/projects/\", \"ListDomainProjects\", handler.ListDomainProjects)\n\tr.HandlerFunc(\"GET\", \"/self/projects/\", \"ListUserProjects\", handler.ListUserProjects)\n\tr.HandlerFunc(\"GET\", \"/projects/:pid/\", \"GetProject\", handler.GetProject)\n\tr.HandlerFunc(\"DELETE\", \"/projects/:pid/\", \"DeleteProject\", handler.DeleteProject)\n\tr.HandlerFunc(\"GET\", \"/projects/:pid/members/\", \"ListProjectUser\", handler.ListProjectUser)\n\tr.HandlerFunc(\"POST\", \"/projects/:pid/members/\", \"AddUsersToProject\", handler.AddUsersToProject)\n\tr.HandlerFunc(\"DELETE\", \"/projects/:pid/members/\", \"RemoveUsersFromProject\", handler.RemoveUsersFromProject)\n\n\t// Application\n\tr.HandlerFunc(\"POST\", \"/applications/\", \"CreateApplication\", handler.CreateApplication)\n\tr.HandlerFunc(\"GET\", \"/applications/\", \"ListUserApplications\", handler.ListUserApplications)\n\tr.HandlerFunc(\"GET\", \"/applications/:aid/\", \"GetApplication\", handler.GetApplication)\n\tr.HandlerFunc(\"DELETE\", \"/applications/:aid/\", \"DeleteApplication\", handler.DeleteApplication)\n\t// // r.HandlerFunc(\"PUT\", \"/v1/users/:uid/applications/:aid/\", 
handler.UpdateApplication)\n\n\t// Service\n\tr.HandlerFunc(\"POST\", \"/services/\", \"CreateService\", handler.CreateService)\n\tr.HandlerFunc(\"GET\", \"/services/\", \"ListServices\", handler.ListServices)\n\tr.HandlerFunc(\"GET\", \"/services/:sid/\", \"GetService\", handler.GetService)\n\tr.HandlerFunc(\"DELETE\", \"/services/:sid/\", \"DeleteService\", handler.DeleteService)\n\tr.HandlerFunc(\"POST\", \"/features/\", \"RegistryServiceFeatures\", handler.RegistryServiceFeatures)\n\tr.HandlerFunc(\"GET\", \"/services/:sid/features/\", \"ListServiceFeatures\", handler.ListServiceFeatures)\n\n\t// Role\n\tr.HandlerFunc(\"POST\", \"/roles/\", \"CreateRole\", handler.CreateRole)\n\tr.HandlerFunc(\"GET\", \"/roles/\", \"ListRoles\", handler.ListRoles)\n\tr.HandlerFunc(\"GET\", \"/roles/:ri/\", \"GetRole\", handler.GetRole)\n\tr.HandlerFunc(\"DELETE\", \"/roles/:ri/\", \"DeleteRole\", handler.DeleteRole)\n\tr.HandlerFunc(\"POST\", \"/roles/:ri/features/\", \"AddFeaturesToRole\", handler.AddFeaturesToRole)\n\tr.HandlerFunc(\"DELETE\", \"/roles/:ri/features/\", \"RemoveFeaturesFromRole\", handler.RemoveFeaturesFromRole)\n\n\t// r.HandlerFunc(\"POST\", \"/v1/domains/users/\", \"CreateDomainUser\", handler.CreateDomainUser)\n\t// r.HandlerFunc(\"GET\", \"/v1/users/:uid/domains/\", \"ListUserDomain\", handler.ListUserDomain)\n\t// r.HandlerFunc(\"PUT\", \"/v1/users/:uid/password/\", \"SetUserPassword\", handler.SetUserPassword)\n\t// r.HandlerFunc(\"DELETE\", \"/v1/unregistry/\", \"UnRegistry\", handler.UnRegistry)\n\t// r.HandlerFunc(\"POST\", \"/v1/users/:uid/projects/\", \"AddProjectsToUser\", handler.AddProjectsToUser)\n\t// r.HandlerFunc(\"DELETE\", \"/v1/users/:uid/projects/\", \"RemoveProjectsFromUser\", handler.RemoveProjectsFromUser)\n\t// r.HandlerFunc(\"POST\", \"/v1/users/:uid/bind/roles/:rn/\", \"BindRole\", handler.BindRole)\n\t// r.HandlerFunc(\"POST\", \"/v1/users/:uid/unbind/roles/:rn/\", \"UnBindRole\", handler.UnBindRole)\n\t// r.HandlerFunc(\"POST\", 
\"/v1/invitations/\", \"InvitationsUser\", handler.InvitationsUser)\n\t// r.HandlerFunc(\"DELETE\", \"/v1/invitations/:code/\", \"RevolkInvitation\", handler.RevolkInvitation)\n\t// r.HandlerFunc(\"GET\", \"/v1/invitations/\", \"ListInvitationsRecords\", handler.ListInvitationsRecords)\n\t// r.HandlerFunc(\"GET\", \"/v1/invitations/:code/\", \"GetInvitationsRecord\", handler.GetInvitationsRecord)\n\t// r.HandlerFunc(\"POST\", \"/v1/registry/\", \"RegistryUser\", handler.RegistryUser)\n\t// r.HandlerFunc(\"POST\", \"/v1/verifycode/\", \"IssueVerifyCode\", handler.IssueVerifyCode)\n\t// r.HandlerFunc(\"POST\", \"/v1/invitations/users/:uid/code/:code/\", \"AcceptInvitation\", handler.AcceptInvitation)\n\t// r.HandlerFunc(\"PUT\", \"/v1/users/:uid/\", handler.UpdateUser)\n\t// r.HandlerFunc(\"POST\", \"/v1/default/projects/:pid/\", \"SetUserDefaultProject\", handler.SetUserDefaultProject)\n\n\tr.AddV1Root()\n}",
"func UsersRoutes(router *gin.Engine, controller *controllers.UsersController) {\n\tuserRoutes := router.Group(\"/users\")\n\t{\n\t\tuserRoutes.POST(\"/\", controller.CreateUserController)\n\t}\n}",
"func Routes(router *httprouter.Router, ds *store.InMemory) {\n\n\t// Client\n\trouter.GET(\"/api/clients\", readClients(ds)) // Check\n\trouter.GET(\"/api/client\", readClients(ds))\n\n\trouter.GET(\"/api/client/:id\", readClients(ds)) // Check\n\n\trouter.POST(\"/api/client\", createOrUpdateClient(ds)) // Not Implement\n\n\trouter.POST(\"/api/client/\", createOrUpdateClient(ds)) // Not Implement\n\n\trouter.PUT(\"/api/client/:id\", createOrUpdateClient(ds)) // Not Implement\n\n\trouter.PATCH(\"/api/client/:id\", createOrUpdateClient(ds)) // Not Implement\n\n\trouter.DELETE(\"/api/client/:id\", deleteClient(ds)) // Not Implement\n}",
"func Routes(router *gin.RouterGroup) {\n\tr := &pipeRoutes{}\n\n\trouter.GET(\"/\", r.get)\n\trouter.GET(\"/:pipe-id\", r.get)\n\trouter.DELETE(\"/:pipe-id\", r.delete)\n\trouter.POST(\"/\", r.post)\n}",
"func Routes(route *gin.Engine) {\n\trouter := route.Group(\"/users\")\n\trouter.POST(\"/\", CreateUser)\n\trouter.GET(\"/:id\", GetUser)\n\trouter.PUT(\"/:id\", UpdateUser)\n\trouter.DELETE(\"/:id\", DeleteUser)\n\n}",
"func AddRouter(engine *gin.Engine) {\n\tr := engine.Group(\"/api/v1\")\n\n\tr.GET(\"/post\", post.Index)\n\tr.GET(\"/post/:uuid\", post.Detail)\n\tr.GET(\"/post/:uuid/more\", post.More)\n}",
"func AddApproutes(route *mux.Router) {\n\n\tsetStaticFolder(route)\n\n\troute.HandleFunc(\"/\", renderHome)\n\n\troute.HandleFunc(\"/users/{name}\", getUsers).Methods(\"GET\")\n\n\tfmt.Println(\"Routes are Loded.\")\n}",
"func AddRoutes(router *routes.Router) {\n\trouter.Route([]interface{}{\"actor\", routes.Param(\"actor\")}, actorParam(http.HandlerFunc(showActor)))\n}",
"func generateUserAPIRoutes(router *mux.Router) {\n\tusers := router.PathPrefix(\"/users\").Subrouter()\n\tusers.Use(helpers.LoggingMiddleware)\n\tusers.HandleFunc(\"\", user.GetAll).Methods(\"GET\") // GET Request to handle all data present in the Database\n\n\tsub := router.PathPrefix(\"/user\").Subrouter()\n\tsub.Use(helpers.LoggingMiddleware)\n\t\n\tsub.HandleFunc(\"\", user.GetUser).Methods(\"GET\")\n}",
"func UsersRouter(w http.ResponseWriter, r *http.Request) {\n\tpath := strings.TrimSuffix(r.URL.Path, \"/\")\n\n\tif path == \"/users\" {\n\t\tswitch r.Method {\n\t\tcase http.MethodGet:\n\t\t\tusersGetAll(w, r)\n\t\t\treturn\n\t\tcase http.MethodPost:\n\t\t\tusersPostOne(w, r)\n\t\t\treturn\n\t\tcase http.MethodHead:\n\t\t\tusersGetAll(w, r)\n\t\t\treturn\n\t\tcase http.MethodOptions:\n\t\t\tpostOptionsResponse(w, []string{http.MethodGet, http.MethodPost, http.MethodHead, http.MethodOptions}, nil)\n\t\t\treturn\n\t\tdefault:\n\t\t\tpostError(w, http.StatusMethodNotAllowed)\n\t\t\treturn\n\t\t}\n\t}\n\n\tpath = strings.TrimPrefix(path, \"/users/\")\n\tif !bson.IsObjectIdHex(path) {\n\t\tpostError(w, http.StatusNotFound)\n\t\treturn\n\t}\n\n\tid := bson.ObjectIdHex(path)\n\n\tswitch r.Method {\n\tcase http.MethodGet:\n\t\tusersGetOne(w, r, id)\n\t\treturn\n\tcase http.MethodPut:\n\t\tusersPutOne(w, r, id)\n\t\treturn\n\tcase http.MethodPatch:\n\t\tusersPatchOne(w, r, id)\n\t\treturn\n\tcase http.MethodDelete:\n\t\tusersDeleteOne(w, r, id)\n\t\treturn\n\tcase http.MethodHead:\n\t\tusersGetOne(w, r, id)\n\t\treturn\n\tcase http.MethodOptions:\n\t\tpostOptionsResponse(w, []string{http.MethodGet, http.MethodPut, http.MethodPatch, http.MethodDelete, http.MethodHead, http.MethodOptions}, nil)\n\t\treturn\n\tdefault:\n\t\tpostError(w, http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n}",
"func AddPeople(w http.ResponseWriter, r *http.Request) {\n\tmyDb, err := db.StartDB(\"mydb.db\")\n\tif err != nil {\n\t\tfmt.Printf(\"Fail in open database: %v\\n\", err)\n\t\treturn\n\t}\n\n\t// Verify token\n\ttoken := r.Header.Get(\"AuthToken\")\n\tif (!myDb.IsLogIn([]byte(token))) {\n\t\tfmt.Printf(\"Unauthorized: %v\\n\", err)\n\t\t// 401: Unauthorized\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\t// Add a people\n\tvars := mux.Vars(r)\n\tpeopleId, err := strconv.Atoi(vars[\"peopleId\"])\n\n\tbody, err := ioutil.ReadAll(r.Body)\n if err != nil {\n\t\tfmt.Printf(\"Read body error: %v\\n\", err)\n\t\tw.WriteHeader(http.StatusNotFound)\n\t}\n\t\n\tif err := myDb.AddObj(\"people\", []byte(strconv.Itoa(peopleId)),[]byte(body)); err != nil {\n\t\tfmt.Printf(\"Read body error: %v\\n\", err)\n\t\tw.WriteHeader(http.StatusNotFound)\n\t}\n\t\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.WriteHeader(http.StatusOK)\n}",
"func PublicRoutes(a *fiber.App) {\n\t// Create routes group.\n\troute := a.Group(\"/api/v1\")\n\n\t// Routes for GET method:\n\troute.Get(\"/books\", controllers.GetBooks) // get list of all books\n\troute.Get(\"/book/:id\", controllers.GetBook) // get one book by ID\n\troute.Get(\"/token/new\", controllers.GetNewAccessToken) // create a new access tokens\n}",
"func PublicRoutes(a *fiber.App) {\n\t// Create routes group.\n\troute := a.Group(\"/api/v1\")\n\n\t// Routes for GET method:\n\troute.Get(\"/books\", controllers.GetBooks) // get list of all books\n\troute.Get(\"/book/:id\", controllers.GetBook) // get one book by ID\n\n\t// Routes for POST method:\n\troute.Post(\"/user/sign/up\", controllers.UserSignUp) // register a new user\n\troute.Post(\"/user/sign/in\", controllers.UserSignIn) // auth, return Access & Refresh tokens\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Validate validates this io k8s api core v1 scoped resource selector requirement | func (m *IoK8sAPICoreV1ScopedResourceSelectorRequirement) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateOperator(formats); err != nil {
res = append(res, err)
}
if err := m.validateScopeName(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
} | [
"func isValidKubernetesResource(id yaml.ResourceIdentifier) bool {\n\treturn id.GetKind() != \"\" && id.GetAPIVersion() != \"\" && id.GetName() != \"\"\n}",
"func (r Resource) Valid() (err error) {\n\tswitch r {\n\tcase AuthorizationsResource: // 0\n\tcase BucketsResource: // 1\n\tcase DashboardsResource: // 2\n\tcase OrgsResource: // 3\n\tcase TasksResource: // 4\n\tcase TelegrafsResource: // 5\n\tcase SourcesResource: // 6\n\tcase UsersResource: //7\n\tdefault:\n\t\terr = ErrInvalidResource\n\t}\n\n\treturn err\n}",
"func (m *JsonToMetadata_Selector) Validate() error {\n\treturn m.validate(false)\n}",
"func (s *Selector) Validate(ver, path string, ignoreStatus, ignoreSpec bool) []error {\n\treturn nil\n}",
"func (m *IoK8sAPICoreV1TopologySelectorLabelRequirement) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateKey(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateValues(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func validateEndpointRequestResource(req *request.Request, resource arn.Resource) error {\n\tresReq := s3shared.ResourceRequest{Request: req, Resource: resource}\n\n\tif len(resReq.Request.ClientInfo.PartitionID) != 0 && resReq.IsCrossPartition() {\n\t\treturn s3shared.NewClientPartitionMismatchError(resource,\n\t\t\treq.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)\n\t}\n\n\tif !resReq.AllowCrossRegion() && resReq.IsCrossRegion() {\n\t\treturn s3shared.NewClientRegionMismatchError(resource,\n\t\t\treq.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)\n\t}\n\n\t// Accelerate not supported\n\tif aws.BoolValue(req.Config.S3UseAccelerate) {\n\t\treturn s3shared.NewClientConfiguredForAccelerateError(resource,\n\t\t\treq.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)\n\t}\n\treturn nil\n}",
"func (m *IoK8sAPICoreV1ResourceQuotaSpec) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateHard(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateScopeSelector(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m LabelSelectorOperator) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (r *Resource) Validate() error {\n\tif len(r.Group) == 0 {\n\t\treturn fmt.Errorf(\"group cannot be empty\")\n\t}\n\tif len(r.Version) == 0 {\n\t\treturn fmt.Errorf(\"version cannot be empty\")\n\t}\n\tif len(r.Kind) == 0 {\n\t\treturn fmt.Errorf(\"kind cannot be empty\")\n\t}\n\n\tif len(r.Resource) == 0 {\n\t\tr.Resource = flect.Pluralize(strings.ToLower(r.Kind))\n\t}\n\n\tgroupMatch := regexp.MustCompile(\"^[a-z]+$\")\n\tif !groupMatch.MatchString(r.Group) {\n\t\treturn fmt.Errorf(\"group must match ^[a-z]+$ (was %s)\", r.Group)\n\t}\n\n\tversionMatch := regexp.MustCompile(\"^v\\\\d+(alpha\\\\d+|beta\\\\d+)?$\")\n\tif !versionMatch.MatchString(r.Version) {\n\t\treturn fmt.Errorf(\n\t\t\t\"version must match ^v\\\\d+(alpha\\\\d+|beta\\\\d+)?$ (was %s)\", r.Version)\n\t}\n\tif r.Kind != flect.Pascalize(r.Kind) {\n\t\treturn fmt.Errorf(\"kind must be camelcase (expected %s was %s)\", flect.Pascalize(r.Kind), r.Kind)\n\t}\n\n\treturn nil\n}",
"func validateResourcePool(p *vsphere.Platform, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tdc := p.Datacenter\n\tif len(dc) == 0 {\n\t\tdc = \"<datacenter>\"\n\t}\n\tcluster := p.Cluster\n\tif len(cluster) == 0 {\n\t\tcluster = \"<cluster>\"\n\t}\n\texpectedPrefix := fmt.Sprintf(\"/%s/host/%s/Resources/\", dc, cluster)\n\n\tif !strings.HasPrefix(p.ResourcePool, expectedPrefix) {\n\t\terrMsg := fmt.Sprintf(\"resourcePool must be absolute path: expected prefix %s\", expectedPrefix)\n\t\tallErrs = append(allErrs, field.Invalid(fldPath.Child(\"resourcePool\"), p.ResourcePool, errMsg))\n\t}\n\n\treturn allErrs\n}",
"func (m *IoK8sApimachineryPkgApisMetaV1LabelSelectorRequirement) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateKey(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateOperator(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func ValidateControllerRegistrationSpec(spec *core.ControllerRegistrationSpec, fldPath *field.Path) field.ErrorList {\n\tallErrs := field.ErrorList{}\n\n\tvar (\n\t\tresourcesPath = fldPath.Child(\"resources\")\n\t\tdeploymentPath = fldPath.Child(\"deployment\")\n\n\t\tresources = make(map[string]string, len(spec.Resources))\n\t\tcontrolsResourcesPrimarily = false\n\t)\n\n\tfor i, resource := range spec.Resources {\n\t\tidxPath := resourcesPath.Index(i)\n\n\t\tif len(resource.Kind) == 0 {\n\t\t\tallErrs = append(allErrs, field.Required(idxPath.Child(\"kind\"), \"field is required\"))\n\t\t}\n\n\t\tif !extensionsv1alpha1.ExtensionKinds.Has(resource.Kind) {\n\t\t\tallErrs = append(allErrs, field.NotSupported(idxPath.Child(\"kind\"), resource.Kind, extensionsv1alpha1.ExtensionKinds.UnsortedList()))\n\t\t}\n\n\t\tif len(resource.Type) == 0 {\n\t\t\tallErrs = append(allErrs, field.Required(idxPath.Child(\"type\"), \"field is required\"))\n\t\t}\n\t\tif t, ok := resources[resource.Kind]; ok && t == resource.Type {\n\t\t\tallErrs = append(allErrs, field.Duplicate(idxPath, common.ExtensionID(resource.Kind, resource.Type)))\n\t\t}\n\t\tif resource.Kind != extensionsv1alpha1.ExtensionResource {\n\t\t\tif resource.GloballyEnabled != nil {\n\t\t\t\tallErrs = append(allErrs, field.Forbidden(idxPath.Child(\"globallyEnabled\"), fmt.Sprintf(\"field must not be set when kind != %s\", extensionsv1alpha1.ExtensionResource)))\n\t\t\t}\n\t\t\tif resource.ReconcileTimeout != nil {\n\t\t\t\tallErrs = append(allErrs, field.Forbidden(idxPath.Child(\"reconcileTimeout\"), fmt.Sprintf(\"field must not be set when kind != %s\", extensionsv1alpha1.ExtensionResource)))\n\t\t\t}\n\t\t}\n\n\t\tresources[resource.Kind] = resource.Type\n\t\tif resource.Primary == nil || *resource.Primary {\n\t\t\tcontrolsResourcesPrimarily = true\n\t\t}\n\t}\n\n\tif spec.Deployment != nil {\n\t\tif policy := spec.Deployment.Policy; policy != nil && !availablePolicies.Has(string(*policy)) {\n\t\t\tallErrs = 
append(allErrs, field.NotSupported(deploymentPath.Child(\"policy\"), *policy, availablePolicies.List()))\n\t\t}\n\n\t\tif spec.Deployment.SeedSelector != nil {\n\t\t\tif controlsResourcesPrimarily {\n\t\t\t\tallErrs = append(allErrs, field.Forbidden(deploymentPath.Child(\"seedSelector\"), \"specifying a seed selector is not allowed when controlling resources primarily\"))\n\t\t\t}\n\n\t\t\tallErrs = append(allErrs, metav1validation.ValidateLabelSelector(spec.Deployment.SeedSelector, deploymentPath.Child(\"seedSelector\"))...)\n\t\t}\n\t}\n\n\treturn allErrs\n}",
"func ValidateResourcesExistInK8sComponent(client kclient.ClientInterface, devfileObj parser.DevfileObj, k8sComponent devfile.Component, context string) (kindErr string, err error) {\n\t// get the string representation of the YAML definition of a CRD\n\tuList, err := libdevfile.GetK8sComponentAsUnstructuredList(devfileObj, k8sComponent.Name, context, devfilefs.DefaultFs{})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfor _, u := range uList {\n\t\t_, err = client.GetRestMappingFromUnstructured(u)\n\t\tif err != nil && u.GetKind() != \"ServiceBinding\" {\n\t\t\t// getting a RestMapping would fail if there are no matches for the Kind field on the cluster;\n\t\t\t// but if it's a \"ServiceBinding\" resource, we don't add it to unsupported list because odo can create links\n\t\t\t// without having SBO installed\n\t\t\treturn u.GetKind(), errors.New(\"resource not supported\")\n\t\t}\n\t}\n\treturn \"\", nil\n}",
"func validateExtendResources(req *admissionv1.AdmissionRequest) error {\n\t// This handler should only get called on Pod objects.\n\t// However, if different kind of object is invoked, issue a log message\n\t// but let the object request pass through.\n\n\tif req.Resource != podResource {\n\t\tklog.Infof(\"expect resource to be: %s, instead request resource: %s\", podResource, req.Resource)\n\t\treturn nil\n\t}\n\n\t// Parse the Pod object.\n\traw := req.Object.Raw\n\tpod := corev1.Pod{}\n\tif _, _, err := universalDeserializer.Decode(raw, nil, &pod); err != nil {\n\t\treturn fmt.Errorf(\"could not deserialize pod object: %v\", err)\n\t}\n\n\textendedResourcesUsedByPod := GetExtendResourcesUsedByPod(&pod)\n\textenedResourceTolerationsUsedByPod := GetExtendResourceTolerationsUsedByPod(&pod)\n\n\tif !(*extenedResourceTolerationsUsedByPod).IsSubset(*extendedResourcesUsedByPod) {\n\t\treturn fmt.Errorf(\"Forbidden Toleration Usage\")\n\t}\n\treturn nil\n}",
"func validateResource(request resource.Quantity, limit resource.Quantity, resourceName corev1.ResourceName) []error {\n\tvalidationErrors := make([]error, 0)\n\tif !limit.IsZero() && request.Cmp(limit) > 0 {\n\t\tvalidationErrors = append(validationErrors, errors.Errorf(\"Request must be less than or equal to %s limit\", resourceName))\n\t}\n\tif request.Cmp(resource.Quantity{}) < 0 {\n\t\tvalidationErrors = append(validationErrors, errors.Errorf(\"Resource %s request value must be non negative\", resourceName))\n\t}\n\tif limit.Cmp(resource.Quantity{}) < 0 {\n\t\tvalidationErrors = append(validationErrors, errors.Errorf(\"Resource %s limit value must be non negative\", resourceName))\n\t}\n\n\treturn validationErrors\n}",
"func (r *ScaledObjectReconciler) checkTargetResourceIsScalable(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) (kedav1alpha1.GroupVersionKindResource, error) {\n\tgvkr, err := kedautil.ParseGVKR(r.restMapper, scaledObject.Spec.ScaleTargetRef.APIVersion, scaledObject.Spec.ScaleTargetRef.Kind)\n\tif err != nil {\n\t\tlogger.Error(err, \"Failed to parse Group, Version, Kind, Resource\", \"apiVersion\", scaledObject.Spec.ScaleTargetRef.APIVersion, \"kind\", scaledObject.Spec.ScaleTargetRef.Kind)\n\t\treturn gvkr, err\n\t}\n\tgvkString := gvkr.GVKString()\n\tlogger.V(1).Info(\"Parsed Group, Version, Kind, Resource\", \"GVK\", gvkString, \"Resource\", gvkr.Resource)\n\n\t// let's try to detect /scale subresource\n\tscale, errScale := (*r.scaleClient).Scales(scaledObject.Namespace).Get(context.TODO(), gvkr.GroupResource(), scaledObject.Spec.ScaleTargetRef.Name, metav1.GetOptions{})\n\tif errScale != nil {\n\t\t// not able to get /scale subresource -> let's check if the resource even exist in the cluster\n\t\tunstruct := &unstructured.Unstructured{}\n\t\tunstruct.SetGroupVersionKind(gvkr.GroupVersionKind())\n\t\tif err := r.Client.Get(context.TODO(), client.ObjectKey{Namespace: scaledObject.Namespace, Name: scaledObject.Spec.ScaleTargetRef.Name}, unstruct); err != nil {\n\t\t\t// resource doesn't exist\n\t\t\tlogger.Error(err, \"Target resource doesn't exist\", \"resource\", gvkString, \"name\", scaledObject.Spec.ScaleTargetRef.Name)\n\t\t\treturn gvkr, err\n\t\t}\n\t\t// resource exist but doesn't expose /scale subresource\n\t\tlogger.Error(errScale, \"Target resource doesn't expose /scale subresource\", \"resource\", gvkString, \"name\", scaledObject.Spec.ScaleTargetRef.Name)\n\t\treturn gvkr, errScale\n\t}\n\n\t// if it is not already present in ScaledObject Status:\n\t// - store discovered GVK and GVKR\n\t// - store original scaleTarget's replica count (before scaling with KEDA)\n\tif scaledObject.Status.ScaleTargetKind != gvkString || 
scaledObject.Status.OriginalReplicaCount == nil {\n\t\tstatus := scaledObject.Status.DeepCopy()\n\t\tif scaledObject.Status.ScaleTargetKind != gvkString {\n\t\t\tstatus.ScaleTargetKind = gvkString\n\t\t\tstatus.ScaleTargetGVKR = &gvkr\n\t\t}\n\t\tif scaledObject.Status.OriginalReplicaCount == nil {\n\t\t\tstatus.OriginalReplicaCount = &scale.Spec.Replicas\n\t\t}\n\n\t\tif err := kedacontrollerutil.UpdateScaledObjectStatus(r.Client, logger, scaledObject, status); err != nil {\n\t\t\treturn gvkr, err\n\t\t}\n\t\tlogger.Info(\"Detected resource targeted for scaling\", \"resource\", gvkString, \"name\", scaledObject.Spec.ScaleTargetRef.Name)\n\t}\n\n\treturn gvkr, nil\n}",
"func validateCustomResource(instance *operatorv1.Installation) error {\n\tif instance.Spec.CNI == nil {\n\t\treturn fmt.Errorf(\"spec.cni must be defined\")\n\t}\n\n\t// Perform validation based on the chosen CNI plugin.\n\t// For example, make sure the plugin is supported on the specified k8s provider.\n\tswitch instance.Spec.CNI.Type {\n\tcase operatorv1.PluginCalico:\n\t\tswitch instance.Spec.CNI.IPAM.Type {\n\t\tcase operatorv1.IPAMPluginCalico:\n\t\tcase operatorv1.IPAMPluginHostLocal:\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\n\t\t\t\t\"spec.cni.ipam.type %s is not compatible with spec.cni.type %s, valid IPAM values %s\",\n\t\t\t\tinstance.Spec.CNI.IPAM.Type, instance.Spec.CNI.Type,\n\t\t\t\tstrings.Join([]string{\n\t\t\t\t\toperatorv1.IPAMPluginCalico.String(),\n\t\t\t\t\toperatorv1.IPAMPluginHostLocal.String()}, \",\",\n\t\t\t\t),\n\t\t\t)\n\t\t}\n\tcase operatorv1.PluginGKE:\n\t\t// The GKE CNI plugin is only supported on GKE or BYO.\n\t\tswitch instance.Spec.KubernetesProvider {\n\t\tcase operatorv1.ProviderGKE, \"\":\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"spec.kubernetesProvider %s is not compatible with spec.cni.type %s\",\n\t\t\t\tinstance.Spec.KubernetesProvider, instance.Spec.CNI.Type)\n\t\t}\n\n\t\tswitch instance.Spec.CNI.IPAM.Type {\n\t\tcase operatorv1.IPAMPluginHostLocal:\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\n\t\t\t\t\"spec.cni.ipam.type %s is not compatible with spec.cni.type %s, valid IPAM values %s\",\n\t\t\t\tinstance.Spec.CNI.IPAM.Type, instance.Spec.CNI.Type, operatorv1.IPAMPluginHostLocal)\n\t\t}\n\tcase operatorv1.PluginAmazonVPC:\n\t\t// The AmazonVPC CNI plugin is only supported on EKS or BYO.\n\t\tswitch instance.Spec.KubernetesProvider {\n\t\tcase operatorv1.ProviderEKS, \"\":\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"spec.kubernetesProvider %s is not compatible with spec.cni.type %s\",\n\t\t\t\tinstance.Spec.KubernetesProvider, instance.Spec.CNI.Type)\n\t\t}\n\n\t\tswitch instance.Spec.CNI.IPAM.Type {\n\t\tcase 
operatorv1.IPAMPluginAmazonVPC:\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\n\t\t\t\t\"spec.cni.ipam.type %s is not compatible with spec.cni.type %s, valid IPAM values %s\",\n\t\t\t\tinstance.Spec.CNI.IPAM.Type, instance.Spec.CNI.Type, operatorv1.IPAMPluginAmazonVPC)\n\t\t}\n\tcase operatorv1.PluginAzureVNET:\n\t\t// The AzureVNET CNI plugin is only supported on AKS or BYO.\n\t\tswitch instance.Spec.KubernetesProvider {\n\t\tcase operatorv1.ProviderAKS, \"\":\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"spec.kubernetesProvider %s is not compatible with spec.cni.type %s\",\n\t\t\t\tinstance.Spec.KubernetesProvider, instance.Spec.CNI.Type)\n\t\t}\n\n\t\tswitch instance.Spec.CNI.IPAM.Type {\n\t\tcase operatorv1.IPAMPluginAzureVNET:\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\n\t\t\t\t\"spec.cni.ipam.type %s is not compatible with spec.cni.type %s, valid IPAM values %s\",\n\t\t\t\tinstance.Spec.CNI.IPAM.Type, instance.Spec.CNI.Type, operatorv1.IPAMPluginAzureVNET)\n\t\t}\n\tdefault:\n\t\t// The specified CNI plugin is not supported by this version of the operator.\n\t\treturn fmt.Errorf(\"Invalid value '%s' for spec.cni.type, it should be one of %s\",\n\t\t\tinstance.Spec.CNI.Type, strings.Join(operatorv1.CNIPluginTypesString, \",\"))\n\t}\n\n\t// Verify Calico settings, if specified.\n\tif instance.Spec.CalicoNetwork != nil {\n\n\t\tnPools := len(instance.Spec.CalicoNetwork.IPPools)\n\t\tif nPools > 2 {\n\t\t\treturn fmt.Errorf(\"only one IPPool per version is allowed\")\n\t\t}\n\n\t\tv4pool := render.GetIPv4Pool(instance.Spec.CalicoNetwork.IPPools)\n\t\tv6pool := render.GetIPv6Pool(instance.Spec.CalicoNetwork.IPPools)\n\n\t\tif nPools == 2 {\n\t\t\tif v4pool == nil {\n\t\t\t\treturn fmt.Errorf(\"multiple IPv6 pools detected: only one IPPool per version is allowed\")\n\t\t\t}\n\t\t\tif v6pool == nil {\n\t\t\t\treturn fmt.Errorf(\"multiple IPv4 IPPools detected: only one IPPool per version is allowed\")\n\t\t\t}\n\t\t}\n\n\t\tif v4pool != nil {\n\t\t\t_, cidr, err := 
net.ParseCIDR(v4pool.CIDR)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"ipPool.CIDR(%s) is invalid: %s\", v4pool.CIDR, err)\n\t\t\t}\n\n\t\t\tif instance.Spec.CNI.Type == operatorv1.PluginCalico {\n\t\t\t\t// Verify the specified encapsulation type is valid.\n\t\t\t\tswitch v4pool.Encapsulation {\n\t\t\t\tcase operatorv1.EncapsulationIPIP, operatorv1.EncapsulationIPIPCrossSubnet:\n\t\t\t\t\t// IPIP currently requires BGP to be running in order to program routes.\n\t\t\t\t\tif instance.Spec.CalicoNetwork.BGP == nil || *instance.Spec.CalicoNetwork.BGP == operatorv1.BGPDisabled {\n\t\t\t\t\t\treturn fmt.Errorf(\"IPIP encapsulation requires that BGP is enabled\")\n\t\t\t\t\t}\n\t\t\t\tcase operatorv1.EncapsulationVXLAN, operatorv1.EncapsulationVXLANCrossSubnet:\n\t\t\t\tcase operatorv1.EncapsulationNone:\n\t\t\t\t\t// Unencapsulated currently requires BGP to be running in order to program routes.\n\t\t\t\t\tif instance.Spec.CalicoNetwork.BGP == nil || *instance.Spec.CalicoNetwork.BGP == operatorv1.BGPDisabled {\n\t\t\t\t\t\treturn fmt.Errorf(\"Unencapsulated IP pools require that BGP is enabled\")\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\treturn fmt.Errorf(\"%s is invalid for ipPool.encapsulation, should be one of %s\",\n\t\t\t\t\t\tv4pool.Encapsulation, strings.Join(operatorv1.EncapsulationTypesString, \",\"))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Verify the specified encapsulation type is valid.\n\t\t\t\tswitch v4pool.Encapsulation {\n\t\t\t\tcase operatorv1.EncapsulationNone:\n\t\t\t\tdefault:\n\t\t\t\t\treturn fmt.Errorf(\"%s is invalid for ipPool.encapsulation when using non-Calico CNI, should be None\",\n\t\t\t\t\t\tv4pool.Encapsulation)\n\t\t\t\t}\n\t\t\t\tif instance.Spec.CalicoNetwork.BGP != nil && *instance.Spec.CalicoNetwork.BGP == operatorv1.BGPEnabled {\n\t\t\t\t\treturn fmt.Errorf(\"BGP is not supported when using non-Calico CNI\")\n\t\t\t\t}\n\t\t\t\tif v4pool.NodeSelector != \"all()\" {\n\t\t\t\t\treturn fmt.Errorf(\"ipPool.nodeSelector (%s) 
should be 'all()'\", v4pool.NodeSelector)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif v4pool.NodeSelector == \"\" {\n\t\t\t\treturn fmt.Errorf(\"ipPool.nodeSelector should not be empty\")\n\t\t\t}\n\n\t\t\t// Verify NAT outgoing values.\n\t\t\tswitch v4pool.NATOutgoing {\n\t\t\tcase operatorv1.NATOutgoingEnabled, operatorv1.NATOutgoingDisabled:\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"%s is invalid for ipPool.natOutgoing, should be one of %s\",\n\t\t\t\t\tv4pool.NATOutgoing, strings.Join(operatorv1.NATOutgoingTypesString, \",\"))\n\t\t\t}\n\n\t\t\tif v4pool.BlockSize != nil {\n\t\t\t\tif *v4pool.BlockSize > 32 || *v4pool.BlockSize < 20 {\n\t\t\t\t\treturn fmt.Errorf(\"ipPool.blockSize must be greater than 19 and less than or equal to 32\")\n\n\t\t\t\t}\n\n\t\t\t\t// Verify that the CIDR contains the blocksize.\n\t\t\t\tones, _ := cidr.Mask.Size()\n\t\t\t\tif int32(ones) > *v4pool.BlockSize {\n\t\t\t\t\treturn fmt.Errorf(\"IP pool size is too small. It must be equal to or greater than the block size.\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif v6pool != nil {\n\t\t\t_, cidr, err := net.ParseCIDR(v6pool.CIDR)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"ipPool.CIDR(%s) is invalid: %s\", v6pool.CIDR, err)\n\t\t\t}\n\n\t\t\tif v6pool.Encapsulation != operatorv1.EncapsulationNone {\n\t\t\t\treturn fmt.Errorf(\"Encapsulation is not supported by IPv6 pools, but it is set for %s\", v6pool.CIDR)\n\t\t\t}\n\n\t\t\t// Verify NAT outgoing values.\n\t\t\tswitch v6pool.NATOutgoing {\n\t\t\tcase operatorv1.NATOutgoingEnabled, operatorv1.NATOutgoingDisabled:\n\t\t\t\t// Valid.\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"%s is invalid for ipPool.natOutgoing, should be one of %s\",\n\t\t\t\t\tv6pool.NATOutgoing, strings.Join(operatorv1.NATOutgoingTypesString, \",\"))\n\t\t\t}\n\n\t\t\tif instance.Spec.CNI.Type != operatorv1.PluginCalico {\n\t\t\t\tif v6pool.NodeSelector != \"all()\" {\n\t\t\t\t\treturn fmt.Errorf(\"ipPool.nodeSelector (%s) should be 'all()' when using non-Calico CNI 
plugin\", v6pool.NodeSelector)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif v6pool.NodeSelector == \"\" {\n\t\t\t\treturn fmt.Errorf(\"ipPool.nodeSelector should not be empty\")\n\t\t\t}\n\n\t\t\tif v6pool.BlockSize != nil {\n\t\t\t\tif *v6pool.BlockSize > 128 || *v6pool.BlockSize < 116 {\n\t\t\t\t\treturn fmt.Errorf(\"ipPool.blockSize must be greater than 115 and less than or equal to 128\")\n\t\t\t\t}\n\n\t\t\t\t// Verify that the CIDR contains the blocksize.\n\t\t\t\tones, _ := cidr.Mask.Size()\n\t\t\t\tif int32(ones) > *v6pool.BlockSize {\n\t\t\t\t\treturn fmt.Errorf(\"IP pool size is too small. It must be equal to or greater than the block size.\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif instance.Spec.CalicoNetwork.NodeAddressAutodetectionV4 != nil {\n\t\t\terr := validateNodeAddressDetection(instance.Spec.CalicoNetwork.NodeAddressAutodetectionV4)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif instance.Spec.CalicoNetwork.NodeAddressAutodetectionV6 != nil {\n\t\t\terr := validateNodeAddressDetection(instance.Spec.CalicoNetwork.NodeAddressAutodetectionV6)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif instance.Spec.CalicoNetwork.HostPorts != nil {\n\t\t\tif instance.Spec.CNI.Type != operatorv1.PluginCalico {\n\t\t\t\treturn fmt.Errorf(\"spec.calicoNetwork.hostPorts is supported only for Calico CNI\")\n\t\t\t}\n\t\t\terr := validateHostPorts(instance.Spec.CalicoNetwork.HostPorts)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif instance.Spec.CalicoNetwork.MultiInterfaceMode != nil {\n\t\t\tif instance.Spec.CNI.Type != operatorv1.PluginCalico {\n\t\t\t\treturn fmt.Errorf(\"spec.calicoNetwork.multiInterfaceMode is supported only for Calico CNI\")\n\t\t\t}\n\t\t}\n\n\t\tif instance.Spec.CalicoNetwork.ContainerIPForwarding != nil {\n\t\t\tif instance.Spec.CNI.Type != operatorv1.PluginCalico {\n\t\t\t\treturn fmt.Errorf(\"spec.calicoNetwork.containerIPForwarding is supported only for Calico 
CNI\")\n\t\t\t}\n\t\t}\n\t}\n\n\t// Verify that the flexvolume path is valid - either \"None\" (to disable) or a valid absolute path.\n\tif instance.Spec.FlexVolumePath != \"None\" && !path.IsAbs(instance.Spec.FlexVolumePath) {\n\t\treturn fmt.Errorf(\"Installation spec.FlexVolumePath '%s' is not an absolute path\",\n\t\t\tinstance.Spec.FlexVolumePath)\n\t}\n\n\t// We only support RollingUpdate for the node daemonset strategy.\n\tif instance.Spec.NodeUpdateStrategy.Type != appsv1.RollingUpdateDaemonSetStrategyType {\n\t\treturn fmt.Errorf(\"Installation spec.NodeUpdateStrategy.type '%s' is not supported\",\n\t\t\tinstance.Spec.NodeUpdateStrategy.RollingUpdate)\n\t}\n\n\tif instance.Spec.ControlPlaneNodeSelector != nil {\n\t\tif v, ok := instance.Spec.ControlPlaneNodeSelector[\"beta.kubernetes.io/os\"]; ok && v != \"linux\" {\n\t\t\treturn fmt.Errorf(\"Installation spec.ControlPlaneNodeSelector 'beta.kubernetes.io/os=%s' is not supported\", v)\n\t\t}\n\t\tif v, ok := instance.Spec.ControlPlaneNodeSelector[\"kubernetes.io/os\"]; ok && v != \"linux\" {\n\t\t\treturn fmt.Errorf(\"Installation spec.ControlPlaneNodeSelector 'kubernetes.io/os=%s' is not supported\", v)\n\t\t}\n\t}\n\n\treturn nil\n}",
"func ValidateCRD(client *clientset.Clientset, crdName string) error {\n\treturn wait.PollImmediate(retryInterval, crdTimeout, func() (bool, error) {\n\t\tcrd, err := client.ApiextensionsV1beta1().CustomResourceDefinitions().Get(context.TODO(), crdName, metav1.GetOptions{})\n\t\tif errors.IsNotFound(err) {\n\t\t\treturn false, nil\n\t\t} else if err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tfor _, cond := range crd.Status.Conditions {\n\t\t\tswitch cond.Type {\n\t\t\tcase apiextensionsv1beta1.Established:\n\t\t\t\tif cond.Status == apiextensionsv1beta1.ConditionTrue {\n\t\t\t\t\treturn true, nil\n\t\t\t\t}\n\t\t\tcase apiextensionsv1beta1.NamesAccepted:\n\t\t\t\tif cond.Status == apiextensionsv1beta1.ConditionFalse {\n\t\t\t\t\treturn false, fmt.Errorf(\"name conflict: %v\", cond.Reason)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false, nil\n\t})\n}",
"func ValidateResourcesExist(client kclient.ClientInterface, devfileObj parser.DevfileObj, k8sComponents []devfile.Component, context string) error {\n\tif len(k8sComponents) == 0 {\n\t\treturn nil\n\t}\n\n\tvar unsupportedResources []string\n\tfor _, c := range k8sComponents {\n\t\tkindErr, err := ValidateResourcesExistInK8sComponent(client, devfileObj, c, context)\n\t\tif err != nil {\n\t\t\tif kindErr != \"\" {\n\t\t\t\tunsupportedResources = append(unsupportedResources, kindErr)\n\t\t\t} else {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(unsupportedResources) > 0 {\n\t\t// tell the user about all the unsupported resources in one message\n\t\treturn fmt.Errorf(\"following resource(s) in the devfile are not supported by your cluster; please install corresponding Operator(s) before doing \\\"odo dev\\\": %s\", strings.Join(unsupportedResources, \", \"))\n\t}\n\treturn nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
NewDir creates a new Interface that converts the return value of a Directory's Readdir method into 9P Stat structures. | func NewDir(dir Directory, abspath string, pool *qidpool.Pool) Interface {
return &dirReader{
Directory: dir,
pool: pool,
path: abspath,
}
} | [
"func newDir(name string, attr plugin.EntryAttributes, impl Interface, path string) *dir {\n\tvd := &dir{\n\t\tEntryBase: plugin.NewEntry(name),\n\t\timpl: impl,\n\t\tpath: path,\n\t}\n\tvd.SetAttributes(attr)\n\tvd.SetTTLOf(plugin.OpenOp, 60*time.Second)\n\t// Caching handled in List based on 'impl'.\n\tvd.DisableCachingFor(plugin.ListOp)\n\n\treturn vd\n}",
"func newDir(name string, attr plugin.EntryAttributes, impl Interface, path string) *dir {\n\tvd := &dir{\n\t\tEntryBase: plugin.NewEntry(name),\n\t}\n\tvd.impl = impl\n\tvd.path = path\n\tvd.SetAttributes(attr)\n\treturn vd\n}",
"func (ld *LocalDir) newDir() (*LocalDir, error) {\n\tpath := filepath.Join(ld.path, fmt.Sprintf(\"dir-%s\", hex.EncodeToString(fastrand.Bytes(4))))\n\treturn &LocalDir{path: path}, os.MkdirAll(path, 0777)\n}",
"func newDirectory(root string, maxDepth int) *Directory {\n\td, err := os.Lstat(root);\n\tif err != nil || !isPkgDir(d) {\n\t\treturn nil\n\t}\n\treturn newDirTree(root, d.Name, 0, maxDepth);\n}",
"func newDirFileInfo(name string) os.FileInfo {\n\treturn &bindataFileInfo{\n\t\tname: name,\n\t\tsize: 0,\n\t\tmode: os.FileMode(2147484068), // equal os.FileMode(0644)|os.ModeDir\n\t\tmodTime: time.Time{}}\n}",
"func NewDirTracker(dir string, newEntry func(string) (DirectoryTrackerInterface, error)) *DirTracker {\n\tnumOutsanding := NumTrackerOutstanding // FIXME expose this\n\tvar dt DirTracker\n\tdt.dm = make(map[string]DirectoryTrackerInterface)\n\tdt.newEntry = newEntry\n\tdt.tokenChan = makeTokenChan(numOutsanding)\n\tdt.wg = new(sync.WaitGroup)\n\tdt.errChan = make(chan error)\n\tdt.wg.Add(1)\n\tgo dt.populateDircount(dir)\n\tgo func() {\n\t\terr := filepath.WalkDir(dir, dt.directoryWalker)\n\t\tif err != nil {\n\t\t\tdt.errChan <- err\n\t\t}\n\t\tfor _, val := range dt.dm {\n\t\t\tval.Close()\n\t\t}\n\t\tdt.wg.Wait()\n\t\tdt.finished.Set()\n\t\tclose(dt.errChan)\n\t\tclose(dt.tokenChan)\n\t}()\n\n\treturn &dt\n}",
"func (f *Fs) newDir(dir fs.Directory) fs.Directory {\n\treturn dir // We're using the same dir\n}",
"func NewDir(dir string) http.Handler {\n\treturn New(Config{Dir: dir})\n}",
"func newDirectory(dns, prefix string) *directory {\n\treturn &directory{prefix: prefix, dns: dns}\n}",
"func NewSearchDir() *SearchDir {\n newObj := SearchDir {\n DoneChan: make(chan bool),\n ErrChan: make(chan string),\n FileChan: make(chan string),\n }\n\n return &newObj\n}",
"func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {\n\tnewDir := fs.NewDirCopy(ctx, dir)\n\tremote := dir.Remote()\n\tdecryptedRemote, err := f.cipher.DecryptDirName(remote)\n\tif err != nil {\n\t\tfs.Debugf(remote, \"Undecryptable dir name: %v\", err)\n\t} else {\n\t\tnewDir.SetRemote(decryptedRemote)\n\t}\n\treturn newDir\n}",
"func (gr *HexGrid) NewDir() HexDir {\n\treturn HexDir(rand.Int31n(6))\n}",
"func NewDirectoryIndex(rdr io.Reader) (DirIndex, error) {\n\tvar index DirIndex\n\terr := binary.Read(rdr, binary.LittleEndian, &index.DirIndexInit)\n\tif err != nil {\n\t\treturn index, err\n\t}\n\ttmp := make([]byte, index.NameSize+1, index.NameSize+1)\n\terr = binary.Read(rdr, binary.LittleEndian, &tmp)\n\tif err != nil {\n\t\treturn index, err\n\t}\n\tindex.Name = string(tmp)\n\treturn index, nil\n}",
"func newDirHandle(d *Dir) *DirHandle {\n\treturn &DirHandle{\n\t\td: d,\n\t}\n}",
"func New() DirTree {\n\treturn make(DirTree)\n}",
"func (*FileSystemBase) Readdir(path string,\n\tfill func(name string, stat *Stat_t, ofst int64) bool,\n\tofst int64,\n\tfh uint64) int {\n\treturn -ENOSYS\n}",
"func Dir(attrs []htmlgo.Attribute, children ...HTML) HTML {\n\treturn &htmlgo.Tree{Tag: \"dir\", Attributes: attrs, Children: children}\n}",
"func newTestFileSystemWithDir(name string) (*DirNode, *FileSystem, error) {\n\tdir := testDir(name)\n\tfs := newTestFileSystem(dir)\n\tsp := modules.RandomSiaPath()\n\tif err := fs.NewSiaDir(sp, modules.DefaultDirPerm); err != nil {\n\t\tpanic(err) // Reflect behavior of newTestFileSystemWithFile.\n\t}\n\tsd, err := fs.OpenSiaDir(sp)\n\treturn sd, fs, err\n}",
"func (dTreeOp DirTreeOp) New() DirTreeOp {\n newDTreeOp := DirTreeOp{}\n newDTreeOp.ErrReturns = make([]error, 0, 100)\n return newDTreeOp\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
TestConcurrentBuildControllers tests the transition of a build from new to pending. Ensures that only a single New > Pending transition happens and that only a single pod is created during a set period of time. | func TestConcurrentBuildControllers(t *testing.T) {
defer testutil.DumpEtcdOnFailure(t)
// Start a master with multiple BuildControllers
osClient, kClient := setupBuildControllerTest(controllerCount{BuildControllers: 5}, t)
build.RunBuildControllerTest(t, osClient, kClient)
} | [
"func TestConcurrentBuildPodControllers(t *testing.T) {\n\tdefer testutil.DumpEtcdOnFailure(t)\n\t// Start a master with multiple BuildPodControllers\n\tosClient, kClient := setupBuildControllerTest(controllerCount{BuildPodControllers: 5}, t)\n\tbuild.RunBuildPodControllerTest(t, osClient, kClient)\n}",
"func TestConcurrentBuildControllersPodSync(t *testing.T) {\n\t// Start a master with multiple BuildControllers\n\tbuildClient, _, kClient, fn := setupBuildControllerTest(controllerCount{BuildControllers: 5}, t)\n\tdefer fn()\n\tbuild.RunBuildControllerPodSyncTest(t, buildClient, kClient)\n}",
"func TestTriggerController(t *testing.T) {\n\tconfig, stopFn := framework.RunControlPlane(t)\n\tdefer stopFn()\n\n\tctx, cancel := context.WithTimeout(context.TODO(), time.Second*20)\n\tdefer cancel()\n\n\tfakeClock := &fakeclock.FakeClock{}\n\t// Build, instantiate and run the trigger controller.\n\tkubeClient, factory, cmCl, cmFactory := framework.NewClients(t, config)\n\n\tnamespace := \"testns\"\n\n\t// Create Namespace\n\tns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}\n\t_, err := kubeClient.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tctrl, queue, mustSync := trigger.NewController(logf.Log, cmCl, factory, cmFactory, framework.NewEventRecorder(t), fakeClock, policies.NewTriggerPolicyChain(fakeClock))\n\tc := controllerpkg.NewController(\n\t\tcontext.Background(),\n\t\t\"trigger_test\",\n\t\tmetrics.New(logf.Log),\n\t\tctrl.ProcessItem,\n\t\tmustSync,\n\t\tnil,\n\t\tqueue,\n\t)\n\tstopController := framework.StartInformersAndController(t, factory, cmFactory, c)\n\tdefer stopController()\n\n\t// Create a Certificate resource and wait for it to have the 'Issuing' condition.\n\tcert, err := cmCl.CertmanagerV1().Certificates(namespace).Create(ctx, &cmapi.Certificate{\n\t\tObjectMeta: metav1.ObjectMeta{Name: \"testcrt\", Namespace: \"testns\"},\n\t\tSpec: cmapi.CertificateSpec{\n\t\t\tSecretName: \"example\",\n\t\t\tCommonName: \"example.com\",\n\t\t\tIssuerRef: cmmeta.ObjectReference{Name: \"testissuer\"}, // doesn't need to exist\n\t\t},\n\t}, metav1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = wait.Poll(time.Millisecond*100, time.Second*5, func() (done bool, err error) {\n\t\tc, err := cmCl.CertmanagerV1().Certificates(cert.Namespace).Get(ctx, cert.Name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tt.Logf(\"Failed to fetch Certificate resource, retrying: %v\", err)\n\t\t\treturn false, nil\n\t\t}\n\t\tif 
!apiutil.CertificateHasCondition(c, cmapi.CertificateCondition{\n\t\t\tType: cmapi.CertificateConditionIssuing,\n\t\t\tStatus: cmmeta.ConditionTrue,\n\t\t}) {\n\t\t\tt.Logf(\"Certificate does not have expected condition, got=%#v\", apiutil.GetCertificateCondition(c, cmapi.CertificateConditionIssuing))\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}",
"func validateController(ctx context.Context, c clientset.Interface, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) {\n\tcontainerImage = trimDockerRegistry(containerImage)\n\tgetPodsTemplate := \"--template={{range.items}}{{.metadata.name}} {{end}}\"\n\n\tgetContainerStateTemplate := fmt.Sprintf(`--template={{if (exists . \"status\" \"containerStatuses\")}}{{range .status.containerStatuses}}{{if (and (eq .name \"%s\") (exists . \"state\" \"running\"))}}true{{end}}{{end}}{{end}}`, containername)\n\n\tgetImageTemplate := fmt.Sprintf(`--template={{if (exists . \"spec\" \"containers\")}}{{range .spec.containers}}{{if eq .name \"%s\"}}{{.image}}{{end}}{{end}}{{end}}`, containername)\n\n\tginkgo.By(fmt.Sprintf(\"waiting for all containers in %s pods to come up.\", testname)) //testname should be selector\nwaitLoop:\n\tfor start := time.Now(); time.Since(start) < framework.PodStartTimeout && ctx.Err() == nil; time.Sleep(5 * time.Second) {\n\t\tgetPodsOutput := e2ekubectl.RunKubectlOrDie(ns, \"get\", \"pods\", \"-o\", \"template\", getPodsTemplate, \"-l\", testname)\n\t\tpods := strings.Fields(getPodsOutput)\n\t\tif numPods := len(pods); numPods != replicas {\n\t\t\tginkgo.By(fmt.Sprintf(\"Replicas for %s: expected=%d actual=%d\", testname, replicas, numPods))\n\t\t\tcontinue\n\t\t}\n\t\tvar runningPods []string\n\t\tfor _, podID := range pods {\n\t\t\trunning := e2ekubectl.RunKubectlOrDie(ns, \"get\", \"pods\", podID, \"-o\", \"template\", getContainerStateTemplate)\n\t\t\tif running != \"true\" {\n\t\t\t\tframework.Logf(\"%s is created but not running\", podID)\n\t\t\t\tcontinue waitLoop\n\t\t\t}\n\n\t\t\tcurrentImage := e2ekubectl.RunKubectlOrDie(ns, \"get\", \"pods\", podID, \"-o\", \"template\", getImageTemplate)\n\t\t\tcurrentImage = trimDockerRegistry(currentImage)\n\t\t\tif currentImage != containerImage {\n\t\t\t\tframework.Logf(\"%s is created but running wrong image; expected: %s, actual: %s\", 
podID, containerImage, currentImage)\n\t\t\t\tcontinue waitLoop\n\t\t\t}\n\n\t\t\t// Call the generic validator function here.\n\t\t\t// This might validate for example, that (1) getting a url works and (2) url is serving correct content.\n\t\t\tif err := validator(ctx, c, podID); err != nil {\n\t\t\t\tframework.Logf(\"%s is running right image but validator function failed: %v\", podID, err)\n\t\t\t\tcontinue waitLoop\n\t\t\t}\n\n\t\t\tframework.Logf(\"%s is verified up and running\", podID)\n\t\t\trunningPods = append(runningPods, podID)\n\t\t}\n\t\t// If we reach here, then all our checks passed.\n\t\tif len(runningPods) == replicas {\n\t\t\treturn\n\t\t}\n\t}\n\t// Reaching here means that one of more checks failed multiple times. Assuming its not a race condition, something is broken.\n\tframework.Failf(\"Timed out after %v seconds waiting for %s pods to reach valid state\", framework.PodStartTimeout.Seconds(), testname)\n}",
"func TestTriggerController(t *testing.T) {\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second*40)\n\tdefer cancel()\n\n\tconfig, stopFn := framework.RunControlPlane(t, ctx)\n\tdefer stopFn()\n\n\tfakeClock := &fakeclock.FakeClock{}\n\t// Build, instantiate and run the trigger controller.\n\tkubeClient, factory, cmCl, cmFactory := framework.NewClients(t, config)\n\n\tnamespace := \"testns\"\n\n\t// Create Namespace\n\tns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}\n\t_, err := kubeClient.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tshouldReissue := policies.NewTriggerPolicyChain(fakeClock).Evaluate\n\tctrl, queue, mustSync := trigger.NewController(logf.Log, cmCl, factory, cmFactory, framework.NewEventRecorder(t), fakeClock, shouldReissue)\n\tc := controllerpkg.NewController(\n\t\tctx,\n\t\t\"trigger_test\",\n\t\tmetrics.New(logf.Log, clock.RealClock{}),\n\t\tctrl.ProcessItem,\n\t\tmustSync,\n\t\tnil,\n\t\tqueue,\n\t)\n\tstopController := framework.StartInformersAndController(t, factory, cmFactory, c)\n\tdefer stopController()\n\n\t// Create a Certificate resource and wait for it to have the 'Issuing' condition.\n\tcert, err := cmCl.CertmanagerV1().Certificates(namespace).Create(ctx, &cmapi.Certificate{\n\t\tObjectMeta: metav1.ObjectMeta{Name: \"testcrt\", Namespace: \"testns\"},\n\t\tSpec: cmapi.CertificateSpec{\n\t\t\tSecretName: \"example\",\n\t\t\tCommonName: \"example.com\",\n\t\t\tIssuerRef: cmmeta.ObjectReference{Name: \"testissuer\"}, // doesn't need to exist\n\t\t},\n\t}, metav1.CreateOptions{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = wait.PollImmediateUntil(time.Millisecond*100, func() (done bool, err error) {\n\t\tc, err := cmCl.CertmanagerV1().Certificates(cert.Namespace).Get(ctx, cert.Name, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tt.Logf(\"Failed to fetch Certificate resource, retrying: %v\", err)\n\t\t\treturn false, 
nil\n\t\t}\n\t\tif !apiutil.CertificateHasCondition(c, cmapi.CertificateCondition{\n\t\t\tType: cmapi.CertificateConditionIssuing,\n\t\t\tStatus: cmmeta.ConditionTrue,\n\t\t}) {\n\t\t\tt.Logf(\"Certificate does not have expected condition, got=%#v\", apiutil.GetCertificateCondition(c, cmapi.CertificateConditionIssuing))\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\t}, ctx.Done())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}",
"func TestNewController(t *testing.T) {\n\tmessagingClientSet, err := clientset.NewForConfig(&rest.Config{})\n\tif err != nil {\n\t\tt.Fail()\n\t}\n\n\tmessagingInformerFactory := informers.NewSharedInformerFactory(messagingClientSet, 0)\n\tnatssChannelInformer := messagingInformerFactory.Messaging().V1alpha1().NatssChannels()\n\n\tc := NewController(reconciler.Options{\n\t\tKubeClientSet: fakekubeclientset.NewSimpleClientset(),\n\t\tDynamicClientSet: nil,\n\t\tNatssClientSet: nil,\n\t\tRecorder: nil,\n\t\tStatsReporter: nil,\n\t\tConfigMapWatcher: nil,\n\t\tLogger: logtesting.TestLogger(t),\n\t\tResyncPeriod: 0,\n\t\tStopChannel: nil,\n\t}, dispatchertesting.NewDispatcherDoNothing(), natssChannelInformer)\n\tif c == nil {\n\t\tt.Errorf(\"unable to create dispatcher controller\")\n\t}\n}",
"func TestController(t *testing.T) {\n\tctx, _ := rtesting.SetupFakeContext(t)\n\tctx, cancel := context.WithTimeout(ctx, 30*time.Second)\n\tdefer cancel()\n\n\t// Create reconcilers, start controller.\n\tresults := test.NewResultsClient(t)\n\n\ttrctrl := taskrun.NewController(ctx, results)\n\tprctrl := pipelinerun.NewController(ctx, results)\n\tgo controller.StartAll(ctx, trctrl, prctrl)\n\n\t// Start informers - this notifies the controller of new events.\n\tgo taskruninformer.Get(ctx).Informer().Run(ctx.Done())\n\tgo pipelineruninformer.Get(ctx).Informer().Run(ctx.Done())\n\n\tpipeline := fakepipelineclient.Get(ctx)\n\tt.Run(\"taskrun\", func(t *testing.T) {\n\t\ttr := &v1beta1.TaskRun{\n\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\tAPIVersion: \"tekton.dev/v1beta1\",\n\t\t\t\tKind: \"TaskRun\",\n\t\t\t},\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"taskrun\",\n\t\t\t\tNamespace: \"ns\",\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\"demo\": \"demo\",\n\t\t\t\t\t// This TaskRun belongs to a PipelineRun, so the record should\n\t\t\t\t\t// be associated with the PipelineRun result.\n\t\t\t\t\t\"tekton.dev/pipelineRun\": \"pr\",\n\t\t\t\t},\n\t\t\t\tOwnerReferences: []metav1.OwnerReference{{\n\t\t\t\t\tAPIVersion: \"tekton.dev/v1beta1\",\n\t\t\t\t\tKind: \"PipelineRun\",\n\t\t\t\t\tUID: \"pr-id\",\n\t\t\t\t}},\n\t\t\t\tUID: \"tr-id\",\n\t\t\t},\n\t\t}\n\n\t\t// The following is a hack to make the fake clients play nice with\n\t\t// each other. 
While the controller uses the typed informer that uses\n\t\t// the fake pipeline client to receive events, the controller uses the\n\t\t// fake dynamic client to fetch and update objects during reconcile.\n\t\t// These fake clients store objects independently, so we create the\n\t\t// object in each client to make sure the data is populated in both\n\t\t// places.\n\t\tif _, err := pipeline.TektonV1beta1().TaskRuns(tr.GetNamespace()).Create(tr); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdata, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tr)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"ToUnstructured: %v\", err)\n\t\t}\n\t\t_, err = dynamicinject.Get(ctx).Resource(apis.KindToResource(tr.GroupVersionKind())).Namespace(tr.GetNamespace()).Create(&unstructured.Unstructured{Object: data}, metav1.CreateOptions{})\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Create: %v\", err)\n\t\t}\n\n\t\twait(ctx, t, tr, \"ns/results/pr-id\")\n\t})\n\n\tt.Run(\"pipelinerun\", func(t *testing.T) {\n\t\tpr := &v1beta1.PipelineRun{\n\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\tAPIVersion: \"tekton.dev/v1beta1\",\n\t\t\t\tKind: \"PipelineRun\",\n\t\t\t},\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"pr\",\n\t\t\t\tNamespace: \"ns\",\n\t\t\t\tAnnotations: map[string]string{\"demo\": \"demo\"},\n\t\t\t\tUID: \"pr-id\",\n\t\t\t},\n\t\t}\n\n\t\t// Same create hack as taskrun (see above).\n\t\tif _, err := pipeline.TektonV1beta1().PipelineRuns(pr.GetNamespace()).Create(pr); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdata, err := runtime.DefaultUnstructuredConverter.ToUnstructured(pr)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"ToUnstructured: %v\", err)\n\t\t}\n\t\t_, err = dynamicinject.Get(ctx).Resource(apis.KindToResource(pr.GroupVersionKind())).Namespace(pr.GetNamespace()).Create(&unstructured.Unstructured{Object: data}, metav1.CreateOptions{})\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Create: %v\", err)\n\t\t}\n\n\t\twait(ctx, t, pr, \"ns/results/pr-id\")\n\t})\n}",
"func (m *StatusCheckerSuite) TestConcurrency(c *C) {\n\tentry := NewHealthStatusEntry(false)\n\t// check initial healthy status.\n\tc.Assert(entry.IsHealthy(), IsFalse)\n\n\tstopChan := make(chan interface{}, 20)\n\t// separate goroutine to read fields.\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase _, ok := <-stopChan:\n\t\t\t\tif ok {\n\t\t\t\t\t_ = entry.IsHealthy()\n\t\t\t\t} else {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\t// main goroutine to update fields.\n\tfor i := 0; i < 20; i++ {\n\t\tstopChan <- nil\n\t\toriginalStatus := entry.IsHealthy()\n\t\tentry.UpdateHealthCheckStatus(!originalStatus, 1, 1)\n\t\tstopChan <- nil\n\t}\n\n\tclose(stopChan)\n}",
"func (b *Botanist) WaitForControllersToBeActive(ctx context.Context) error {\n\ttype controllerInfo struct {\n\t\tname string\n\t\tlabels map[string]string\n\t}\n\n\ttype checkOutput struct {\n\t\tcontrollerName string\n\t\tready bool\n\t\terr error\n\t}\n\n\tvar (\n\t\tcontrollers = []controllerInfo{}\n\t\tpollInterval = 5 * time.Second\n\t)\n\n\t// Check whether the kube-controller-manager deployment exists\n\tif err := b.K8sSeedClient.Client().Get(ctx, kutil.Key(b.Shoot.SeedNamespace, v1beta1constants.DeploymentNameKubeControllerManager), &appsv1.Deployment{}); err == nil {\n\t\tcontrollers = append(controllers, controllerInfo{\n\t\t\tname: v1beta1constants.DeploymentNameKubeControllerManager,\n\t\t\tlabels: map[string]string{\n\t\t\t\t\"app\": \"kubernetes\",\n\t\t\t\t\"role\": \"controller-manager\",\n\t\t\t},\n\t\t})\n\t} else if client.IgnoreNotFound(err) != nil {\n\t\treturn err\n\t}\n\n\treturn retry.UntilTimeout(context.TODO(), pollInterval, 90*time.Second, func(ctx context.Context) (done bool, err error) {\n\t\tvar (\n\t\t\twg sync.WaitGroup\n\t\t\tout = make(chan *checkOutput)\n\t\t)\n\n\t\tfor _, controller := range controllers {\n\t\t\twg.Add(1)\n\n\t\t\tgo func(controller controllerInfo) {\n\t\t\t\tdefer wg.Done()\n\n\t\t\t\tpodList := &corev1.PodList{}\n\t\t\t\terr := b.K8sSeedClient.Client().List(ctx, podList,\n\t\t\t\t\tclient.InNamespace(b.Shoot.SeedNamespace),\n\t\t\t\t\tclient.MatchingLabels(controller.labels))\n\t\t\t\tif err != nil {\n\t\t\t\t\tout <- &checkOutput{controllerName: controller.name, err: err}\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// Check that only one replica of the controller exists.\n\t\t\t\tif len(podList.Items) != 1 {\n\t\t\t\t\tb.Logger.Infof(\"Waiting for %s to have exactly one replica\", controller.name)\n\t\t\t\t\tout <- &checkOutput{controllerName: controller.name}\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\t// Check that the existing replica is not in getting deleted.\n\t\t\t\tif podList.Items[0].DeletionTimestamp != nil 
{\n\t\t\t\t\tb.Logger.Infof(\"Waiting for a new replica of %s\", controller.name)\n\t\t\t\t\tout <- &checkOutput{controllerName: controller.name}\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// Check if the controller is active by reading its leader election record.\n\t\t\t\tleaderElectionRecord, err := common.ReadLeaderElectionRecord(b.K8sShootClient, resourcelock.EndpointsResourceLock, metav1.NamespaceSystem, controller.name)\n\t\t\t\tif err != nil {\n\t\t\t\t\tout <- &checkOutput{controllerName: controller.name, err: err}\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif delta := metav1.Now().UTC().Sub(leaderElectionRecord.RenewTime.Time.UTC()); delta <= pollInterval-time.Second {\n\t\t\t\t\tout <- &checkOutput{controllerName: controller.name, ready: true}\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tb.Logger.Infof(\"Waiting for %s to be active\", controller.name)\n\t\t\t\tout <- &checkOutput{controllerName: controller.name}\n\t\t\t}(controller)\n\t\t}\n\n\t\tgo func() {\n\t\t\twg.Wait()\n\t\t\tclose(out)\n\t\t}()\n\n\t\tfor result := range out {\n\t\t\tif result.err != nil {\n\t\t\t\treturn retry.SevereError(fmt.Errorf(\"could not check whether controller %s is active: %+v\", result.controllerName, result.err))\n\t\t\t}\n\t\t\tif !result.ready {\n\t\t\t\treturn retry.MinorError(fmt.Errorf(\"controller %s is not active\", result.controllerName))\n\t\t\t}\n\t\t}\n\n\t\treturn retry.Ok()\n\t})\n}",
"func CreateController() controller.Controller {\n\tvar instanceMap = make(map[string]*testServiceInstance)\n\tservices := []*testService{\n\t\tnewTestService(\n\t\t\t\"test-service\",\n\t\t\t\"2f2e85b5-030d-4776-ba7e-e26eb312f10f\",\n\t\t\t\"A test service that only has a single plan\",\n\t\t\t\"35b6030d-f81e-49cd-9d1f-2f5eaec57048\",\n\t\t\tfalse, http.StatusBadRequest, 0, 0, 0, 0),\n\t\tnewTestService(\n\t\t\t\"test-service-provision-fail400\",\n\t\t\t\"308c0400-2edb-45d6-a63e-67f18226a404\",\n\t\t\t\"Provisioning of this service always returns HTTP status 400 (which is a terminal, non-retriable error))\",\n\t\t\t\"44443058-077e-43f3-9857-7ca7efedafd9\",\n\t\t\tfalse, http.StatusBadRequest, failAlways, 0, 0, 0),\n\t\tnewTestService(\n\t\t\t\"test-service-provision-fail500\",\n\t\t\t\"308c0500-2edb-45d6-a63e-67f18226a404\",\n\t\t\t\"Provisioning of this service always returns HTTP status 500 (provisioning never succeeds)\",\n\t\t\t\"525a787c-78d8-42af-8800-e9bf4bd71117\",\n\t\t\tfalse, http.StatusInternalServerError, failAlways, 0, 0, 0),\n\t\tnewTestService(\n\t\t\t\"test-service-provision-fail500-5x\",\n\t\t\t\"389e6500-93f9-49b4-bbe4-76e304cad22c\",\n\t\t\t\"Provisioning of this service fails 5 times, then succeeds.\",\n\t\t\t\"21f83e68-0f4d-4377-bf5a-a5dddfaf7a5c\",\n\t\t\tfalse, http.StatusInternalServerError, 5, 0, 0, 0),\n\t\tnewTestService(\n\t\t\t\"test-service-provision-fail500-5x-deprovision-fail500-5x\",\n\t\t\t\"41f7f500-118c-4f22-a4e9-fc56c02046c0\",\n\t\t\t\"Provisioning of this service fails 5 times, then succeeds; deprovisioning also fails 5 times, then succeeds.\",\n\t\t\t\"1179dfe7-9dbb-4d23-987f-2f722ca4f733\",\n\t\t\tfalse, http.StatusInternalServerError, 5, 0, 5, 0),\n\t\tnewTestService(\n\t\t\t\"test-service-deprovision-fail400\",\n\t\t\t\"43e24400-93ae-4c7d-bfd3-7cd03f051872\",\n\t\t\t\"Provisioning of this service always succeeds, but deprovisiong always fails with error 400 (a non-retriable 
error).\",\n\t\t\t\"b8e55ea4-05a7-43d6-a0f8-64fbee9e6cc6\",\n\t\t\tfalse, http.StatusBadRequest, 0, 0, failAlways, 0),\n\t\tnewTestService(\n\t\t\t\"test-service-deprovision-fail500\",\n\t\t\t\"43e24500-93ae-4c7d-bfd3-7cd03f051872\",\n\t\t\t\"Provisioning of this service always succeeds, but deprovisiong always fails.\",\n\t\t\t\"27ac655b-864e-4447-8bea-eb38a0e0cf79\",\n\t\t\tfalse, http.StatusInternalServerError, 0, 0, failAlways, 0),\n\t\tnewTestService(\n\t\t\t\"test-service-deprovision-fail500-5x\",\n\t\t\t\"4ed5a500-35ed-4748-be64-5007951373ab\",\n\t\t\t\"Provisioning of this service always succeeds, while deprovisioning fails 5 times, then succeeds.\",\n\t\t\t\"3dab1aa9-4004-4252-b1ff-3d0bff42b36b\",\n\t\t\tfalse, http.StatusInternalServerError, 0, 0, 5, 0),\n\t\tnewTestService(\n\t\t\t\"test-service-update-fail400\",\n\t\t\t\"4efa9400-aafb-4738-94ab-e6e10a2f4af8\",\n\t\t\t\"Update of this service always returns HTTP status 400 (which is a terminal, non-retriable error)\",\n\t\t\t\"e3d738b6-8d5c-4f40-ba5b-2613e02af41d\",\n\t\t\tfalse, http.StatusBadRequest, 0, failAlways, 0, 0),\n\t\tnewTestService(\n\t\t\t\"test-service-update-fail500\",\n\t\t\t\"4efa9500-aafb-4738-94ab-e6e10a2f4af8\",\n\t\t\t\"Update of this service always returns HTTP status 500 (update never succeeds)\",\n\t\t\t\"729c5f1f-aef4-4c38-81db-227993ec24c6\",\n\t\t\tfalse, http.StatusInternalServerError, 0, failAlways, 0, 0),\n\t\tnewTestService(\n\t\t\t\"test-service-update-fail500-5x\",\n\t\t\t\"4f1eb500-6762-4605-917a-cfca0eaa9b01\",\n\t\t\t\"Update of this service fails 5 times, then succeeds.\",\n\t\t\t\"eb5a24ba-69ab-4acb-964a-dcad600ba4d3\",\n\t\t\tfalse, http.StatusInternalServerError, 0, 5, 0, 0),\n\t\tnewTestService(\n\t\t\t\"test-service-async\",\n\t\t\t\"5a680caf-807e-4157-85af-552dc71b72d6\",\n\t\t\t\"A test service that is asynchronously provisioned & deprovisioned\",\n\t\t\t\"4f6741a8-2451-43c7-b473-a4f8e9f89a87\",\n\t\t\ttrue, noHTTPError, 0, 0, 0, 
0),\n\t\tnewTestService(\n\t\t\t\"test-service-async-provision-fail\",\n\t\t\t\"7aac9500-c42a-46f4-86d6-df21437d4c7f\",\n\t\t\t\"A test service that is asynchronously provisioned, but provisioning always returns state:failed\",\n\t\t\t\"9aca0b9a-192e-416a-a809-67e592bfa681\",\n\t\t\ttrue, noHTTPError, failAlways, 0, 0, 0),\n\t\tnewTestService(\n\t\t\t\"test-service-async-provision-fail-5x\",\n\t\t\t\"7f73e500-1ba0-4882-94c7-7624b4219520\",\n\t\t\t\"A test service that is asynchronously provisioned; provisioning returns state:failed 5 times, then succeeds.\",\n\t\t\t\"a1027080-966d-4ec3-b4e1-abc3f52b7de2\",\n\t\t\ttrue, noHTTPError, 5, 0, 0, 0),\n\t\tnewTestService(\n\t\t\t\"test-service-async-provision-fail-5x-deprovision-fail-5x\",\n\t\t\t\"86709500-1acb-473b-baa8-899e4dce12dc\",\n\t\t\t\"A test service that is asynchronously provisioned; provisioning returns state:failed 5 times, then succeeds; deprovisioning also returns state:failed 5 times, then succeeds.\",\n\t\t\t\"35234488-830f-4efe-ae16-a36bb0092cce\",\n\t\t\ttrue, noHTTPError, 5, 0, 5, 0),\n\t\tnewTestService(\n\t\t\t\"test-service-async-deprovision-fail\",\n\t\t\t\"9bee1500-e5f7-4bd8-94de-eb65c811be83\",\n\t\t\t\"A test service that is asynchronously provisioned; provisioning always succeeds, deprovisiong always returns state:failed.\",\n\t\t\t\"6096a7e0-7ea6-4782-8246-c6e5d9eb97ca\",\n\t\t\ttrue, noHTTPError, 0, 0, failAlways, 0),\n\t\tnewTestService(\n\t\t\t\"test-service-async-deprovision-fail-5x\",\n\t\t\t\"acddd500-97e5-4c69-99e2-d1a056b1ad25\",\n\t\t\t\"A test service that is asynchronously provisioned; provisioning always succeeds, deprovisioning returns state:failed 5 times, then succeeds.\",\n\t\t\t\"dce5da49-fc42-4490-a053-8415fd569461\",\n\t\t\ttrue, noHTTPError, 0, 0, 5, 0),\n\t\tnewTestService(\n\t\t\t\"test-service-async-update-fail\",\n\t\t\t\"ad6ab500-c287-4090-a9ab-6d49b1204496\",\n\t\t\t\"Update of this service always returns state:failed in the last operation 
response\",\n\t\t\t\"94f9a5fd-6a99-440d-9315-ddb144755349\",\n\t\t\ttrue, noHTTPError, 0, failAlways, 0, 0),\n\t\tnewTestService(\n\t\t\t\"test-service-async-update-fail-5x\",\n\t\t\t\"aec24500-f8a5-4c95-a02b-92b297bf7805\",\n\t\t\t\"Update of this service returns state:failed 5 times, then succeeds.\",\n\t\t\t\"e11860e1-f62f-4383-9eb4-30d8641fe2f0\",\n\t\t\ttrue, noHTTPError, 0, 5, 0, 0),\n\t\tnewTestService(\n\t\t\t\"test-service-async-last-operation-fail400\",\n\t\t\t\"c594a400-ec7f-494b-a266-d540cf977382\",\n\t\t\t\"A test service that is asynchronously provisioned, but lastOperation always fails with error 400\",\n\t\t\t\"e937e0b6-ddd5-4565-82e2-1cda3d16ad32\",\n\t\t\ttrue, http.StatusBadRequest, 0, 0, 0, failAlways),\n\t\tnewTestService(\n\t\t\t\"test-service-async-last-operation-fail500\",\n\t\t\t\"c594a500-ec7f-494b-a266-d540cf977382\",\n\t\t\t\"A test service that is asynchronously provisioned, but lastOperation always fails with error 500\",\n\t\t\t\"624eea7a-4fb1-4e67-9ec8-379f0c855c3b\",\n\t\t\ttrue, http.StatusInternalServerError, 0, 0, 0, failAlways),\n\t\tnewTestService(\n\t\t\t\"test-service-async-last-operation-fail500-5x\",\n\t\t\t\"cce99500-3f6e-42f1-8100-5408a7b79e43\",\n\t\t\t\"A test service that is asynchronously provisioned, but lastOperation only succeeds on the 5th attempt.\",\n\t\t\t\"4254a380-4e3d-4cc1-b2b6-3c7e55b63ea2\",\n\t\t\ttrue, http.StatusInternalServerError, 0, 0, 0, 5),\n\t\t{\n\t\t\tService: brokerapi.Service{\n\t\t\t\tName: \"test-service-multiple-plans\",\n\t\t\t\tID: \"f1b57a42-8035-4291-a555-51c461df6072\",\n\t\t\t\tDescription: \"A test service\",\n\t\t\t\tPlans: []brokerapi.ServicePlan{{\n\t\t\t\t\tName: \"default\",\n\t\t\t\t\tID: \"06576262-f0d5-11e8-83eb-54ee754ec85f\",\n\t\t\t\t\tDescription: \"Sample plan description\",\n\t\t\t\t\tFree: true,\n\t\t\t\t}, {\n\t\t\t\t\tName: \"premium\",\n\t\t\t\t\tID: \"e251a5bb-3266-4391-bdde-be9e87bffe2f\",\n\t\t\t\t\tDescription: \"Premium plan\",\n\t\t\t\t\tFree: 
false,\n\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tBindable: true,\n\t\t\t\tPlanUpdateable: true,\n\t\t\t},\n\t\t\tDeprovisionFailTimes: 0,\n\t\t},\n\t\t{\n\t\t\tService: brokerapi.Service{\n\t\t\t\tName: \"test-service-with-schemas\",\n\t\t\t\tID: \"f485442d-319b-43d4-80ef-bdf7ae200b09\",\n\t\t\t\tDescription: \"A test service with parameter and response schemas\",\n\t\t\t\tPlans: []brokerapi.ServicePlan{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"default\",\n\t\t\t\t\t\tID: \"0b8e785e-9053-4acf-9eb8-c15f879ff485\",\n\t\t\t\t\t\tDescription: \"Plan with parameter and response schemas\",\n\t\t\t\t\t\tFree: true,\n\t\t\t\t\t\tSchemas: &brokerapi.Schemas{\n\t\t\t\t\t\t\tServiceInstance: &brokerapi.ServiceInstanceSchema{\n\t\t\t\t\t\t\t\tCreate: &brokerapi.InputParametersSchema{\n\t\t\t\t\t\t\t\t\tParameters: map[string]interface{}{ // TODO: use a JSON Schema library instead?\n\t\t\t\t\t\t\t\t\t\t\"$schema\": \"http://json-schema.org/draft-04/schema#\",\n\t\t\t\t\t\t\t\t\t\t\"type\": \"object\",\n\t\t\t\t\t\t\t\t\t\t\"properties\": map[string]interface{}{\n\t\t\t\t\t\t\t\t\t\t\t\"param-1\": map[string]interface{}{\n\t\t\t\t\t\t\t\t\t\t\t\t\"description\": \"First input parameter\",\n\t\t\t\t\t\t\t\t\t\t\t\t\"type\": \"string\",\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\"param-2\": map[string]interface{}{\n\t\t\t\t\t\t\t\t\t\t\t\t\"description\": \"Second input parameter\",\n\t\t\t\t\t\t\t\t\t\t\t\t\"type\": \"string\",\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tUpdate: &brokerapi.InputParametersSchema{\n\t\t\t\t\t\t\t\t\tParameters: map[string]interface{}{\n\t\t\t\t\t\t\t\t\t\t\"$schema\": \"http://json-schema.org/draft-04/schema#\",\n\t\t\t\t\t\t\t\t\t\t\"type\": \"object\",\n\t\t\t\t\t\t\t\t\t\t\"properties\": map[string]interface{}{\n\t\t\t\t\t\t\t\t\t\t\t\"param-1\": map[string]interface{}{\n\t\t\t\t\t\t\t\t\t\t\t\t\"description\": \"First input parameter\",\n\t\t\t\t\t\t\t\t\t\t\t\t\"type\": 
\"string\",\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\"param-2\": map[string]interface{}{\n\t\t\t\t\t\t\t\t\t\t\t\t\"description\": \"Second input parameter\",\n\t\t\t\t\t\t\t\t\t\t\t\t\"type\": \"string\",\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tServiceBinding: &brokerapi.ServiceBindingSchema{\n\t\t\t\t\t\t\t\tCreate: &brokerapi.RequestResponseSchema{\n\t\t\t\t\t\t\t\t\tInputParametersSchema: brokerapi.InputParametersSchema{\n\t\t\t\t\t\t\t\t\t\tParameters: map[string]interface{}{\n\t\t\t\t\t\t\t\t\t\t\t\"$schema\": \"http://json-schema.org/draft-04/schema#\",\n\t\t\t\t\t\t\t\t\t\t\t\"type\": \"object\",\n\t\t\t\t\t\t\t\t\t\t\t\"properties\": map[string]interface{}{\n\t\t\t\t\t\t\t\t\t\t\t\t\"param-1\": map[string]interface{}{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"description\": \"First input parameter\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"type\": \"string\",\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\t\"param-2\": map[string]interface{}{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"description\": \"Second input parameter\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"type\": \"string\",\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tResponse: map[string]interface{}{\n\t\t\t\t\t\t\t\t\t\t\"$schema\": \"http://json-schema.org/draft-04/schema#\",\n\t\t\t\t\t\t\t\t\t\t\"type\": \"object\",\n\t\t\t\t\t\t\t\t\t\t\"properties\": map[string]interface{}{\n\t\t\t\t\t\t\t\t\t\t\t\"credentials\": map[string]interface{}{\n\t\t\t\t\t\t\t\t\t\t\t\t\"type\": \"object\",\n\t\t\t\t\t\t\t\t\t\t\t\t\"properties\": map[string]interface{}{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"special-key-1\": map[string]interface{}{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"description\": \"Special key 1\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"type\": \"string\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"special-key-2\": 
map[string]interface{}{\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"description\": \"Special key 2\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"type\": \"string\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tBindable: true,\n\t\t\t\tPlanUpdateable: true,\n\t\t\t},\n\t\t\tDeprovisionFailTimes: 0,\n\t\t},\n\t}\n\n\tvar serviceMap = make(map[string]*testService)\n\tfor _, s := range services {\n\t\tserviceMap[s.ID] = s\n\t}\n\n\treturn &testController{\n\t\tinstanceMap: instanceMap,\n\t\tserviceMap: serviceMap,\n\t\tprovisionCountMap: make(map[string]int),\n\t}\n}",
"func TestController(t *testing.T) {\n\tfakeKubeClient, catalogClient, fakeBrokerCatalog, _, _, testController, _, stopCh := newTestController(t)\n\tdefer close(stopCh)\n\n\tt.Log(fakeKubeClient, catalogClient, fakeBrokerCatalog, testController, stopCh)\n\n\tfakeBrokerCatalog.RetCatalog = &brokerapi.Catalog{\n\t\tServices: []*brokerapi.Service{\n\t\t\t{\n\t\t\t\tName: \"test-service\",\n\t\t\t\tID: \"12345\",\n\t\t\t\tDescription: \"a test service\",\n\t\t\t\tPlans: []brokerapi.ServicePlan{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"test-plan\",\n\t\t\t\t\t\tFree: true,\n\t\t\t\t\t\tID: \"34567\",\n\t\t\t\t\t\tDescription: \"a test plan\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tname := \"test-name\"\n\tbroker := &v1alpha1.Broker{\n\t\tObjectMeta: v1.ObjectMeta{Name: name},\n\t\tSpec: v1alpha1.BrokerSpec{\n\t\t\tURL: \"https://example.com\",\n\t\t},\n\t}\n\tbrokerClient := catalogClient.Servicecatalog().Brokers()\n\n\tbrokerServer, err := brokerClient.Create(broker)\n\tif nil != err {\n\t\tt.Fatalf(\"error creating the broker %q (%q)\", broker, err)\n\t}\n\n\tif err := wait.PollImmediate(500*time.Millisecond, wait.ForeverTestTimeout,\n\t\tfunc() (bool, error) {\n\t\t\tbrokerServer, err = brokerClient.Get(name)\n\t\t\tif nil != err {\n\t\t\t\treturn false,\n\t\t\t\t\tfmt.Errorf(\"error getting broker %s (%s)\",\n\t\t\t\t\t\tname, err)\n\t\t\t} else if len(brokerServer.Status.Conditions) > 0 {\n\t\t\t\tt.Log(brokerServer)\n\t\t\t\treturn true, nil\n\t\t\t} else {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t},\n\t); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// check\n\tserviceClassClient := catalogClient.Servicecatalog().ServiceClasses()\n\t_, err = serviceClassClient.Get(\"test-service\")\n\tif nil != err {\n\t\tt.Fatal(\"could not find the test service\", err)\n\t}\n\n\t// cleanup our broker\n\terr = brokerClient.Delete(name, &v1.DeleteOptions{})\n\tif nil != err {\n\t\tt.Fatalf(\"broker should be deleted (%s)\", err)\n\t}\n\n\t// uncomment if/when deleting a broker 
deletes the associated service\n\t// if class, err := serviceClassClient.Get(\"test-service\"); nil == err {\n\t// \tt.Fatal(\"found the test service that should have been deleted\", err, class)\n\t// }\n}",
"func TestControllerHandleEvents(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\taddServices []*corev1.Service\n\t\tupdateServices []string\n\t\tdelServices []string\n\t\texpAddedServices []string\n\t\texpDeletedServices []string\n\t}{\n\t\t{\n\t\t\tname: \"If a controller is watching services it should react to the service change events.\",\n\t\t\taddServices: []*corev1.Service{\n\t\t\t\t{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{Name: \"svc1\"},\n\t\t\t\t\tSpec: corev1.ServiceSpec{\n\t\t\t\t\t\tType: \"ClusterIP\",\n\t\t\t\t\t\tPorts: []corev1.ServicePort{\n\t\t\t\t\t\t\tcorev1.ServicePort{Name: \"port1\", Port: 8080},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{Name: \"svc2\"},\n\t\t\t\t\tSpec: corev1.ServiceSpec{\n\t\t\t\t\t\tType: \"ClusterIP\",\n\t\t\t\t\t\tPorts: []corev1.ServicePort{\n\t\t\t\t\t\t\tcorev1.ServicePort{Name: \"port1\", Port: 8080},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tupdateServices: []string{\"svc1\"},\n\t\t\tdelServices: []string{\"svc1\", \"svc2\"},\n\t\t\texpAddedServices: []string{\"svc1\", \"svc2\", \"svc1\"},\n\t\t\texpDeletedServices: []string{\"svc1\", \"svc2\"},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\trequire := require.New(t)\n\t\t\tassert := assert.New(t)\n\t\t\tresync := 30 * time.Second\n\t\t\tstopC := make(chan struct{})\n\t\t\tvar gotAddedServices []string\n\t\t\tvar gotDeletedServices []string\n\n\t\t\t// Create the kubernetes client.\n\t\t\tk8scli, _, _, err := cli.GetK8sClients(\"\")\n\n\t\t\trequire.NoError(err, \"kubernetes client is required\")\n\n\t\t\t// Prepare the environment on the cluster.\n\t\t\tprep := prepare.New(k8scli, t)\n\t\t\tprep.SetUp()\n\t\t\tdefer prep.TearDown()\n\n\t\t\t// Create the reitrever.\n\t\t\trt := &retrieve.Resource{\n\t\t\t\tListerWatcher: cache.NewListWatchFromClient(k8scli.CoreV1().RESTClient(), \"services\", prep.Namespace().Name, 
fields.Everything()),\n\t\t\t\tObject: &corev1.Service{},\n\t\t\t}\n\n\t\t\t// Call times are the number of times the handler should be called before sending the termination signal.\n\t\t\tstopCallTimes := len(test.addServices) + len(test.updateServices) + len(test.delServices)\n\t\t\tcalledTimes := 0\n\t\t\tvar mx sync.Mutex\n\n\t\t\t// Create the handler.\n\t\t\thl := &handler.HandlerFunc{\n\t\t\t\tAddFunc: func(_ context.Context, obj runtime.Object) error {\n\t\t\t\t\tmx.Lock()\n\t\t\t\t\tcalledTimes++\n\t\t\t\t\tmx.Unlock()\n\n\t\t\t\t\tsvc := obj.(*corev1.Service)\n\t\t\t\t\tgotAddedServices = append(gotAddedServices, svc.Name)\n\t\t\t\t\tif calledTimes >= stopCallTimes {\n\t\t\t\t\t\tclose(stopC)\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t\tDeleteFunc: func(_ context.Context, id string) error {\n\t\t\t\t\tmx.Lock()\n\t\t\t\t\tcalledTimes++\n\t\t\t\t\tmx.Unlock()\n\n\t\t\t\t\t// Ignore namespace.\n\t\t\t\t\tid = strings.Split(id, \"/\")[1]\n\t\t\t\t\tgotDeletedServices = append(gotDeletedServices, id)\n\t\t\t\t\tif calledTimes >= stopCallTimes {\n\t\t\t\t\t\tclose(stopC)\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t}\n\n\t\t\t// Create a Pod controller.\n\t\t\tctrl := controller.NewSequential(resync, hl, rt, nil, log.Dummy)\n\t\t\trequire.NotNil(ctrl, \"controller is required\")\n\t\t\tgo ctrl.Run(stopC)\n\n\t\t\t// Create the required services.\n\t\t\tfor _, svc := range test.addServices {\n\t\t\t\t_, err := k8scli.CoreV1().Services(prep.Namespace().Name).Create(svc)\n\t\t\t\tassert.NoError(err)\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t}\n\n\t\t\tfor _, svc := range test.updateServices {\n\t\t\t\torigSvc, err := k8scli.CoreV1().Services(prep.Namespace().Name).Get(svc, metav1.GetOptions{})\n\t\t\t\tif assert.NoError(err) {\n\t\t\t\t\t// Change something\n\t\t\t\t\torigSvc.Spec.Ports = append(origSvc.Spec.Ports, corev1.ServicePort{Name: \"updateport\", Port: 9876})\n\t\t\t\t\t_, err := 
k8scli.CoreV1().Services(prep.Namespace().Name).Update(origSvc)\n\t\t\t\t\tassert.NoError(err)\n\t\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Delete the required services.\n\t\t\tfor _, svc := range test.delServices {\n\t\t\t\terr := k8scli.CoreV1().Services(prep.Namespace().Name).Delete(svc, &metav1.DeleteOptions{})\n\t\t\t\tassert.NoError(err)\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t}\n\n\t\t\t// Wait until we have finished.\n\t\t\tselect {\n\t\t\t// Timeout.\n\t\t\tcase <-time.After(20 * time.Second):\n\t\t\t// Finished.\n\t\t\tcase <-stopC:\n\t\t\t}\n\n\t\t\t// Check.\n\t\t\tassert.Equal(test.expAddedServices, gotAddedServices)\n\t\t\tassert.Equal(test.expDeletedServices, gotDeletedServices)\n\t\t})\n\t}\n}",
"func TestNewCStorPoolController(t *testing.T) {\n\tfakeKubeClient := fake.NewSimpleClientset()\n\tfakeOpenebsClient := openebsFakeClientset.NewSimpleClientset()\n\n\tkubeInformerFactory := kubeinformers.NewSharedInformerFactory(fakeKubeClient, time.Second*30)\n\topenebsInformerFactory := informers.NewSharedInformerFactory(fakeOpenebsClient, time.Second*30)\n\t// Instantiate the controller\n\tcontroller := NewController(fakeKubeClient, fakeOpenebsClient, kubeInformerFactory,\n\t\topenebsInformerFactory)\n\n\tif controller.spcSynced == nil {\n\t\tt.Errorf(\"No spc cache sync in controller object\")\n\t}\n\n\tif controller.workqueue == nil {\n\t\tt.Errorf(\"No workqueue in controller object\")\n\t}\n\tif controller.recorder == nil {\n\t\tt.Errorf(\"No recorder in controller object\")\n\t}\n\tif controller.kubeclientset != fakeKubeClient {\n\t\tt.Errorf(\"SPC controller object's kubeclientset mismatch\")\n\t}\n\tif controller.clientset != fakeOpenebsClient {\n\t\tt.Errorf(\"SPC controller object's openebsclientset mismatch\")\n\t}\n}",
"func TestCancelManyJobs(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration tests in short mode\")\n\t}\n\n\tt.Parallel()\n\tc, _ := minikubetestenv.AcquireCluster(t)\n\n\t// Create an input repo\n\trepo := tu.UniqueString(\"TestCancelManyJobs\")\n\trequire.NoError(t, c.CreateRepo(pfs.DefaultProjectName, repo))\n\n\t// Create sleep pipeline\n\tpipeline := tu.UniqueString(\"pipeline\")\n\trequire.NoError(t, c.CreatePipeline(pfs.DefaultProjectName,\n\t\tpipeline,\n\t\t\"\",\n\t\t[]string{\"sleep\", \"600\"},\n\t\tnil,\n\t\t&pps.ParallelismSpec{\n\t\t\tConstant: 1,\n\t\t},\n\t\tclient.NewPFSInput(pfs.DefaultProjectName, repo, \"/*\"),\n\t\t\"\",\n\t\tfalse,\n\t))\n\n\t// Create 10 input commits, to spawn 10 jobs\n\tvar commits []*pfs.Commit\n\tfor i := 0; i < 10; i++ {\n\t\tcommit, err := c.StartCommit(pfs.DefaultProjectName, repo, \"master\")\n\t\trequire.NoError(t, c.PutFile(commit, \"file\", strings.NewReader(\"foo\")))\n\t\trequire.NoError(t, err)\n\t\trequire.NoError(t, c.FinishCommit(pfs.DefaultProjectName, repo, commit.Branch.Name, commit.Id))\n\t\tcommits = append(commits, commit)\n\t}\n\n\t// For each expected job: watch to make sure the input job comes up, make\n\t// sure that it's the only job running, then cancel it\n\tfor _, commit := range commits {\n\t\t// Wait until PPS has started processing commit\n\t\tvar jobInfo *pps.JobInfo\n\t\trequire.NoErrorWithinT(t, 30*time.Second, func() error {\n\t\t\treturn backoff.Retry(func() error {\n\t\t\t\tjobInfos, err := c.ListJob(pfs.DefaultProjectName, pipeline, []*pfs.Commit{commit}, -1, true)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif len(jobInfos) != 1 {\n\t\t\t\t\treturn errors.Errorf(\"Expected one job, but got %d: %v\", len(jobInfos), jobInfos)\n\t\t\t\t}\n\t\t\t\tjobInfo = jobInfos[0]\n\t\t\t\treturn nil\n\t\t\t}, backoff.NewTestingBackOff())\n\t\t})\n\n\t\t// Stop the job\n\t\trequire.NoError(t, c.StopJob(pfs.DefaultProjectName, jobInfo.Job.Pipeline.Name, 
jobInfo.Job.Id))\n\n\t\t// Check that the job is now killed\n\t\trequire.NoErrorWithinT(t, 30*time.Second, func() error {\n\t\t\treturn backoff.Retry(func() error {\n\t\t\t\t// TODO(msteffen): once github.com/pachyderm/pachyderm/v2/pull/2642 is\n\t\t\t\t// submitted, change ListJob here to filter on commit1 as the input commit,\n\t\t\t\t// rather than inspecting the input in the test\n\t\t\t\tupdatedJobInfo, err := c.InspectJob(pfs.DefaultProjectName, jobInfo.Job.Pipeline.Name, jobInfo.Job.Id, false)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif updatedJobInfo.State != pps.JobState_JOB_KILLED {\n\t\t\t\t\treturn errors.Errorf(\"job %s is still running, but should be KILLED\", jobInfo.Job.Id)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}, backoff.NewTestingBackOff())\n\t\t})\n\t}\n}",
"func TestReplicationConcurrentPush(t *testing.T) {\n\n\tbase.RequireNumTestBuckets(t, 2)\n\tbase.SetUpTestLogging(t, logger.LevelInfo, logger.KeyAll)\n\n\t// Disable sequence batching for multi-RT tests (pending CBG-1000)\n\tdefer db.SuspendSequenceBatching()()\n\n\t// Increase checkpoint persistence frequency for cross-node status verification\n\tdefer reduceTestCheckpointInterval(50 * time.Millisecond)()\n\n\tactiveRT, remoteRT, remoteURLString, teardown := setupSGRPeers(t)\n\tdefer teardown()\n\t// Create push replications, verify running\n\tactiveRT.createReplication(\"rep_ABC\", remoteURLString, db.ActiveReplicatorTypePush, []string{\"ABC\"}, true, db.ConflictResolverDefault)\n\tactiveRT.waitForReplicationStatus(\"rep_ABC\", db.ReplicationStateRunning)\n\tactiveRT.createReplication(\"rep_DEF\", remoteURLString, db.ActiveReplicatorTypePush, []string{\"DEF\"}, true, db.ConflictResolverDefault)\n\tactiveRT.waitForReplicationStatus(\"rep_DEF\", db.ReplicationStateRunning)\n\n\t// Create docs on active\n\tdocAllChannels1 := t.Name() + \"All1\"\n\tdocAllChannels2 := t.Name() + \"All2\"\n\t_ = activeRT.putDoc(docAllChannels1, `{\"source\":\"activeRT1\",\"channels\":[\"ABC\",\"DEF\"]}`)\n\t_ = activeRT.putDoc(docAllChannels2, `{\"source\":\"activeRT2\",\"channels\":[\"ABC\",\"DEF\"]}`)\n\n\t// wait for documents to be pushed to remote\n\tchangesResults := remoteRT.RequireWaitChanges(2, \"0\")\n\tchangesResults.requireDocIDs(t, []string{docAllChannels1, docAllChannels2})\n\n\t// wait for both replications to have pushed, and total pushed to equal 2\n\tassert.NoError(t, activeRT.WaitForCondition(func() bool {\n\t\tabcStatus := activeRT.GetReplicationStatus(\"rep_ABC\")\n\t\tif abcStatus.DocsCheckedPush != 2 {\n\t\t\tlog.Printf(\"abcStatus.DocsCheckedPush not 2, is %v\", abcStatus.DocsCheckedPush)\n\t\t\tlog.Printf(\"abcStatus=%+v\", abcStatus)\n\t\t\treturn false\n\t\t}\n\t\tdefStatus := activeRT.GetReplicationStatus(\"rep_DEF\")\n\t\tif defStatus.DocsCheckedPush != 2 
{\n\t\t\tlog.Printf(\"defStatus.DocsCheckedPush not 2, is %v\", defStatus.DocsCheckedPush)\n\t\t\tlog.Printf(\"defStatus=%+v\", defStatus)\n\t\t\treturn false\n\t\t}\n\n\t\t// DocsWritten is incremented on a successful write, but ALSO in the race scenario where the remote responds\n\t\t// to the changes message to say it needs the rev, but then receives the rev from another source. This means that\n\t\t// in this test, DocsWritten can be any value between 0 and 2 for each replication, but should be at least 2\n\t\t// for both replications\n\t\ttotalDocsWritten := abcStatus.DocsWritten + defStatus.DocsWritten\n\t\tif totalDocsWritten < 2 || totalDocsWritten > 4 {\n\t\t\tlog.Printf(\"Total docs written is not between 2 and 4, is abc=%v, def=%v\", abcStatus.DocsWritten, defStatus.DocsWritten)\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}))\n\n\t// Validate doc contents\n\tdocAll1Body := remoteRT.getDoc(docAllChannels1)\n\tassert.Equal(t, \"activeRT1\", docAll1Body[\"source\"])\n\tdocAll2Body := remoteRT.getDoc(docAllChannels2)\n\tassert.Equal(t, \"activeRT2\", docAll2Body[\"source\"])\n\n}",
"func deployControllers(ctx context.Context, ready chan ktfkind.ProxyReadinessEvent, cluster kind.Cluster, containerImage, namespace string) error {\n\t// ensure the controller namespace is created\n\tns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}\n\tif _, err := cluster.Client().CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}); err != nil {\n\t\tif !errors.IsAlreadyExists(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// run the controller in the background\n\tgo func() {\n\t\t// pull the readiness event for the proxy\n\t\tevent := <-ready\n\n\t\t// if there's an error, all tests fail here\n\t\tif event.Err != nil {\n\t\t\tpanic(event.Err)\n\t\t}\n\n\t\t// grab the admin hostname and pass the readiness event on to the tests\n\t\tu := event.ProxyAdminURL\n\t\tadminHost := u.Hostname()\n\t\tproxyReadyCh <- event\n\n\t\t// create a tempfile to hold the cluster kubeconfig that will be used for the controller\n\t\tkubeconfig, err := ioutil.TempFile(os.TempDir(), \"kubeconfig-\")\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tdefer os.Remove(kubeconfig.Name())\n\n\t\t// dump the kubeconfig from kind into the tempfile\n\t\tgenerateKubeconfig := exec.CommandContext(ctx, \"kind\", \"get\", \"kubeconfig\", \"--name\", cluster.Name())\n\t\tgenerateKubeconfig.Stdout = kubeconfig\n\t\tgenerateKubeconfig.Stderr = os.Stderr\n\t\tif err := generateKubeconfig.Run(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tkubeconfig.Close()\n\n\t\t// deploy our CRDs to the cluster\n\t\tfor _, crd := range crds {\n\t\t\tcmd := exec.CommandContext(ctx, \"kubectl\", \"--kubeconfig\", kubeconfig.Name(), \"apply\", \"-f\", crd)\n\t\t\tstdout, stderr := new(bytes.Buffer), new(bytes.Buffer)\n\t\t\tcmd.Stdout = stdout\n\t\t\tcmd.Stderr = stderr\n\t\t\tif err := cmd.Run(); err != nil {\n\t\t\t\tfmt.Fprintln(os.Stdout, stdout.String())\n\t\t\t\tpanic(fmt.Errorf(\"%s: %w\", stderr.String(), err))\n\t\t\t}\n\t\t}\n\n\t\t// if set, allow running the 
legacy controller for the tests instead of the current controller\n\t\tvar cmd *exec.Cmd\n\t\tif useLegacyKIC() {\n\t\t\tcmd = buildLegacyCommand(ctx, kubeconfig.Name(), adminHost, cluster.Client())\n\t\t\tstdout, stderr := new(bytes.Buffer), new(bytes.Buffer)\n\t\t\tcmd.Stdout = io.MultiWriter(stdout, os.Stdout)\n\t\t\tcmd.Stderr = io.MultiWriter(stderr, os.Stderr)\n\t\t\tif err := cmd.Run(); err != nil {\n\t\t\t\tfmt.Fprintln(os.Stdout, stdout.String())\n\t\t\t\tpanic(fmt.Errorf(\"%s: %w\", stderr.String(), err))\n\t\t\t}\n\t\t} else {\n\t\t\tconfig := config.Config{}\n\t\t\tflags := config.FlagSet()\n\t\t\tflags.Parse([]string{\n\t\t\t\tfmt.Sprintf(\"--kong-admin-url=http://%s:8001\", adminHost),\n\t\t\t\tfmt.Sprintf(\"--kubeconfig=%s\", kubeconfig.Name()),\n\t\t\t\t\"--proxy-sync-seconds=0.05\", // run the test updates at 50ms for high speed\n\t\t\t\t\"--controller-kongstate=enabled\",\n\t\t\t\t\"--controller-ingress-networkingv1=enabled\",\n\t\t\t\t\"--controller-ingress-networkingv1beta1=disabled\",\n\t\t\t\t\"--controller-ingress-extensionsv1beta1=disabled\",\n\t\t\t\t\"--controller-udpingress=enabled\",\n\t\t\t\t\"--controller-tcpingress=enabled\",\n\t\t\t\t\"--controller-kongingress=enabled\",\n\t\t\t\t\"--controller-kongclusterplugin=enabled\",\n\t\t\t\t\"--controller-kongplugin=enabled\",\n\t\t\t\t\"--controller-kongconsumer=disabled\",\n\t\t\t\t\"--election-id=integrationtests.konghq.com\",\n\t\t\t\tfmt.Sprintf(\"--watch-namespace=%s\", watchNamespaces),\n\t\t\t\tfmt.Sprintf(\"--ingress-class=%s\", ingressClass),\n\t\t\t\t\"--log-level=trace\",\n\t\t\t\t\"--log-format=text\",\n\t\t\t\t\"--admission-webhook-listen=172.17.0.1:49023\",\n\t\t\t\tfmt.Sprintf(\"--admission-webhook-cert=%s\", admissionWebhookCert),\n\t\t\t\tfmt.Sprintf(\"--admission-webhook-key=%s\", admissionWebhookKey),\n\t\t\t})\n\t\t\tfmt.Fprintf(os.Stderr, \"config: %+v\\n\", config)\n\n\t\t\tif err := rootcmd.Run(ctx, &config); err != nil {\n\t\t\t\tpanic(fmt.Errorf(\"controller manager 
exited with error: %w\", err))\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn nil\n}",
"func TestKnativeServingDeploymentRecreationReady(t *testing.T) {\n\tcancel := logstream.Start(t)\n\tdefer cancel()\n\tclients := Setup(t)\n\n\tdpList, err := clients.KubeClient.Kube.AppsV1().Deployments(test.ServingOperatorNamespace).List(metav1.ListOptions{})\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to get any deployment under the namespace %q: %v\",\n\t\t\ttest.ServingOperatorNamespace, err)\n\t}\n\t// Delete the deployments one by one to see if they will be recreated.\n\tfor _, deployment := range dpList.Items {\n\t\tif err := clients.KubeClient.Kube.AppsV1().Deployments(deployment.Namespace).Delete(deployment.Name,\n\t\t\t&metav1.DeleteOptions{}); err != nil {\n\t\t\tt.Fatalf(\"Failed to delete deployment %s/%s: %v\", deployment.Namespace, deployment.Name, err)\n\t\t}\n\t\tif _, err = resources.WaitForDeploymentAvailable(clients, deployment.Name, deployment.Namespace,\n\t\t\tresources.IsDeploymentAvailable); err != nil {\n\t\t\tt.Fatalf(\"The deployment %s/%s failed to reach the desired state: %v\",\n\t\t\t\tdeployment.Namespace, deployment.Name, err)\n\t\t}\n\t\tif _, err := resources.WaitForKnativeServingState(clients.KnativeServingAlphaClient, test.ServingOperatorName,\n\t\t\tresources.IsKnativeServingReady); err != nil {\n\t\t\tt.Fatalf(\"KnativeService %q failed to reach the desired state: %v\", test.ServingOperatorName, err)\n\t\t}\n\t\tt.Logf(\"The deployment %s/%s reached the desired state.\", deployment.Namespace, deployment.Name)\n\t}\n}",
"func runSyncTests(t *testing.T, ctx context.Context, tests []controllerTest, storageClasses []*storage.StorageClass, pods []*v1.Pod) {\n\tdoit := func(t *testing.T, test controllerTest) {\n\t\t// Initialize the controller\n\t\tclient := &fake.Clientset{}\n\t\tctrl, err := newTestController(ctx, client, nil, true)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Test %q construct persistent volume failed: %v\", test.name, err)\n\t\t}\n\t\treactor := newVolumeReactor(ctx, client, ctrl, nil, nil, test.errors)\n\t\tfor _, claim := range test.initialClaims {\n\t\t\tif metav1.HasAnnotation(claim.ObjectMeta, annSkipLocalStore) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tctrl.claims.Add(claim)\n\t\t}\n\t\tfor _, volume := range test.initialVolumes {\n\t\t\tif metav1.HasAnnotation(volume.ObjectMeta, annSkipLocalStore) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tctrl.volumes.store.Add(volume)\n\t\t}\n\t\treactor.AddClaims(test.initialClaims)\n\t\treactor.AddVolumes(test.initialVolumes)\n\n\t\t// Inject classes into controller via a custom lister.\n\t\tindexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})\n\t\tfor _, class := range storageClasses {\n\t\t\tindexer.Add(class)\n\t\t}\n\t\tctrl.classLister = storagelisters.NewStorageClassLister(indexer)\n\n\t\tpodIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})\n\t\tfor _, pod := range pods {\n\t\t\tpodIndexer.Add(pod)\n\t\t\tctrl.podIndexer.Add(pod)\n\t\t}\n\t\tctrl.podLister = corelisters.NewPodLister(podIndexer)\n\n\t\t// Run the tested functions\n\t\terr = test.test(ctrl, reactor.VolumeReactor, test)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Test %q failed: %v\", test.name, err)\n\t\t}\n\n\t\t// Wait for the target state\n\t\terr = reactor.waitTest(test)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Test %q failed: %v\", test.name, err)\n\t\t}\n\n\t\tevaluateTestResults(ctx, ctrl, reactor.VolumeReactor, test, t)\n\t}\n\n\tfor _, test := range tests {\n\t\ttest := test\n\t\tt.Run(test.name, func(t *testing.T) 
{\n\t\t\tdoit(t, test)\n\t\t})\n\t}\n}",
"func (m *MockServiceControllerFactory) Build(mgr mc_manager.AsyncManager, clusterName string) (controller1.ServiceController, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Build\", mgr, clusterName)\n\tret0, _ := ret[0].(controller1.ServiceController)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
TestConcurrentBuildPodControllers tests the lifecycle of a build pod when running multiple controllers. | func TestConcurrentBuildPodControllers(t *testing.T) {
defer testutil.DumpEtcdOnFailure(t)
// Start a master with multiple BuildPodControllers
osClient, kClient := setupBuildControllerTest(controllerCount{BuildPodControllers: 5}, t)
build.RunBuildPodControllerTest(t, osClient, kClient)
} | [
"func TestConcurrentBuildControllersPodSync(t *testing.T) {\n\t// Start a master with multiple BuildControllers\n\tbuildClient, _, kClient, fn := setupBuildControllerTest(controllerCount{BuildControllers: 5}, t)\n\tdefer fn()\n\tbuild.RunBuildControllerPodSyncTest(t, buildClient, kClient)\n}",
"func TestConcurrentBuildControllers(t *testing.T) {\n\tdefer testutil.DumpEtcdOnFailure(t)\n\t// Start a master with multiple BuildControllers\n\tosClient, kClient := setupBuildControllerTest(controllerCount{BuildControllers: 5}, t)\n\tbuild.RunBuildControllerTest(t, osClient, kClient)\n}",
"func validateController(ctx context.Context, c clientset.Interface, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) {\n\tcontainerImage = trimDockerRegistry(containerImage)\n\tgetPodsTemplate := \"--template={{range.items}}{{.metadata.name}} {{end}}\"\n\n\tgetContainerStateTemplate := fmt.Sprintf(`--template={{if (exists . \"status\" \"containerStatuses\")}}{{range .status.containerStatuses}}{{if (and (eq .name \"%s\") (exists . \"state\" \"running\"))}}true{{end}}{{end}}{{end}}`, containername)\n\n\tgetImageTemplate := fmt.Sprintf(`--template={{if (exists . \"spec\" \"containers\")}}{{range .spec.containers}}{{if eq .name \"%s\"}}{{.image}}{{end}}{{end}}{{end}}`, containername)\n\n\tginkgo.By(fmt.Sprintf(\"waiting for all containers in %s pods to come up.\", testname)) //testname should be selector\nwaitLoop:\n\tfor start := time.Now(); time.Since(start) < framework.PodStartTimeout && ctx.Err() == nil; time.Sleep(5 * time.Second) {\n\t\tgetPodsOutput := e2ekubectl.RunKubectlOrDie(ns, \"get\", \"pods\", \"-o\", \"template\", getPodsTemplate, \"-l\", testname)\n\t\tpods := strings.Fields(getPodsOutput)\n\t\tif numPods := len(pods); numPods != replicas {\n\t\t\tginkgo.By(fmt.Sprintf(\"Replicas for %s: expected=%d actual=%d\", testname, replicas, numPods))\n\t\t\tcontinue\n\t\t}\n\t\tvar runningPods []string\n\t\tfor _, podID := range pods {\n\t\t\trunning := e2ekubectl.RunKubectlOrDie(ns, \"get\", \"pods\", podID, \"-o\", \"template\", getContainerStateTemplate)\n\t\t\tif running != \"true\" {\n\t\t\t\tframework.Logf(\"%s is created but not running\", podID)\n\t\t\t\tcontinue waitLoop\n\t\t\t}\n\n\t\t\tcurrentImage := e2ekubectl.RunKubectlOrDie(ns, \"get\", \"pods\", podID, \"-o\", \"template\", getImageTemplate)\n\t\t\tcurrentImage = trimDockerRegistry(currentImage)\n\t\t\tif currentImage != containerImage {\n\t\t\t\tframework.Logf(\"%s is created but running wrong image; expected: %s, actual: %s\", 
podID, containerImage, currentImage)\n\t\t\t\tcontinue waitLoop\n\t\t\t}\n\n\t\t\t// Call the generic validator function here.\n\t\t\t// This might validate for example, that (1) getting a url works and (2) url is serving correct content.\n\t\t\tif err := validator(ctx, c, podID); err != nil {\n\t\t\t\tframework.Logf(\"%s is running right image but validator function failed: %v\", podID, err)\n\t\t\t\tcontinue waitLoop\n\t\t\t}\n\n\t\t\tframework.Logf(\"%s is verified up and running\", podID)\n\t\t\trunningPods = append(runningPods, podID)\n\t\t}\n\t\t// If we reach here, then all our checks passed.\n\t\tif len(runningPods) == replicas {\n\t\t\treturn\n\t\t}\n\t}\n\t// Reaching here means that one of more checks failed multiple times. Assuming its not a race condition, something is broken.\n\tframework.Failf(\"Timed out after %v seconds waiting for %s pods to reach valid state\", framework.PodStartTimeout.Seconds(), testname)\n}",
"func deployControllers(ctx context.Context, ready chan ktfkind.ProxyReadinessEvent, cluster kind.Cluster, containerImage, namespace string) error {\n\t// ensure the controller namespace is created\n\tns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}\n\tif _, err := cluster.Client().CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}); err != nil {\n\t\tif !errors.IsAlreadyExists(err) {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// run the controller in the background\n\tgo func() {\n\t\t// pull the readiness event for the proxy\n\t\tevent := <-ready\n\n\t\t// if there's an error, all tests fail here\n\t\tif event.Err != nil {\n\t\t\tpanic(event.Err)\n\t\t}\n\n\t\t// grab the admin hostname and pass the readiness event on to the tests\n\t\tu := event.ProxyAdminURL\n\t\tadminHost := u.Hostname()\n\t\tproxyReadyCh <- event\n\n\t\t// create a tempfile to hold the cluster kubeconfig that will be used for the controller\n\t\tkubeconfig, err := ioutil.TempFile(os.TempDir(), \"kubeconfig-\")\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tdefer os.Remove(kubeconfig.Name())\n\n\t\t// dump the kubeconfig from kind into the tempfile\n\t\tgenerateKubeconfig := exec.CommandContext(ctx, \"kind\", \"get\", \"kubeconfig\", \"--name\", cluster.Name())\n\t\tgenerateKubeconfig.Stdout = kubeconfig\n\t\tgenerateKubeconfig.Stderr = os.Stderr\n\t\tif err := generateKubeconfig.Run(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tkubeconfig.Close()\n\n\t\t// deploy our CRDs to the cluster\n\t\tfor _, crd := range crds {\n\t\t\tcmd := exec.CommandContext(ctx, \"kubectl\", \"--kubeconfig\", kubeconfig.Name(), \"apply\", \"-f\", crd)\n\t\t\tstdout, stderr := new(bytes.Buffer), new(bytes.Buffer)\n\t\t\tcmd.Stdout = stdout\n\t\t\tcmd.Stderr = stderr\n\t\t\tif err := cmd.Run(); err != nil {\n\t\t\t\tfmt.Fprintln(os.Stdout, stdout.String())\n\t\t\t\tpanic(fmt.Errorf(\"%s: %w\", stderr.String(), err))\n\t\t\t}\n\t\t}\n\n\t\t// if set, allow running the 
legacy controller for the tests instead of the current controller\n\t\tvar cmd *exec.Cmd\n\t\tif useLegacyKIC() {\n\t\t\tcmd = buildLegacyCommand(ctx, kubeconfig.Name(), adminHost, cluster.Client())\n\t\t\tstdout, stderr := new(bytes.Buffer), new(bytes.Buffer)\n\t\t\tcmd.Stdout = io.MultiWriter(stdout, os.Stdout)\n\t\t\tcmd.Stderr = io.MultiWriter(stderr, os.Stderr)\n\t\t\tif err := cmd.Run(); err != nil {\n\t\t\t\tfmt.Fprintln(os.Stdout, stdout.String())\n\t\t\t\tpanic(fmt.Errorf(\"%s: %w\", stderr.String(), err))\n\t\t\t}\n\t\t} else {\n\t\t\tconfig := config.Config{}\n\t\t\tflags := config.FlagSet()\n\t\t\tflags.Parse([]string{\n\t\t\t\tfmt.Sprintf(\"--kong-admin-url=http://%s:8001\", adminHost),\n\t\t\t\tfmt.Sprintf(\"--kubeconfig=%s\", kubeconfig.Name()),\n\t\t\t\t\"--proxy-sync-seconds=0.05\", // run the test updates at 50ms for high speed\n\t\t\t\t\"--controller-kongstate=enabled\",\n\t\t\t\t\"--controller-ingress-networkingv1=enabled\",\n\t\t\t\t\"--controller-ingress-networkingv1beta1=disabled\",\n\t\t\t\t\"--controller-ingress-extensionsv1beta1=disabled\",\n\t\t\t\t\"--controller-udpingress=enabled\",\n\t\t\t\t\"--controller-tcpingress=enabled\",\n\t\t\t\t\"--controller-kongingress=enabled\",\n\t\t\t\t\"--controller-kongclusterplugin=enabled\",\n\t\t\t\t\"--controller-kongplugin=enabled\",\n\t\t\t\t\"--controller-kongconsumer=disabled\",\n\t\t\t\t\"--election-id=integrationtests.konghq.com\",\n\t\t\t\tfmt.Sprintf(\"--watch-namespace=%s\", watchNamespaces),\n\t\t\t\tfmt.Sprintf(\"--ingress-class=%s\", ingressClass),\n\t\t\t\t\"--log-level=trace\",\n\t\t\t\t\"--log-format=text\",\n\t\t\t\t\"--admission-webhook-listen=172.17.0.1:49023\",\n\t\t\t\tfmt.Sprintf(\"--admission-webhook-cert=%s\", admissionWebhookCert),\n\t\t\t\tfmt.Sprintf(\"--admission-webhook-key=%s\", admissionWebhookKey),\n\t\t\t})\n\t\t\tfmt.Fprintf(os.Stderr, \"config: %+v\\n\", config)\n\n\t\t\tif err := rootcmd.Run(ctx, &config); err != nil {\n\t\t\t\tpanic(fmt.Errorf(\"controller manager 
exited with error: %w\", err))\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn nil\n}",
"func TestController(t *testing.T) {\n\tctx, _ := rtesting.SetupFakeContext(t)\n\tctx, cancel := context.WithTimeout(ctx, 30*time.Second)\n\tdefer cancel()\n\n\t// Create reconcilers, start controller.\n\tresults := test.NewResultsClient(t)\n\n\ttrctrl := taskrun.NewController(ctx, results)\n\tprctrl := pipelinerun.NewController(ctx, results)\n\tgo controller.StartAll(ctx, trctrl, prctrl)\n\n\t// Start informers - this notifies the controller of new events.\n\tgo taskruninformer.Get(ctx).Informer().Run(ctx.Done())\n\tgo pipelineruninformer.Get(ctx).Informer().Run(ctx.Done())\n\n\tpipeline := fakepipelineclient.Get(ctx)\n\tt.Run(\"taskrun\", func(t *testing.T) {\n\t\ttr := &v1beta1.TaskRun{\n\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\tAPIVersion: \"tekton.dev/v1beta1\",\n\t\t\t\tKind: \"TaskRun\",\n\t\t\t},\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"taskrun\",\n\t\t\t\tNamespace: \"ns\",\n\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\"demo\": \"demo\",\n\t\t\t\t\t// This TaskRun belongs to a PipelineRun, so the record should\n\t\t\t\t\t// be associated with the PipelineRun result.\n\t\t\t\t\t\"tekton.dev/pipelineRun\": \"pr\",\n\t\t\t\t},\n\t\t\t\tOwnerReferences: []metav1.OwnerReference{{\n\t\t\t\t\tAPIVersion: \"tekton.dev/v1beta1\",\n\t\t\t\t\tKind: \"PipelineRun\",\n\t\t\t\t\tUID: \"pr-id\",\n\t\t\t\t}},\n\t\t\t\tUID: \"tr-id\",\n\t\t\t},\n\t\t}\n\n\t\t// The following is a hack to make the fake clients play nice with\n\t\t// each other. 
While the controller uses the typed informer that uses\n\t\t// the fake pipeline client to receive events, the controller uses the\n\t\t// fake dynamic client to fetch and update objects during reconcile.\n\t\t// These fake clients store objects independently, so we create the\n\t\t// object in each client to make sure the data is populated in both\n\t\t// places.\n\t\tif _, err := pipeline.TektonV1beta1().TaskRuns(tr.GetNamespace()).Create(tr); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdata, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tr)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"ToUnstructured: %v\", err)\n\t\t}\n\t\t_, err = dynamicinject.Get(ctx).Resource(apis.KindToResource(tr.GroupVersionKind())).Namespace(tr.GetNamespace()).Create(&unstructured.Unstructured{Object: data}, metav1.CreateOptions{})\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Create: %v\", err)\n\t\t}\n\n\t\twait(ctx, t, tr, \"ns/results/pr-id\")\n\t})\n\n\tt.Run(\"pipelinerun\", func(t *testing.T) {\n\t\tpr := &v1beta1.PipelineRun{\n\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\tAPIVersion: \"tekton.dev/v1beta1\",\n\t\t\t\tKind: \"PipelineRun\",\n\t\t\t},\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: \"pr\",\n\t\t\t\tNamespace: \"ns\",\n\t\t\t\tAnnotations: map[string]string{\"demo\": \"demo\"},\n\t\t\t\tUID: \"pr-id\",\n\t\t\t},\n\t\t}\n\n\t\t// Same create hack as taskrun (see above).\n\t\tif _, err := pipeline.TektonV1beta1().PipelineRuns(pr.GetNamespace()).Create(pr); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdata, err := runtime.DefaultUnstructuredConverter.ToUnstructured(pr)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"ToUnstructured: %v\", err)\n\t\t}\n\t\t_, err = dynamicinject.Get(ctx).Resource(apis.KindToResource(pr.GroupVersionKind())).Namespace(pr.GetNamespace()).Create(&unstructured.Unstructured{Object: data}, metav1.CreateOptions{})\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Create: %v\", err)\n\t\t}\n\n\t\twait(ctx, t, pr, \"ns/results/pr-id\")\n\t})\n}",
"func SetupAddControllers(k kubernetes.Interface, namespace string) kubernetes.Interface {\n\td1 := MockDeploy()\n\tif _, err := k.AppsV1().Deployments(namespace).Create(&d1); err != nil {\n\t\tpanic(err)\n\t}\n\n\ts1 := MockStatefulSet()\n\tif _, err := k.AppsV1().StatefulSets(namespace).Create(&s1); err != nil {\n\t\tpanic(err)\n\t}\n\n\tds1 := MockDaemonSet()\n\tif _, err := k.AppsV1().DaemonSets(namespace).Create(&ds1); err != nil {\n\t\tpanic(err)\n\t}\n\n\tj1 := MockJob()\n\tif _, err := k.BatchV1().Jobs(namespace).Create(&j1); err != nil {\n\t\tpanic(err)\n\t}\n\n\tcj1 := MockCronJob()\n\tif _, err := k.BatchV1beta1().CronJobs(namespace).Create(&cj1); err != nil {\n\t\tpanic(err)\n\t}\n\n\trc1 := MockReplicationController()\n\tif _, err := k.CoreV1().ReplicationControllers(namespace).Create(&rc1); err != nil {\n\t\tpanic(err)\n\t}\n\n\tp1 := MockNakedPod()\n\tif _, err := k.CoreV1().Pods(namespace).Create(&p1); err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn k\n}",
"func runSyncTests(t *testing.T, ctx context.Context, tests []controllerTest, storageClasses []*storage.StorageClass, pods []*v1.Pod) {\n\tdoit := func(t *testing.T, test controllerTest) {\n\t\t// Initialize the controller\n\t\tclient := &fake.Clientset{}\n\t\tctrl, err := newTestController(ctx, client, nil, true)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Test %q construct persistent volume failed: %v\", test.name, err)\n\t\t}\n\t\treactor := newVolumeReactor(ctx, client, ctrl, nil, nil, test.errors)\n\t\tfor _, claim := range test.initialClaims {\n\t\t\tif metav1.HasAnnotation(claim.ObjectMeta, annSkipLocalStore) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tctrl.claims.Add(claim)\n\t\t}\n\t\tfor _, volume := range test.initialVolumes {\n\t\t\tif metav1.HasAnnotation(volume.ObjectMeta, annSkipLocalStore) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tctrl.volumes.store.Add(volume)\n\t\t}\n\t\treactor.AddClaims(test.initialClaims)\n\t\treactor.AddVolumes(test.initialVolumes)\n\n\t\t// Inject classes into controller via a custom lister.\n\t\tindexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})\n\t\tfor _, class := range storageClasses {\n\t\t\tindexer.Add(class)\n\t\t}\n\t\tctrl.classLister = storagelisters.NewStorageClassLister(indexer)\n\n\t\tpodIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})\n\t\tfor _, pod := range pods {\n\t\t\tpodIndexer.Add(pod)\n\t\t\tctrl.podIndexer.Add(pod)\n\t\t}\n\t\tctrl.podLister = corelisters.NewPodLister(podIndexer)\n\n\t\t// Run the tested functions\n\t\terr = test.test(ctrl, reactor.VolumeReactor, test)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Test %q failed: %v\", test.name, err)\n\t\t}\n\n\t\t// Wait for the target state\n\t\terr = reactor.waitTest(test)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Test %q failed: %v\", test.name, err)\n\t\t}\n\n\t\tevaluateTestResults(ctx, ctrl, reactor.VolumeReactor, test, t)\n\t}\n\n\tfor _, test := range tests {\n\t\ttest := test\n\t\tt.Run(test.name, func(t *testing.T) 
{\n\t\t\tdoit(t, test)\n\t\t})\n\t}\n}",
"func TestMultipleDeploy(t *testing.T) {\n\tinitialize()\n\n\tscc := new(LifeCycleSysCC)\n\tstub := shim.NewMockStub(\"lccc\", scc)\n\n\t//deploy 02\n\tcds, err := constructDeploymentSpec(\"example02\", \"github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example02\", [][]byte{[]byte(\"init\"), []byte(\"a\"), []byte(\"100\"), []byte(\"b\"), []byte(\"200\")})\n\tvar b []byte\n\tif b, err = proto.Marshal(cds); err != nil || b == nil {\n\t\tt.FailNow()\n\t}\n\n\targs := [][]byte{[]byte(DEPLOY), []byte(\"test\"), b}\n\tif _, err := stub.MockInvoke(\"1\", args); err != nil {\n\t\tt.FailNow()\n\t}\n\n\targs = [][]byte{[]byte(GETCCINFO), []byte(\"test\"), []byte(cds.ChaincodeSpec.ChaincodeID.Name)}\n\tif _, err := stub.MockInvoke(\"1\", args); err != nil {\n\t\tt.FailNow()\n\t}\n\n\t//deploy 01\n\tcds, err = constructDeploymentSpec(\"example01\", \"github.com/hyperledger/fabric/examples/chaincode/go/chaincode_example01\", [][]byte{[]byte(\"init\"), []byte(\"a\"), []byte(\"100\"), []byte(\"b\"), []byte(\"200\")})\n\tif b, err = proto.Marshal(cds); err != nil || b == nil {\n\t\tt.FailNow()\n\t}\n\n\targs = [][]byte{[]byte(DEPLOY), []byte(\"test\"), b}\n\tif _, err := stub.MockInvoke(\"1\", args); err != nil {\n\t\tt.FailNow()\n\t}\n\n\targs = [][]byte{[]byte(GETCCINFO), []byte(\"test\"), []byte(cds.ChaincodeSpec.ChaincodeID.Name)}\n\tif _, err := stub.MockInvoke(\"1\", args); err != nil {\n\t\tt.FailNow()\n\t}\n}",
"func TestDevPortForwardDeletePod(t *testing.T) {\n\tMarkIntegrationTest(t, CanRunWithoutGcp)\n\ttests := []struct {\n\t\tdir string\n\t}{\n\t\t{dir: \"examples/microservices\"},\n\t\t{dir: \"examples/multi-config-microservices\"},\n\t}\n\tfor _, test := range tests {\n\t\t// pre-build images to avoid tripping the 1-minute timeout in getLocalPortFromPortForwardEvent()\n\t\tskaffold.Build().InDir(test.dir).RunOrFail(t)\n\n\t\tns, client := SetupNamespace(t)\n\n\t\trpcAddr := randomPort()\n\t\tskaffold.Dev(\"--port-forward\", \"--rpc-port\", rpcAddr).InDir(test.dir).InNs(ns.Name).RunBackground(t)\n\t\tclient.WaitForDeploymentsToStabilize(\"leeroy-app\")\n\n\t\t_, entries := apiEvents(t, rpcAddr)\n\n\t\taddress, localPort := getLocalPortFromPortForwardEvent(t, entries, \"leeroy-app\", \"service\", ns.Name)\n\t\tassertResponseFromPort(t, address, localPort, constants.LeeroyAppResponse)\n\n\t\t// now, delete all pods in this namespace.\n\t\tRun(t, \".\", \"kubectl\", \"delete\", \"pods\", \"--all\", \"-n\", ns.Name)\n\n\t\tassertResponseFromPort(t, address, localPort, constants.LeeroyAppResponse)\n\t}\n}",
"func StartControllers(s *options.MCMServer,\n\tcontrolCoreKubeconfig *rest.Config,\n\ttargetCoreKubeconfig *rest.Config,\n\tcontrolMachineClientBuilder machinecontroller.ClientBuilder,\n\tcontrolCoreClientBuilder corecontroller.ClientBuilder,\n\ttargetCoreClientBuilder corecontroller.ClientBuilder,\n\trecorder record.EventRecorder,\n\tstop <-chan struct{}) error {\n\n\tklog.V(5).Info(\"Getting available resources\")\n\tavailableResources, err := getAvailableResources(controlCoreClientBuilder)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcontrolMachineClient := controlMachineClientBuilder.ClientOrDie(controllerManagerAgentName).MachineV1alpha1()\n\n\tcontrolCoreKubeconfig = rest.AddUserAgent(controlCoreKubeconfig, controllerManagerAgentName)\n\tcontrolCoreClient, err := kubernetes.NewForConfig(controlCoreKubeconfig)\n\tif err != nil {\n\t\tklog.Fatal(err)\n\t}\n\n\ttargetCoreKubeconfig = rest.AddUserAgent(targetCoreKubeconfig, controllerManagerAgentName)\n\ttargetCoreClient, err := kubernetes.NewForConfig(targetCoreKubeconfig)\n\tif err != nil {\n\t\tklog.Fatal(err)\n\t}\n\n\tif availableResources[machineGVR] || availableResources[machineSetGVR] || availableResources[machineDeploymentGVR] {\n\t\tklog.V(5).Infof(\"Creating shared informers; resync interval: %v\", s.MinResyncPeriod)\n\n\t\tcontrolMachineInformerFactory := machineinformers.NewFilteredSharedInformerFactory(\n\t\t\tcontrolMachineClientBuilder.ClientOrDie(\"control-machine-shared-informers\"),\n\t\t\ts.MinResyncPeriod.Duration,\n\t\t\ts.Namespace,\n\t\t\tnil,\n\t\t)\n\n\t\tcontrolCoreInformerFactory := coreinformers.NewFilteredSharedInformerFactory(\n\t\t\tcontrolCoreClientBuilder.ClientOrDie(\"control-core-shared-informers\"),\n\t\t\ts.MinResyncPeriod.Duration,\n\t\t\ts.Namespace,\n\t\t\tnil,\n\t\t)\n\n\t\ttargetCoreInformerFactory := coreinformers.NewSharedInformerFactory(\n\t\t\ttargetCoreClientBuilder.ClientOrDie(\"target-core-shared-informers\"),\n\t\t\ts.MinResyncPeriod.Duration,\n\t\t)\n\n\t\t// 
All shared informers are v1alpha1 API level\n\t\tmachineSharedInformers := controlMachineInformerFactory.Machine().V1alpha1()\n\n\t\tklog.V(5).Infof(\"Creating controllers...\")\n\t\tmcmcontroller, err := mcmcontroller.NewController(\n\t\t\ts.Namespace,\n\t\t\tcontrolMachineClient,\n\t\t\tcontrolCoreClient,\n\t\t\ttargetCoreClient,\n\t\t\ttargetCoreInformerFactory.Core().V1().PersistentVolumeClaims(),\n\t\t\ttargetCoreInformerFactory.Core().V1().PersistentVolumes(),\n\t\t\tcontrolCoreInformerFactory.Core().V1().Secrets(),\n\t\t\ttargetCoreInformerFactory.Core().V1().Nodes(),\n\t\t\tmachineSharedInformers.OpenStackMachineClasses(),\n\t\t\tmachineSharedInformers.AWSMachineClasses(),\n\t\t\tmachineSharedInformers.AzureMachineClasses(),\n\t\t\tmachineSharedInformers.GCPMachineClasses(),\n\t\t\tmachineSharedInformers.AlicloudMachineClasses(),\n\t\t\tmachineSharedInformers.PacketMachineClasses(),\n\t\t\tmachineSharedInformers.Machines(),\n\t\t\tmachineSharedInformers.MachineSets(),\n\t\t\tmachineSharedInformers.MachineDeployments(),\n\t\t\trecorder,\n\t\t\ts.SafetyOptions,\n\t\t\ts.NodeConditions,\n\t\t\ts.BootstrapTokenAuthExtraGroups,\n\t\t\ts.DeleteMigratedMachineClass,\n\t\t\ts.AutoscalerScaleDownAnnotationDuringRollout,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tklog.V(1).Info(\"Starting shared informers\")\n\n\t\tcontrolMachineInformerFactory.Start(stop)\n\t\tcontrolCoreInformerFactory.Start(stop)\n\t\ttargetCoreInformerFactory.Start(stop)\n\n\t\tklog.V(5).Info(\"Running controller\")\n\t\tgo mcmcontroller.Run(int(s.ConcurrentNodeSyncs), stop)\n\n\t} else {\n\t\treturn fmt.Errorf(\"unable to start machine controller: API GroupVersion %q or %q or %q is not available; \\nFound: %#v\", machineGVR, machineSetGVR, machineDeploymentGVR, availableResources)\n\t}\n\n\tselect {}\n}",
"func TestConcurrentAccessToRelatedVolumes(ctx context.Context, f *framework.Framework, cs clientset.Interface, ns string,\n\tnode e2epod.NodeSelection, pvcs []*v1.PersistentVolumeClaim, expectedContent string) {\n\n\tvar pods []*v1.Pod\n\n\t// Create each pod with pvc\n\tfor i := range pvcs {\n\t\tindex := i + 1\n\t\tginkgo.By(fmt.Sprintf(\"Creating pod%d with a volume on %+v\", index, node))\n\t\tpodConfig := e2epod.Config{\n\t\t\tNS: ns,\n\t\t\tPVCs: []*v1.PersistentVolumeClaim{pvcs[i]},\n\t\t\tSeLinuxLabel: e2epod.GetLinuxLabel(),\n\t\t\tNodeSelection: node,\n\t\t\tPVCsReadOnly: false,\n\t\t\tImageID: e2epod.GetTestImageID(imageutils.JessieDnsutils),\n\t\t}\n\t\tpod, err := e2epod.CreateSecPodWithNodeSelection(ctx, cs, &podConfig, f.Timeouts.PodStart)\n\t\tdefer func() {\n\t\t\tframework.ExpectNoError(e2epod.DeletePodWithWait(ctx, cs, pod))\n\t\t}()\n\t\tframework.ExpectNoError(err)\n\t\tpods = append(pods, pod)\n\t\tactualNodeName := pod.Spec.NodeName\n\n\t\t// Always run the subsequent pods on the same node.\n\t\te2epod.SetAffinity(&node, actualNodeName)\n\t}\n\n\tfor i, pvc := range pvcs {\n\t\tvar commands []string\n\n\t\tif *pvc.Spec.VolumeMode == v1.PersistentVolumeBlock {\n\t\t\tfileName := \"/mnt/volume1\"\n\t\t\tcommands = e2evolume.GenerateReadBlockCmd(fileName, len(expectedContent))\n\t\t\t// Check that all pods have the same content\n\t\t\tindex := i + 1\n\t\t\tginkgo.By(fmt.Sprintf(\"Checking if the volume in pod%d has expected initial content\", index))\n\t\t\t_, err := e2eoutput.LookForStringInPodExec(pods[i].Namespace, pods[i].Name, commands, expectedContent, time.Minute)\n\t\t\tframework.ExpectNoError(err, \"failed: finding the contents of the block volume %s.\", fileName)\n\t\t} else {\n\t\t\tfileName := \"/mnt/volume1/index.html\"\n\t\t\tcommands = e2evolume.GenerateReadFileCmd(fileName)\n\t\t\t// Check that all pods have the same content\n\t\t\tindex := i + 1\n\t\t\tginkgo.By(fmt.Sprintf(\"Checking if the volume in pod%d has expected initial 
content\", index))\n\t\t\t_, err := e2eoutput.LookForStringInPodExec(pods[i].Namespace, pods[i].Name, commands, expectedContent, time.Minute)\n\t\t\tframework.ExpectNoError(err, \"failed: finding the contents of the mounted file %s.\", fileName)\n\t\t}\n\t}\n}",
"func TestControllerHandleEvents(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\taddServices []*corev1.Service\n\t\tupdateServices []string\n\t\tdelServices []string\n\t\texpAddedServices []string\n\t\texpDeletedServices []string\n\t}{\n\t\t{\n\t\t\tname: \"If a controller is watching services it should react to the service change events.\",\n\t\t\taddServices: []*corev1.Service{\n\t\t\t\t{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{Name: \"svc1\"},\n\t\t\t\t\tSpec: corev1.ServiceSpec{\n\t\t\t\t\t\tType: \"ClusterIP\",\n\t\t\t\t\t\tPorts: []corev1.ServicePort{\n\t\t\t\t\t\t\tcorev1.ServicePort{Name: \"port1\", Port: 8080},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{Name: \"svc2\"},\n\t\t\t\t\tSpec: corev1.ServiceSpec{\n\t\t\t\t\t\tType: \"ClusterIP\",\n\t\t\t\t\t\tPorts: []corev1.ServicePort{\n\t\t\t\t\t\t\tcorev1.ServicePort{Name: \"port1\", Port: 8080},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tupdateServices: []string{\"svc1\"},\n\t\t\tdelServices: []string{\"svc1\", \"svc2\"},\n\t\t\texpAddedServices: []string{\"svc1\", \"svc2\", \"svc1\"},\n\t\t\texpDeletedServices: []string{\"svc1\", \"svc2\"},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\trequire := require.New(t)\n\t\t\tassert := assert.New(t)\n\t\t\tresync := 30 * time.Second\n\t\t\tstopC := make(chan struct{})\n\t\t\tvar gotAddedServices []string\n\t\t\tvar gotDeletedServices []string\n\n\t\t\t// Create the kubernetes client.\n\t\t\tk8scli, _, _, err := cli.GetK8sClients(\"\")\n\n\t\t\trequire.NoError(err, \"kubernetes client is required\")\n\n\t\t\t// Prepare the environment on the cluster.\n\t\t\tprep := prepare.New(k8scli, t)\n\t\t\tprep.SetUp()\n\t\t\tdefer prep.TearDown()\n\n\t\t\t// Create the reitrever.\n\t\t\trt := &retrieve.Resource{\n\t\t\t\tListerWatcher: cache.NewListWatchFromClient(k8scli.CoreV1().RESTClient(), \"services\", prep.Namespace().Name, 
fields.Everything()),\n\t\t\t\tObject: &corev1.Service{},\n\t\t\t}\n\n\t\t\t// Call times are the number of times the handler should be called before sending the termination signal.\n\t\t\tstopCallTimes := len(test.addServices) + len(test.updateServices) + len(test.delServices)\n\t\t\tcalledTimes := 0\n\t\t\tvar mx sync.Mutex\n\n\t\t\t// Create the handler.\n\t\t\thl := &handler.HandlerFunc{\n\t\t\t\tAddFunc: func(_ context.Context, obj runtime.Object) error {\n\t\t\t\t\tmx.Lock()\n\t\t\t\t\tcalledTimes++\n\t\t\t\t\tmx.Unlock()\n\n\t\t\t\t\tsvc := obj.(*corev1.Service)\n\t\t\t\t\tgotAddedServices = append(gotAddedServices, svc.Name)\n\t\t\t\t\tif calledTimes >= stopCallTimes {\n\t\t\t\t\t\tclose(stopC)\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t\tDeleteFunc: func(_ context.Context, id string) error {\n\t\t\t\t\tmx.Lock()\n\t\t\t\t\tcalledTimes++\n\t\t\t\t\tmx.Unlock()\n\n\t\t\t\t\t// Ignore namespace.\n\t\t\t\t\tid = strings.Split(id, \"/\")[1]\n\t\t\t\t\tgotDeletedServices = append(gotDeletedServices, id)\n\t\t\t\t\tif calledTimes >= stopCallTimes {\n\t\t\t\t\t\tclose(stopC)\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t}\n\n\t\t\t// Create a Pod controller.\n\t\t\tctrl := controller.NewSequential(resync, hl, rt, nil, log.Dummy)\n\t\t\trequire.NotNil(ctrl, \"controller is required\")\n\t\t\tgo ctrl.Run(stopC)\n\n\t\t\t// Create the required services.\n\t\t\tfor _, svc := range test.addServices {\n\t\t\t\t_, err := k8scli.CoreV1().Services(prep.Namespace().Name).Create(svc)\n\t\t\t\tassert.NoError(err)\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t}\n\n\t\t\tfor _, svc := range test.updateServices {\n\t\t\t\torigSvc, err := k8scli.CoreV1().Services(prep.Namespace().Name).Get(svc, metav1.GetOptions{})\n\t\t\t\tif assert.NoError(err) {\n\t\t\t\t\t// Change something\n\t\t\t\t\torigSvc.Spec.Ports = append(origSvc.Spec.Ports, corev1.ServicePort{Name: \"updateport\", Port: 9876})\n\t\t\t\t\t_, err := 
k8scli.CoreV1().Services(prep.Namespace().Name).Update(origSvc)\n\t\t\t\t\tassert.NoError(err)\n\t\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Delete the required services.\n\t\t\tfor _, svc := range test.delServices {\n\t\t\t\terr := k8scli.CoreV1().Services(prep.Namespace().Name).Delete(svc, &metav1.DeleteOptions{})\n\t\t\t\tassert.NoError(err)\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t}\n\n\t\t\t// Wait until we have finished.\n\t\t\tselect {\n\t\t\t// Timeout.\n\t\t\tcase <-time.After(20 * time.Second):\n\t\t\t// Finished.\n\t\t\tcase <-stopC:\n\t\t\t}\n\n\t\t\t// Check.\n\t\t\tassert.Equal(test.expAddedServices, gotAddedServices)\n\t\t\tassert.Equal(test.expDeletedServices, gotDeletedServices)\n\t\t})\n\t}\n}",
"func TestMultiControlPlane(t *testing.T) {\n\tframework.NewTest(t).\n\t\tFeatures(\"installation.multiplecontrolplanes\").\n\t\tRun(func(t framework.TestContext) {\n\t\t\t// configure peerauthentication per system namespace\n\t\t\trestrictUserGroups(t)\n\n\t\t\ttestCases := []struct {\n\t\t\t\tname string\n\t\t\t\tstatusCode int\n\t\t\t\tfrom echo.Instances\n\t\t\t\tto echo.Instances\n\t\t\t}{\n\t\t\t\t{\n\t\t\t\t\tname: \"workloads within same usergroup can communicate, same namespace\",\n\t\t\t\t\tstatusCode: http.StatusOK,\n\t\t\t\t\tfrom: apps.NS[0].A,\n\t\t\t\t\tto: apps.NS[0].B,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: \"workloads within same usergroup can communicate, different namespaces\",\n\t\t\t\t\tstatusCode: http.StatusOK,\n\t\t\t\t\tfrom: apps.NS[1].A,\n\t\t\t\t\tto: apps.NS[2].B,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: \"workloads within different usergroups cannot communicate, registry only\",\n\t\t\t\t\tstatusCode: http.StatusBadGateway,\n\t\t\t\t\tfrom: apps.NS[0].A,\n\t\t\t\t\tto: apps.NS[1].B,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname: \"workloads within different usergroups cannot communicate, default passthrough\",\n\t\t\t\t\tstatusCode: http.StatusServiceUnavailable,\n\t\t\t\t\tfrom: apps.NS[2].B,\n\t\t\t\t\tto: apps.NS[0].B,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tfor _, tc := range testCases {\n\t\t\t\tt.NewSubTest(tc.name).Run(func(t framework.TestContext) {\n\t\t\t\t\ttc.from[0].CallOrFail(t, echo.CallOptions{\n\t\t\t\t\t\tTo: tc.to,\n\t\t\t\t\t\tPort: echo.Port{\n\t\t\t\t\t\t\tProtocol: protocol.HTTP,\n\t\t\t\t\t\t\tServicePort: 80,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tCheck: check.And(\n\t\t\t\t\t\t\tcheck.ErrorOrStatus(tc.statusCode),\n\t\t\t\t\t\t),\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t}\n\t\t})\n}",
"func TestController(t *testing.T) {\n\tfakeKubeClient, catalogClient, fakeBrokerCatalog, _, _, testController, _, stopCh := newTestController(t)\n\tdefer close(stopCh)\n\n\tt.Log(fakeKubeClient, catalogClient, fakeBrokerCatalog, testController, stopCh)\n\n\tfakeBrokerCatalog.RetCatalog = &brokerapi.Catalog{\n\t\tServices: []*brokerapi.Service{\n\t\t\t{\n\t\t\t\tName: \"test-service\",\n\t\t\t\tID: \"12345\",\n\t\t\t\tDescription: \"a test service\",\n\t\t\t\tPlans: []brokerapi.ServicePlan{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"test-plan\",\n\t\t\t\t\t\tFree: true,\n\t\t\t\t\t\tID: \"34567\",\n\t\t\t\t\t\tDescription: \"a test plan\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tname := \"test-name\"\n\tbroker := &v1alpha1.Broker{\n\t\tObjectMeta: v1.ObjectMeta{Name: name},\n\t\tSpec: v1alpha1.BrokerSpec{\n\t\t\tURL: \"https://example.com\",\n\t\t},\n\t}\n\tbrokerClient := catalogClient.Servicecatalog().Brokers()\n\n\tbrokerServer, err := brokerClient.Create(broker)\n\tif nil != err {\n\t\tt.Fatalf(\"error creating the broker %q (%q)\", broker, err)\n\t}\n\n\tif err := wait.PollImmediate(500*time.Millisecond, wait.ForeverTestTimeout,\n\t\tfunc() (bool, error) {\n\t\t\tbrokerServer, err = brokerClient.Get(name)\n\t\t\tif nil != err {\n\t\t\t\treturn false,\n\t\t\t\t\tfmt.Errorf(\"error getting broker %s (%s)\",\n\t\t\t\t\t\tname, err)\n\t\t\t} else if len(brokerServer.Status.Conditions) > 0 {\n\t\t\t\tt.Log(brokerServer)\n\t\t\t\treturn true, nil\n\t\t\t} else {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t},\n\t); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// check\n\tserviceClassClient := catalogClient.Servicecatalog().ServiceClasses()\n\t_, err = serviceClassClient.Get(\"test-service\")\n\tif nil != err {\n\t\tt.Fatal(\"could not find the test service\", err)\n\t}\n\n\t// cleanup our broker\n\terr = brokerClient.Delete(name, &v1.DeleteOptions{})\n\tif nil != err {\n\t\tt.Fatalf(\"broker should be deleted (%s)\", err)\n\t}\n\n\t// uncomment if/when deleting a broker 
deletes the associated service\n\t// if class, err := serviceClassClient.Get(\"test-service\"); nil == err {\n\t// \tt.Fatal(\"found the test service that should have been deleted\", err, class)\n\t// }\n}",
"func (c *Controller) sync(ctx context.Context, pod *corev1.Pod) error {\n\tlog := c.log.WithField(\"name\", pod.Name).WithField(\"namespace\", pod.Namespace)\n\n\tvar errs []string\n\tfor _, container := range pod.Spec.Containers {\n\t\tenable, ok := pod.Annotations[api.EnableAnnotationKey+\"/\"+container.Name]\n\t\tif c.defaultTestAll {\n\t\t\t// If default all and we explicitly disable, ignore\n\t\t\tif ok && enable == \"false\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t} else {\n\t\t\t// If not default all and we don't enable, ignore\n\t\t\tif !ok || enable != \"true\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tlog = log.WithField(\"container\", container.Name)\n\t\tlog.Debug(\"processing conainer image\")\n\n\t\topts, err := c.buildOptions(container.Name, pod.Annotations)\n\t\tif err != nil {\n\t\t\terrs = append(errs, fmt.Sprintf(\"failed to build options from annotations for %q: %s\",\n\t\t\t\tcontainer.Name, err))\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := c.testContainerImage(ctx, log, pod, &container, opts); err != nil {\n\t\t\terrs = append(errs, fmt.Sprintf(\"failed to test container image %q: %s\",\n\t\t\t\tcontainer.Name, err))\n\t\t\tcontinue\n\t\t}\n\t}\n\n\t// Check the image tag again after the cache timeout.\n\tc.workqueue.AddAfter(pod, c.cacheTimeout)\n\n\tif len(errs) > 0 {\n\t\treturn fmt.Errorf(\"failed to sync pod %s/%s: %s\",\n\t\t\tpod.Name, pod.Namespace, strings.Join(errs, \",\"))\n\t}\n\n\treturn nil\n}",
"func getControllerPods(clientSet kubernetes.Interface, namespace string) (*corev1.PodList, error) {\n\tlabelSelector := metav1.LabelSelector{MatchLabels: map[string]string{constants.AppLabel: constants.OSMControllerName}}\n\tpodClient := clientSet.CoreV1().Pods(namespace)\n\tlistOptions := metav1.ListOptions{\n\t\tLabelSelector: labels.Set(labelSelector.MatchLabels).String(),\n\t}\n\treturn podClient.List(context.TODO(), metav1.ListOptions{LabelSelector: listOptions.LabelSelector})\n}",
"func startServerAndControllers(t *testing.T) (\n\t*kubefake.Clientset,\n\twatch.Interface,\n\tclustopclientset.Interface,\n\tcapiclientset.Interface,\n\t*capifakeclientset.Clientset,\n\tfunc()) {\n\n\t// create a fake kube client\n\tfakePtr := clientgotesting.Fake{}\n\tscheme := runtime.NewScheme()\n\tcodecs := serializer.NewCodecFactory(scheme)\n\tmetav1.AddToGroupVersion(scheme, schema.GroupVersion{Version: \"v1\"})\n\tkubefake.AddToScheme(scheme)\n\tobjectTracker := clientgotesting.NewObjectTracker(scheme, codecs.UniversalDecoder())\n\tkubeWatch := watch.NewRaceFreeFake()\n\t// Add a reactor for sending watch events when a job is modified\n\tobjectReaction := clientgotesting.ObjectReaction(objectTracker)\n\tfakePtr.AddReactor(\"*\", \"jobs\", func(action clientgotesting.Action) (bool, runtime.Object, error) {\n\t\tvar deletedObj runtime.Object\n\t\tif action, ok := action.(clientgotesting.DeleteActionImpl); ok {\n\t\t\tdeletedObj, _ = objectTracker.Get(action.GetResource(), action.GetNamespace(), action.GetName())\n\t\t}\n\t\thandled, obj, err := objectReaction(action)\n\t\tswitch action.(type) {\n\t\tcase clientgotesting.CreateActionImpl:\n\t\t\tkubeWatch.Add(obj)\n\t\tcase clientgotesting.UpdateActionImpl:\n\t\t\tkubeWatch.Modify(obj)\n\t\tcase clientgotesting.DeleteActionImpl:\n\t\t\tif deletedObj != nil {\n\t\t\t\tkubeWatch.Delete(deletedObj)\n\t\t\t}\n\t\t}\n\t\treturn handled, obj, err\n\t})\n\tfakePtr.AddWatchReactor(\"*\", clientgotesting.DefaultWatchReactor(kubeWatch, nil))\n\t// Create actual fake kube client\n\tfakeKubeClient := &kubefake.Clientset{Fake: fakePtr}\n\n\t// start the cluster-operator api server\n\tapiServerClientConfig, shutdownServer := servertesting.StartTestServerOrDie(t)\n\n\t// create a cluster-operator client\n\tclustopClient, err := clustopclientset.NewForConfig(apiServerClientConfig)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\t// create a cluster-api client\n\tcapiClient, err := 
capiclientset.NewForConfig(apiServerClientConfig)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\n\tfakeCAPIClient := &capifakeclientset.Clientset{}\n\n\t// create informers\n\tkubeInformerFactory := kubeinformers.NewSharedInformerFactory(fakeKubeClient, 10*time.Second)\n\tbatchSharedInformers := kubeInformerFactory.Batch().V1()\n\tclustopInformerFactory := clustopinformers.NewSharedInformerFactory(clustopClient, 10*time.Second)\n\tcapiInformerFactory := capiinformers.NewSharedInformerFactory(capiClient, 10*time.Second)\n\tcapiSharedInformers := capiInformerFactory.Cluster().V1alpha1()\n\n\t// create controllers\n\tstopCh := make(chan struct{})\n\tt.Log(\"controller start\")\n\t// Note that controllers must be created prior to starting the informers.\n\t// Otherwise, the controllers will not get the initial sync from the\n\t// informer and will time out waiting to sync.\n\trunControllers := []func(){\n\t\t// infra\n\t\tfunc() func() {\n\t\t\tcontroller := infracontroller.NewController(\n\t\t\t\tcapiSharedInformers.Clusters(),\n\t\t\t\tbatchSharedInformers.Jobs(),\n\t\t\t\tfakeKubeClient,\n\t\t\t\tclustopClient,\n\t\t\t\tcapiClient,\n\t\t\t)\n\t\t\treturn func() { controller.Run(1, stopCh) }\n\t\t}(),\n\t\t// master\n\t\tfunc() func() {\n\t\t\tcontroller := mastercontroller.NewController(\n\t\t\t\tcapiSharedInformers.Clusters(),\n\t\t\t\tcapiSharedInformers.MachineSets(),\n\t\t\t\tbatchSharedInformers.Jobs(),\n\t\t\t\tfakeKubeClient,\n\t\t\t\tclustopClient,\n\t\t\t\tcapiClient,\n\t\t\t)\n\t\t\treturn func() { controller.Run(1, stopCh) }\n\t\t}(),\n\t\t// components\n\t\tfunc() func() {\n\t\t\tcontroller := componentscontroller.NewController(\n\t\t\t\tcapiSharedInformers.Clusters(),\n\t\t\t\tcapiSharedInformers.MachineSets(),\n\t\t\t\tbatchSharedInformers.Jobs(),\n\t\t\t\tfakeKubeClient,\n\t\t\t\tclustopClient,\n\t\t\t\tcapiClient,\n\t\t\t)\n\t\t\treturn func() { controller.Run(1, stopCh) }\n\t\t}(),\n\t\t// nodeconfig\n\t\tfunc() func() 
{\n\t\t\tcontroller := nodeconfigcontroller.NewController(\n\t\t\t\tcapiSharedInformers.Clusters(),\n\t\t\t\tcapiSharedInformers.MachineSets(),\n\t\t\t\tbatchSharedInformers.Jobs(),\n\t\t\t\tfakeKubeClient,\n\t\t\t\tclustopClient,\n\t\t\t\tcapiClient,\n\t\t\t)\n\t\t\treturn func() { controller.Run(1, stopCh) }\n\t\t}(),\n\t\t// deployclusterapi\n\t\tfunc() func() {\n\t\t\tcontroller := deployclusterapicontroller.NewController(\n\t\t\t\tcapiSharedInformers.Clusters(),\n\t\t\t\tcapiSharedInformers.MachineSets(),\n\t\t\t\tbatchSharedInformers.Jobs(),\n\t\t\t\tfakeKubeClient,\n\t\t\t\tclustopClient,\n\t\t\t\tcapiClient,\n\t\t\t)\n\t\t\treturn func() { controller.Run(1, stopCh) }\n\t\t}(),\n\t\t// awselb\n\t\tfunc() func() {\n\t\t\tcontroller := awselb.NewController(\n\t\t\t\tcapiSharedInformers.Machines(),\n\t\t\t\tfakeKubeClient,\n\t\t\t\tclustopClient,\n\t\t\t\tcapiClient,\n\t\t\t)\n\t\t\treturn func() { controller.Run(1, stopCh) }\n\t\t}(),\n\t}\n\tvar wg sync.WaitGroup\n\twg.Add(len(runControllers))\n\tfor _, run := range runControllers {\n\t\tgo func(r func()) {\n\t\t\tdefer wg.Done()\n\t\t\tr()\n\t\t}(run)\n\t}\n\n\tt.Log(\"informers start\")\n\tkubeInformerFactory.Start(stopCh)\n\tclustopInformerFactory.Start(stopCh)\n\tcapiInformerFactory.Start(stopCh)\n\n\tshutdown := func() {\n\t\t// Shut down controller\n\t\tclose(stopCh)\n\t\t// Wait for all controller to stop\n\t\twg.Wait()\n\t\t// Shut down api server\n\t\tshutdownServer()\n\t}\n\n\treturn fakeKubeClient, kubeWatch, clustopClient, capiClient, fakeCAPIClient, shutdown\n}",
"func runControllersAndInformers(t *testing.T, rm *replicaset.ReplicaSetController, dc *deployment.DeploymentController, informers informers.SharedInformerFactory) func() {\n\tctx, cancelFn := context.WithCancel(context.Background())\n\tinformers.Start(ctx.Done())\n\tgo rm.Run(ctx, 5)\n\tgo dc.Run(ctx, 5)\n\treturn cancelFn\n}",
"func SetupAddExtraControllerVersions(k kubernetes.Interface, namespace string) kubernetes.Interface {\n\tp := MockPod()\n\n\tdv1b1 := appsv1beta1.Deployment{\n\t\tSpec: appsv1beta1.DeploymentSpec{\n\t\t\tTemplate: corev1.PodTemplateSpec{Spec: p.Spec},\n\t\t},\n\t}\n\tif _, err := k.AppsV1beta1().Deployments(namespace).Create(&dv1b1); err != nil {\n\t\tpanic(err)\n\t}\n\n\tdv1b2 := appsv1beta2.Deployment{\n\t\tSpec: appsv1beta2.DeploymentSpec{\n\t\t\tTemplate: corev1.PodTemplateSpec{Spec: p.Spec},\n\t\t},\n\t}\n\tif _, err := k.AppsV1beta2().Deployments(namespace).Create(&dv1b2); err != nil {\n\t\tpanic(err)\n\t}\n\n\tssv1b1 := appsv1beta1.StatefulSet{\n\t\tSpec: appsv1beta1.StatefulSetSpec{\n\t\t\tTemplate: corev1.PodTemplateSpec{Spec: p.Spec},\n\t\t},\n\t}\n\tif _, err := k.AppsV1beta1().StatefulSets(namespace).Create(&ssv1b1); err != nil {\n\t\tpanic(err)\n\t}\n\n\tssv1b2 := appsv1beta2.StatefulSet{\n\t\tSpec: appsv1beta2.StatefulSetSpec{\n\t\t\tTemplate: corev1.PodTemplateSpec{Spec: p.Spec},\n\t\t},\n\t}\n\tif _, err := k.AppsV1beta2().StatefulSets(namespace).Create(&ssv1b2); err != nil {\n\t\tpanic(err)\n\t}\n\n\tdsv1b2 := appsv1beta2.DaemonSet{\n\t\tSpec: appsv1beta2.DaemonSetSpec{\n\t\t\tTemplate: corev1.PodTemplateSpec{Spec: p.Spec},\n\t\t},\n\t}\n\tif _, err := k.AppsV1beta2().DaemonSets(namespace).Create(&dsv1b2); err != nil {\n\t\tpanic(err)\n\t}\n\treturn k\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
RegisterTx is just like Register but marks the migration to be executed inside a transaction. | func RegisterTx(fns ...func(DB) error) error {
return DefaultCollection.RegisterTx(fns...)
} | [
"func (_Contract *ContractTransactor) Register(opts *bind.TransactOpts, id *big.Int, owner common.Address, duration *big.Int) (*types.Transaction, error) {\n\treturn _Contract.contract.Transact(opts, \"register\", id, owner, duration)\n}",
"func (_Contract *ContractTransactorSession) Register(id *big.Int, owner common.Address, duration *big.Int) (*types.Transaction, error) {\n\treturn _Contract.Contract.Register(&_Contract.TransactOpts, id, owner, duration)\n}",
"func (t *Transaction) SubmitTx() {}",
"func RegisterTransaction(data models.TransactionCache) (string, error) {\n\n\tgenKey, err := shortid.Generate()\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn \"\", err\n\t}\n\tstoreKey := fmt.Sprintf(transactionKeyFmt, data.UserID, genKey)\n\n\tdataJSON, err := json.Marshal(data)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn \"\", err\n\t}\n\n\tif err := redisClient.Set(ctx, storeKey, dataJSON, 180*time.Second).Err(); err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn \"\", err\n\t}\n\n\treturn storeKey, nil\n\n}",
"func (m Middleware) Tx(db *sql.DB) TxFunc {\n\treturn func(f func(tx daos.Transaction, w http.ResponseWriter, r *http.Request) error) http.HandlerFunc {\n\t\treturn func(w http.ResponseWriter, r *http.Request) {\n\n\t\t\tt, err := db.Begin()\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\tl := m.log.WithRequest(r)\n\t\t\t\tif p := recover(); p != nil {\n\t\t\t\t\tt.Rollback()\n\t\t\t\t\tl.Info(\"transaction rollbacked\")\n\t\t\t\t\tpanic(p)\n\t\t\t\t} else if err != nil {\n\t\t\t\t\tt.Rollback()\n\t\t\t\t\tl.Info(\"transaction rollbacked\")\n\t\t\t\t\tpanic(err)\n\t\t\t\t} else {\n\t\t\t\t\terr = t.Commit()\n\t\t\t\t\tl.Info(\"transaction commited\")\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\terr = f(t, w, r)\n\t\t}\n\t}\n}",
"func (app *App) RegisterTxService(clientCtx client.Context) {\n\tauthtx.RegisterTxService(app.BaseApp.GRPCQueryRouter(), clientCtx, app.BaseApp.Simulate, app.interfaceRegistry)\n}",
"func (tx *txDriver) Tx(context.Context) (dialect.Tx, error) { return tx, nil }",
"func (d *Driver) Tx(ctx context.Context) (dialect.Tx, error) {\n\treturn d.BeginTx(ctx, nil)\n}",
"func (db *DB) Transaction(ctx context.Context, fn TxHandlerFunc) error {\n\tdb.mu.Lock()\n\tdefer db.mu.Unlock()\n\n\torigin, err := db.master.BeginTx(ctx, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to begin transaction: %v\", err)\n\t}\n\ttx := &Tx{origin}\n\n\tif err := fn(ctx, tx); err != nil {\n\t\tif re := tx.parent.Rollback(); re != nil {\n\t\t\tif re.Error() != sql.ErrTxDone.Error() {\n\t\t\t\treturn fmt.Errorf(\"fialed to rollback: %v\", err)\n\t\t\t}\n\t\t}\n\t\treturn fmt.Errorf(\"failed to execcute transaction: %v\", err)\n\t}\n\treturn tx.parent.Commit()\n}",
"func (_UpkeepRegistrationRequests *UpkeepRegistrationRequestsTransactor) Register(opts *bind.TransactOpts, name string, encryptedEmail []byte, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, checkData []byte, amount *big.Int, source uint8) (*types.Transaction, error) {\n\treturn _UpkeepRegistrationRequests.contract.Transact(opts, \"register\", name, encryptedEmail, upkeepContract, gasLimit, adminAddress, checkData, amount, source)\n}",
"func (_KNS *KNSTransactor) Register(opts *bind.TransactOpts, prime_owner common.Address, wallet common.Address, Jid string, tel string) (*types.Transaction, error) {\n\treturn _KNS.contract.Transact(opts, \"Register\", prime_owner, wallet, Jid, tel)\n}",
"func Transaction(db *sql.DB, fns ...func(DB) error) error {\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, fn := range fns {\n\t\terr := fn(tx)\n\t\tif err != nil {\n\t\t\ttx.Rollback()\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr = tx.Commit()\n\terr = interpretScanError(err)\n\treturn err\n}",
"func sendRegisterTx(cdc *wire.Codec) client.CommandTxCallback {\n\treturn func(cmd *cobra.Command, args []string) error {\n\t\tctx := client.NewCoreContextFromViper()\n\t\tname := viper.GetString(client.FlagUser)\n\t\treferrer := viper.GetString(client.FlagReferrer)\n\t\tamount := viper.GetString(client.FlagAmount)\n\n\t\tresetPriv := secp256k1.GenPrivKey()\n\t\ttransactionPriv := secp256k1.GenPrivKey()\n\t\tappPriv := secp256k1.GenPrivKey()\n\n\t\tfmt.Println(\"reset private key is:\", strings.ToUpper(hex.EncodeToString(resetPriv.Bytes())))\n\t\tfmt.Println(\"transaction private key is:\", strings.ToUpper(hex.EncodeToString(transactionPriv.Bytes())))\n\t\tfmt.Println(\"app private key is:\", strings.ToUpper(hex.EncodeToString(appPriv.Bytes())))\n\n\t\t// // create the message\n\t\tmsg := acc.NewRegisterMsg(\n\t\t\treferrer, name, types.LNO(amount),\n\t\t\tresetPriv.PubKey(), transactionPriv.PubKey(), appPriv.PubKey())\n\n\t\t// build and sign the transaction, then broadcast to Tendermint\n\t\tres, err := ctx.SignBuildBroadcast([]sdk.Msg{msg}, cdc)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfmt.Printf(\"Committed at block %d. Hash: %s\\n\", res.Height, res.Hash.String())\n\t\treturn nil\n\t}\n}",
"func (s service) InsertTx(ctx context.Context, tx *sql.Tx, userId uint64, groupNames *[]string) error {\n\treturn s.repo.InsertTx(ctx, tx, userId, groupNames)\n}",
"func TestSetupTx(t *testing.T) (Txer, func()) {\n\tdb, err := sql.Open(\"txdb\", uuid.NewV4().String())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcleanup := func() {\n\t\ttx.Rollback()\n\t\tdb.Close()\n\t}\n\treturn tx, cleanup\n}",
"func (c *Conn) Transaction(fn func(*Conn) error) error {\r\n\tvar (\r\n\t\ttx = c.Begin()\r\n\t\tconn = &Conn{}\r\n\t)\r\n\tcopier.Copy(conn, c)\r\n\tconn.DB = tx\r\n\tif err := fn(conn); err != nil {\r\n\t\ttx.Rollback()\r\n\t\treturn err\r\n\t}\r\n\ttx.Commit()\r\n\treturn nil\r\n}",
"func Register(up, down func(DB) error) error {\n\t_, file, _, _ := runtime.Caller(1)\n\tversion, err := extractVersion(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tallMigrations = append(allMigrations, Migration{\n\t\tVersion: version,\n\t\tUp: up,\n\t\tDown: down,\n\t})\n\treturn nil\n}",
"func (_Ethdkg *EthdkgTransactor) Register(opts *bind.TransactOpts, public_key [2]*big.Int) (*types.Transaction, error) {\n\treturn _Ethdkg.contract.Transact(opts, \"register\", public_key)\n}",
"func (_UpkeepRegistrationRequests *UpkeepRegistrationRequestsTransactorSession) Register(name string, encryptedEmail []byte, upkeepContract common.Address, gasLimit uint32, adminAddress common.Address, checkData []byte, amount *big.Int, source uint8) (*types.Transaction, error) {\n\treturn _UpkeepRegistrationRequests.Contract.Register(&_UpkeepRegistrationRequests.TransactOpts, name, encryptedEmail, upkeepContract, gasLimit, adminAddress, checkData, amount, source)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
RegisteredMigrations returns currently registered Migrations. | func RegisteredMigrations() []*Migration {
return DefaultCollection.Migrations()
} | [
"func GetMigrations() []*Migration {\n\treturn _migrations\n}",
"func GetMigrations() Migrations {\n\tm := Migrations{}\n\n\t// Version 0\n\tm = append(m, steps{ExecuteSQLFile(\"000-bootstrap.sql\")})\n\n\t// Version 1\n\tm = append(m, steps{ExecuteSQLFile(\"001-common.sql\")})\n\n\t// Version 2\n\tm = append(m, steps{ExecuteSQLFile(\"002-tracker-items.sql\")})\n\n\t// Version 3\n\tm = append(m, steps{ExecuteSQLFile(\"003-login.sql\")})\n\n\t// Version 4\n\tm = append(m, steps{ExecuteSQLFile(\"004-drop-tracker-query-id.sql\")})\n\n\t// Version 5\n\tm = append(m, steps{ExecuteSQLFile(\"005-add-search-index.sql\")})\n\n\t// Version 6\n\tm = append(m, steps{ExecuteSQLFile(\"006-rename-parent-path.sql\")})\n\n\t// Version 7\n\tm = append(m, steps{ExecuteSQLFile(\"007-work-item-links.sql\")})\n\n\t// Version 8\n\tm = append(m, steps{ExecuteSQLFile(\"008-soft-delete-or-resurrect.sql\")})\n\n\t// Version 9\n\tm = append(m, steps{ExecuteSQLFile(\"009-drop-wit-trigger.sql\")})\n\n\t// Version 10\n\tm = append(m, steps{ExecuteSQLFile(\"010-comments.sql\")})\n\n\t// Version 11\n\tm = append(m, steps{ExecuteSQLFile(\"011-projects.sql\")})\n\n\t// Version 12\n\tm = append(m, steps{ExecuteSQLFile(\"012-unique-work-item-links.sql\")})\n\n\t// version 13\n\tm = append(m, steps{ExecuteSQLFile(\"013-iterations.sql\")})\n\n\t// Version 14\n\tm = append(m, steps{ExecuteSQLFile(\"014-wi-fields-index.sql\")})\n\n\t// Version 15\n\tm = append(m, steps{ExecuteSQLFile(\"015-rename-projects-to-spaces.sql\")})\n\n\t// Version 16\n\tm = append(m, steps{ExecuteSQLFile(\"016-drop-wi-links-trigger.sql\")})\n\n\t// Version 17\n\tm = append(m, steps{ExecuteSQLFile(\"017-alter-iterations.sql\")})\n\n\t// Version 18\n\tm = append(m, steps{ExecuteSQLFile(\"018-rewrite-wits.sql\")})\n\n\t// Version 19\n\tm = append(m, steps{ExecuteSQLFile(\"019-add-state-iterations.sql\")})\n\n\t// Version 20\n\tm = append(m, steps{ExecuteSQLFile(\"020-work-item-description-update-search-index.sql\")})\n\n\t// Version 21\n\tm = append(m, 
steps{ExecuteSQLFile(\"021-add-space-description.sql\")})\n\n\t// Version 22\n\tm = append(m, steps{ExecuteSQLFile(\"022-work-item-description-update.sql\")})\n\n\t// Version 23\n\tm = append(m, steps{ExecuteSQLFile(\"023-comment-markup.sql\")})\n\n\t// Version 24\n\tm = append(m, steps{ExecuteSQLFile(\"024-comment-markup-default.sql\")})\n\n\t// Version 25\n\tm = append(m, steps{ExecuteSQLFile(\"025-refactor-identities-users.sql\")})\n\n\t// version 26\n\tm = append(m, steps{ExecuteSQLFile(\"026-areas.sql\")})\n\n\t// version 27\n\tm = append(m, steps{ExecuteSQLFile(\"027-areas-index.sql\")})\n\n\t// Version 28\n\tm = append(m, steps{ExecuteSQLFile(\"028-identity-provider_url.sql\")})\n\n\t// Version 29\n\tm = append(m, steps{ExecuteSQLFile(\"029-identities-foreign-key.sql\")})\n\n\t// Version 30\n\tm = append(m, steps{ExecuteSQLFile(\"030-indentities-unique-index.sql\")})\n\n\t// Version 31\n\tm = append(m, steps{ExecuteSQLFile(\"031-iterations-parent-path-ltree.sql\")})\n\n\t// Version 32\n\tm = append(m, steps{ExecuteSQLFile(\"032-add-foreign-key-space-id.sql\")})\n\n\t// Version 33\n\tm = append(m, steps{ExecuteSQLFile(\"033-add-space-id-wilt.sql\", space.SystemSpace.String(), \"system.space\", \"Description of the space\")})\n\n\t// Version 34\n\tm = append(m, steps{ExecuteSQLFile(\"034-space-owner.sql\")})\n\n\t// Version 35\n\tm = append(m, steps{ExecuteSQLFile(\"035-wit-to-use-uuid.sql\",\n\t\tworkitem.SystemPlannerItem.String(),\n\t\tworkitem.SystemTask.String(),\n\t\tworkitem.SystemValueProposition.String(),\n\t\tworkitem.SystemFundamental.String(),\n\t\tworkitem.SystemExperience.String(),\n\t\tworkitem.SystemFeature.String(),\n\t\tworkitem.SystemScenario.String(),\n\t\tworkitem.SystemBug.String())})\n\n\t// Version 36\n\tm = append(m, steps{ExecuteSQLFile(\"036-add-icon-to-wit.sql\")})\n\n\t// version 37\n\tm = append(m, steps{ExecuteSQLFile(\"037-work-item-revisions.sql\")})\n\n\t// Version 38\n\tm = append(m, 
steps{ExecuteSQLFile(\"038-comment-revisions.sql\")})\n\n\t// Version 39\n\tm = append(m, steps{ExecuteSQLFile(\"039-comment-revisions-parentid.sql\")})\n\n\t// Version 40\n\tm = append(m, steps{ExecuteSQLFile(\"040-add-space-id-wi-wit-tq.sql\", space.SystemSpace.String())})\n\n\t// version 41\n\tm = append(m, steps{ExecuteSQLFile(\"041-unique-area-name-create-new-area.sql\")})\n\n\t// Version 42\n\tm = append(m, steps{ExecuteSQLFile(\"042-work-item-link-revisions.sql\")})\n\n\t// Version 43\n\tm = append(m, steps{ExecuteSQLFile(\"043-space-resources.sql\")})\n\n\t// Version 44\n\tm = append(m, steps{ExecuteSQLFile(\"044-add-contextinfo-column-users.sql\")})\n\n\t// Version 45\n\tm = append(m, steps{ExecuteSQLFile(\"045-adds-order-to-existing-wi.sql\")})\n\n\t// Version 46\n\tm = append(m, steps{ExecuteSQLFile(\"046-oauth-states.sql\")})\n\n\t// Version 47\n\tm = append(m, steps{ExecuteSQLFile(\"047-codebases.sql\")})\n\n\t// Version 48\n\tm = append(m, steps{ExecuteSQLFile(\"048-unique-iteration-name-create-new-iteration.sql\")})\n\n\t// Version 49\n\tm = append(m, steps{ExecuteSQLFile(\"049-add-wi-to-root-area.sql\")})\n\n\t// Version 50\n\tm = append(m, steps{ExecuteSQLFile(\"050-add-company-to-user-profile.sql\")})\n\n\t// Version 51\n\tm = append(m, steps{ExecuteSQLFile(\"051-modify-work_item_link_types_name_idx.sql\")})\n\n\t// Version 52\n\tm = append(m, steps{ExecuteSQLFile(\"052-unique-space-names.sql\")})\n\n\t// Version 53\n\tm = append(m, steps{ExecuteSQLFile(\"053-edit-username.sql\")})\n\n\t// Version 54\n\tm = append(m, steps{ExecuteSQLFile(\"054-add-stackid-to-codebase.sql\")})\n\n\t// Version 55\n\tm = append(m, steps{ExecuteSQLFile(\"055-assign-root-area-if-missing.sql\")})\n\n\t// Version 56\n\tm = append(m, steps{ExecuteSQLFile(\"056-assign-root-iteration-if-missing.sql\")})\n\n\t// Version 57\n\tm = append(m, steps{ExecuteSQLFile(\"057-add-last-used-workspace-to-codebase.sql\")})\n\n\t// Version 58\n\tm = append(m, 
steps{ExecuteSQLFile(\"058-index-identities-fullname.sql\")})\n\n\t// Version 59\n\tm = append(m, steps{ExecuteSQLFile(\"059-fixed-ids-for-system-link-types-and-categories.sql\",\n\t\tlink.SystemWorkItemLinkTypeBugBlockerID.String(),\n\t\tlink.SystemWorkItemLinkPlannerItemRelatedID.String(),\n\t\tlink.SystemWorkItemLinkTypeParentChildID.String(),\n\t\tlink.SystemWorkItemLinkCategorySystemID.String(),\n\t\tlink.SystemWorkItemLinkCategoryUserID.String())})\n\n\t// Version 60\n\tm = append(m, steps{ExecuteSQLFile(\"060-fixed-identities-username-idx.sql\")})\n\n\t// Version 61\n\tm = append(m, steps{ExecuteSQLFile(\"061-replace-index-space-name.sql\")})\n\n\t// Version 62\n\tm = append(m, steps{ExecuteSQLFile(\"062-link-system-preparation.sql\")})\n\n\t// Version 63\n\tm = append(m, steps{ExecuteSQLFile(\"063-workitem-related-changes.sql\")})\n\n\t// Version 64\n\tm = append(m, steps{ExecuteSQLFile(\"064-remove-link-combinations.sql\")})\n\n\t// Version 65\n\tm = append(m, steps{ExecuteSQLFile(\"065-workitem-id-unique-per-space.sql\")})\n\n\t// Version 66\n\tm = append(m, steps{ExecuteSQLFile(\"066-work_item_links_data_integrity.sql\")})\n\n\t// Version 67\n\tm = append(m, steps{ExecuteSQLFile(\"067-comment-parentid-uuid.sql\")})\n\n\t// Version 68\n\tm = append(m, steps{ExecuteSQLFile(\"068-index_identities_username.sql\")})\n\n\t// Version 69\n\tm = append(m, steps{ExecuteSQLFile(\"069-limit-execution-order-to-space.sql\")})\n\n\t// Version 70\n\tm = append(m, steps{ExecuteSQLFile(\"070-rename-comment-createdby-to-creator.sql\")})\n\n\t// Version 71\n\tm = append(m, steps{ExecuteSQLFile(\"071-iteration-related-changes.sql\")})\n\n\t// Version 72\n\tm = append(m, steps{ExecuteSQLFile(\"072-adds-active-flag-in-iteration.sql\")})\n\n\t// Version 73\n\tm = append(m, steps{ExecuteSQLFile(\"073-labels.sql\")})\n\n\t// Version 74\n\tm = append(m, steps{ExecuteSQLFile(\"074-label-border-color.sql\")})\n\n\t// Version 75\n\tm = append(m, 
steps{ExecuteSQLFile(\"075-label-unique-name.sql\")})\n\n\t// Version 76\n\tm = append(m, steps{ExecuteSQLFile(\"076-drop-space-resources-and-oauth-state.sql\")})\n\n\t// Version 77\n\tm = append(m, steps{ExecuteSQLFile(\"077-index-work-item-links.sql\")})\n\n\t// Version 78\n\tm = append(m, steps{ExecuteSQLFile(\"078-tracker-to-use-uuid.sql\")})\n\n\t// Version 79\n\tm = append(m, steps{ExecuteSQLFile(\"079-assignee-and-label-empty-value.sql\", workitem.SystemAssignees, workitem.SystemLabels)})\n\n\t// Version N\n\t//\n\t// In order to add an upgrade, simply append an array of MigrationFunc to the\n\t// the end of the \"migrations\" slice. The version numbers are determined by\n\t// the index in the array. The following code in comments show how you can\n\t// do a migration in 3 steps. If one of the steps fails, the others are not\n\t// executed.\n\t// If something goes wrong during the migration, all you need to do is return\n\t// an error that is not nil.\n\n\t/*\n\t\tm = append(m, steps{\n\t\t\tfunc(db *sql.Tx) error {\n\t\t\t\t// Execute random go code\n\t\t\t\treturn nil\n\t\t\t},\n\t\t\tExecuteSQLFile(\"YOUR_OWN_FILE.sql\"),\n\t\t\tfunc(db *sql.Tx) error {\n\t\t\t\t// Execute random go code\n\t\t\t\treturn nil\n\t\t\t},\n\t\t})\n\t*/\n\n\treturn m\n}",
"func (m Migrator) RunMigrations() error {\n\t// Run everything in a transaction. In case of error, we can roll it back\n\ttx, err := m.Connection.Database.Begin()\n\tif err != nil {\n\t\t// Connection could not be started\n\t\treturn err\n\t}\n\n\t// First check if the database db_migrations exists\n\tres := tx.QueryRow(`SELECT EXISTS(\n\t\tSELECT *\n\t\tFROM information_schema.tables\n\t\tWHERE\n\t\t\ttable_schema = 'public' AND\n\t\t\ttable_name = 'db_migrations'\n\t)`)\n\n\tvar migTablePresent bool\n\terr = res.Scan(&migTablePresent)\n\tif err != nil {\n\t\t// result was invalid\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\n\talreadyRunMigrations := make(map[string]bool)\n\tif !migTablePresent {\n\t\t_, err = tx.Query(`\n\t\t\tCREATE TABLE db_migrations (version VARCHAR(50) NOT NULL, executed_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP, PRIMARY KEY(version))\n\t\t`)\n\t\tif err != nil {\n\t\t\t// could not create db_migration table\n\t\t\ttx.Rollback()\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tversionRows, err := tx.Query(`\n\t\t\tSELECT version FROM db_migrations\n\t\t`)\n\t\tif err != nil {\n\t\t\t// could not fetch the list of executed migrations\n\t\t\ttx.Rollback()\n\t\t\treturn err\n\t\t}\n\t\tfor versionRows.Next() {\n\t\t\tvar version string\n\t\t\terr = versionRows.Scan(&version)\n\t\t\tif err != nil {\n\t\t\t\t// A version number could not be parsed\n\t\t\t\ttx.Rollback()\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\talreadyRunMigrations[version] = true\n\t\t}\n\t}\n\n\tavailableMigrations, err := m.checkAvailableMigrations()\n\tif err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\n\tvar migrationsToRun []string\n\tfor version := range availableMigrations {\n\t\tif _, ok := alreadyRunMigrations[version]; !ok {\n\t\t\tmigrationsToRun = append(migrationsToRun, version)\n\t\t}\n\t}\n\tfor version := range alreadyRunMigrations {\n\t\tif _, ok := availableMigrations[version]; !ok {\n\t\t\t// Warn there is a present migration with no corresponding 
file\n\t\t}\n\t}\n\n\tfor _, version := range migrationsToRun {\n\t\tmigrationByteContent, err := migration.Asset(fmt.Sprintf(\"%s_up.sql\", version))\n\t\tif err != nil {\n\t\t\ttx.Rollback()\n\t\t\treturn err\n\t\t}\n\t\tmigrationContent := string(migrationByteContent)\n\n\t\t_, err = tx.Query(migrationContent)\n\t\tif err != nil {\n\t\t\t// There was an error running the migration\n\t\t\ttx.Rollback()\n\t\t\treturn err\n\t\t}\n\t\t_, err = tx.Query(`INSERT INTO db_migrations (version) VALUES ($1)`, version)\n\t\tif err != nil {\n\t\t\t// There was an error running the migration\n\t\t\ttx.Rollback()\n\t\t\treturn err\n\t\t}\n\t}\n\n\ttx.Commit()\n\n\treturn nil\n}",
"func RegisteredVersions() (versions []int64) {\n\tmigrations := pgmigrations.DefaultCollection.Migrations()\n\tfor _, m := range migrations {\n\t\tversions = append(versions, m.Version)\n\t}\n\treturn\n}",
"func GetMigrations(configuration MigrationConfiguration) Migrations {\n\tm := Migrations{}\n\n\t// Version 0\n\tm = append(m, steps{ExecuteSQLFile(\"000-bootstrap.sql\")})\n\n\t// Version 1\n\tm = append(m, steps{ExecuteSQLFile(\"001-identities-users.sql\")})\n\n\t// Version 2\n\tm = append(m, steps{ExecuteSQLFile(\"002-oauth-states.sql\")})\n\n\t// Version 3\n\tm = append(m, steps{ExecuteSQLFile(\"003-space-resources.sql\")})\n\n\t// Version 4\n\tm = append(m, steps{ExecuteSQLFile(\"004-unique-resource-space.sql\")})\n\n\t// Version 5\n\tm = append(m, steps{ExecuteSQLFile(\"005-authorization.sql\")})\n\n\t// Version 6\n\tm = append(m, steps{ExecuteSQLFile(\"006-external-provider.sql\")})\n\n\t// Version 7\n\tm = append(m, steps{ExecuteSQLFile(\"007-external-provider-id-index.sql\")})\n\n\t// Version 8\n\tm = append(m, steps{ExecuteSQLFile(\"008-rename-token-table.sql\")})\n\n\t// Version 9\n\tm = append(m, steps{ExecuteSQLFile(\"009-external-token-hard-delete.sql\")})\n\n\t// Version 10\n\tdefaultCluster := configuration.GetOpenShiftClientApiUrl()\n\tm = append(m, steps{ExecuteSQLFile(\"010-add-cluster-to-user.sql\", defaultCluster)})\n\n\t// Version 11\n\tm = append(m, steps{ExecuteSQLFile(\"011-add-username-to-external-token.sql\")})\n\n\t// Version 12\n\tm = append(m, steps{ExecuteSQLFile(\"012-hide-email.sql\")})\n\n\t// Version 13\n\tm = append(m, steps{ExecuteSQLFile(\"013-add-email-verified.sql\")})\n\n\t// Version 14\n\tm = append(m, steps{ExecuteSQLFile(\"014-add-user-feature-level.sql\")})\n\n\t// Version 15\n\tm = append(m, steps{ExecuteSQLFile(\"015-clear-resources-create-resource-types.sql\")})\n\n\t// Version 16\n\tm = append(m, steps{ExecuteSQLFile(\"016-add-state-to-auth-state-reference.sql\")})\n\n\t// Version 17\n\tm = append(m, steps{ExecuteSQLFile(\"017-feature-level-not-null.sql\")})\n\n\t// Version 18\n\tm = append(m, steps{ExecuteSQLFile(\"018-convert-user-feature-level.sql\")})\n\n\t// Version 19\n\tm = append(m, 
steps{ExecuteSQLFile(\"019-authorization-part-2.sql\")})\n\n\t// Version 20\n\tm = append(m, steps{ExecuteSQLFile(\"020-add-response-mode-to-auth-state-reference.sql\")})\n\n\t// Version 21\n\tm = append(m, steps{ExecuteSQLFile(\"021-organizations-list-create.sql\")})\n\n\t// Version 22\n\tm = append(m, steps{ExecuteSQLFile(\"022-add-deprovisioned-to-user.sql\")})\n\n\t// Version 23\n\tm = append(m, steps{ExecuteSQLFile(\"023-resource-type-index.sql\")})\n\n\t// Version 24\n\tm = append(m, steps{ExecuteSQLFile(\"024-role-mapping-and-team-and-group-identities.sql\")})\n\n\t// Version 25\n\tm = append(m, steps{ExecuteSQLFile(\"025-fix-feature-level.sql\")})\n\n\t// Version 26\n\tm = append(m, steps{ExecuteSQLFile(\"026-identities-users-indexes.sql\")})\n\n\t// Version 27\n\tm = append(m, steps{ExecuteSQLFile(\"027-invitations.sql\")})\n\n\t// Version 28\n\tm = append(m, steps{ExecuteSQLFile(\"028-make-organization-names-unique.sql\")})\n\n\t// Version 29\n\tm = append(m, steps{ExecuteSQLFile(\"029-add-space-resourcetype.sql\")})\n\n\t// Version 30\n\tm = append(m, steps{ExecuteSQLFile(\"030-add-team-admin-role.sql\")})\n\n\t// Version 31\n\tm = append(m, steps{ExecuteSQLFile(\"031-clean-up-roles-scopes.sql\")})\n\n\t// Version 32\n\tm = append(m, steps{ExecuteSQLFile(\"032-invitation-code.sql\")})\n\n\t// Version 33\n\tm = append(m, steps{ExecuteSQLFile(\"033-drop-space-resources.sql\")})\n\n\t// Version 34\n\tm = append(m, steps{ExecuteSQLFile(\"034-rename-token-table.sql\")})\n\n\t// Version 35\n\tm = append(m, steps{ExecuteSQLFile(\"035-unique_constraint_default_role_mapping.sql\")})\n\n\t// Version 36\n\tm = append(m, steps{ExecuteSQLFile(\"036-token-privileges.sql\")})\n\n\t// Version 37\n\tm = append(m, steps{ExecuteSQLFile(\"037-invitation-redirect-url.sql\")})\n\n\t// Version 38\n\tm = append(m, steps{ExecuteSQLFile(\"038-admin-console-resource.sql\")})\n\n\t// Version 39\n\tm = append(m, steps{ExecuteSQLFile(\"039-resource-type-alter.sql\")})\n\n\t// Version 
40\n\tm = append(m, steps{ExecuteSQLFile(\"040-deferrable-constraints.sql\")})\n\n\t// Version 41\n\tm = append(m, steps{ExecuteSQLFile(\"041-identity-role-index.sql\")})\n\n\t// Version 42\n\tm = append(m, steps{ExecuteSQLFile(\"042-token-index.sql\")})\n\n\t// Version 43\n\tm = append(m, steps{ExecuteSQLFile(\"043-add-admin-console-resource.sql\")})\n\n\t// Version 44\n\tm = append(m, steps{ExecuteSQLFile(\"044-user-active.sql\")})\n\n\t// Version 45\n\tm = append(m, steps{ExecuteSQLFile(\"045-identity-last-active.sql\")})\n\n\t// Version 46\n\tm = append(m, steps{ExecuteSQLFile(\"046-identity-last-active-default.sql\")})\n\n\t// Version 47\n\tm = append(m, steps{ExecuteSQLFile(\"047-add-user-banned-column.sql\")})\n\n\t// Version 48\n\tm = append(m, steps{ExecuteSQLFile(\"048-identity-deactivation-notification.sql\")})\n\n\t// Version 49\n\tm = append(m, steps{ExecuteSQLFile(\"049-user-banned-index.sql\")})\n\n\t// Version 50\n\tm = append(m, steps{ExecuteSQLFile(\"050-worker-lock.sql\")})\n\n\t// Version 51\n\tm = append(m, steps{ExecuteSQLFile(\"051-identity-deactivation-scheduled.sql\")})\n\n\t// Version 52\n\tm = append(m, steps{ExecuteSQLFile(\"052-deferrable-constraints2.sql\")})\n\n\t// Version 53\n\tm = append(m, steps{ExecuteSQLFile(\"053-deactivation-indexes.sql\")})\n\n\t// Version 54\n\tm = append(m, steps{ExecuteSQLFile(\"054-cleanup-oauth-state-references.sql\")})\n\n\t// Version N\n\t//\n\t// In order to add an upgrade, simply append an array of MigrationFunc to the\n\t// the end of the \"migrations\" slice. The version numbers are determined by\n\t// the index in the array. The following code in comments show how you can\n\t// do a migration in 3 steps. 
If one of the steps fails, the others are not\n\t// executed.\n\t// If something goes wrong during the migration, all you need to do is return\n\t// an error that is not nil.\n\n\t/*\n\t\tm = append(m, steps{\n\t\t\tfunc(db *sql.Tx) error {\n\t\t\t\t// Execute random go code\n\t\t\t\treturn nil\n\t\t\t},\n\t\t\tExecuteSQLFile(\"YOUR_OWN_FILE.sql\"),\n\t\t\tfunc(db *sql.Tx) error {\n\t\t\t\t// Execute random go code\n\t\t\t\treturn nil\n\t\t\t},\n\t\t})\n\t*/\n\n\treturn m\n}",
"func (s Service) Available() ([]*Migration, error) {\n\tfiles, _ := filepath.Glob(filepath.Join(s.env.Directory, \"*.sql\")) // The only possible error here is a pattern error\n\n\tvar migrations []*Migration\n\tfor _, file := range files {\n\t\tmigration, err := NewMigration(file)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tmigrations = append(migrations, migration)\n\t}\n\n\tslice.Sort(migrations, func(i, j int) bool {\n\t\treturn migrations[i].Name < migrations[j].Name\n\t})\n\n\treturn migrations, nil\n}",
"func (d *SQLike) Migrations(tk *wanderer.Toolkit) ([]*wanderer.Migration, error) {\n\tif d.env.Migrations == nil || len(d.env.Migrations) == 0 {\n\t\treturn []*wanderer.Migration{}, nil\n\t}\n\n\treturn sqlike.LoadMigrations(filepath.Join(d.env.Migrations...))\n}",
"func (_m *MigrationRepository) GetMigrations() []migrationrunner.Migration {\n\tret := _m.Called()\n\n\tvar r0 []migrationrunner.Migration\n\tif rf, ok := ret.Get(0).(func() []migrationrunner.Migration); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]migrationrunner.Migration)\n\t\t}\n\t}\n\n\treturn r0\n}",
"func registerSchemaMigrator(m schemaMigrator) {\n\tmigrations = append(migrations, m)\n}",
"func (m mysqlDialect) GetAppliedMigrations(ctx context.Context, tx Queryer, tableName string) (migrations []*AppliedMigration, err error) {\n\tmigrations = make([]*AppliedMigration, 0)\n\n\tquery := fmt.Sprintf(`\n\t\tSELECT id, checksum, execution_time_in_millis, applied_at\n\t\tFROM %s\n\t\tORDER BY id ASC`, tableName)\n\trows, err := tx.QueryContext(ctx, query)\n\tif err != nil {\n\t\treturn migrations, err\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\tmigration := AppliedMigration{}\n\n\t\tvar appliedAt mysqlTime\n\t\terr = rows.Scan(&migration.ID, &migration.Checksum, &migration.ExecutionTimeInMillis, &appliedAt)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"Failed to GetAppliedMigrations. Did somebody change the structure of the %s table?: %w\", tableName, err)\n\t\t\treturn migrations, err\n\t\t}\n\t\tmigration.AppliedAt = appliedAt.Value\n\t\tmigrations = append(migrations, &migration)\n\t}\n\n\treturn migrations, err\n}",
"func getMigrations() *migrate.MemoryMigrationSource {\n\tsource := &migrate.MemoryMigrationSource{}\n\tfn := func(path string, f os.FileInfo, err error) error {\n\t\tif !f.IsDir() && strings.HasSuffix(f.Name(), \".sql\") {\n\t\t\tmigFile, err := os.Open(path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tmig, err := migrate.ParseMigration(path, migFile)\n\t\t\tmigFile.Close()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tsource.Migrations = append(source.Migrations, mig)\n\t\t}\n\t\treturn nil\n\t}\n\twd, _ := os.Getwd()\n\terr := filepath.Walk(filepath.Join(wd, \"migrations\"), fn)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t\treturn nil\n\t}\n\treturn source\n}",
"func (s *SchemaEditor) PendingMigrations() (count int, err error) {\n\tif err = s.prepareSchemaMigrations(); err != nil {\n\t\treturn\n\t}\n\terr = s.pr.Do(func(plugin Pluginer) error {\n\t\tfor _, migration := range plugin.Migrations() {\n\t\t\tif isApplied, isError := s.IsAppliedMigration(migration, plugin); isError != nil {\n\t\t\t\treturn isError\n\t\t\t} else if isApplied == false {\n\t\t\t\tcount++\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\treturn\n}",
"func (p postgresDialect) GetAppliedMigrations(ctx context.Context, tx Queryer, tableName string) (migrations []*AppliedMigration, err error) {\n\tmigrations = make([]*AppliedMigration, 0)\n\n\tquery := fmt.Sprintf(`\n\t\tSELECT id, checksum, execution_time_in_millis, applied_at\n\t\tFROM %s ORDER BY id ASC\n\t`, tableName)\n\trows, err := tx.QueryContext(ctx, query)\n\tif err != nil {\n\t\treturn migrations, err\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\tmigration := AppliedMigration{}\n\t\terr = rows.Scan(&migration.ID, &migration.Checksum, &migration.ExecutionTimeInMillis, &migration.AppliedAt)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"failed to GetAppliedMigrations. Did somebody change the structure of the %s table?: %w\", tableName, err)\n\t\t\treturn migrations, err\n\t\t}\n\t\tmigration.AppliedAt = migration.AppliedAt.In(time.Local)\n\t\tmigrations = append(migrations, &migration)\n\t}\n\n\treturn migrations, err\n}",
"func ListMigrations() (migrationsList []*Migrations, err error) {\n\to := NewOrm()\n\tqs := o.QueryTable(new(Migrations))\n\tqs = qs.OrderBy(\"CreatedAt\")\n\t_, err = qs.All(&migrationsList)\n\tif err != nil {\n\t\terr = errors.Trace(err)\n\t\treturn\n\t}\n\treturn\n}",
"func (f EmbedFileSystemMigrationSource) FindMigrations() ([]*migrate.Migration, error) {\n\treturn f.findMigrations()\n}",
"func Register(up, down func(DB) error) error {\n\t_, file, _, _ := runtime.Caller(1)\n\tversion, err := extractVersion(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tallMigrations = append(allMigrations, Migration{\n\t\tVersion: version,\n\t\tUp: up,\n\t\tDown: down,\n\t})\n\treturn nil\n}",
"func registerMigrations(ctx context.Context, db dbutil.DB, outOfBandMigrationRunner *oobmigration.Runner) error {\n\tmigrators := map[int]oobmigration.Migrator{\n\t\tmigration.DiagnosticsCountMigrationID: migration.NewDiagnosticsCountMigrator(services.lsifStore, config.DiagnosticsCountMigrationBatchSize),\n\t\tmigration.DefinitionsCountMigrationID: migration.NewLocationsCountMigrator(services.lsifStore, \"lsif_data_definitions\", config.DefinitionsCountMigrationBatchSize),\n\t\tmigration.ReferencesCountMigrationID: migration.NewLocationsCountMigrator(services.lsifStore, \"lsif_data_references\", config.ReferencesCountMigrationBatchSize),\n\t}\n\n\tfor id, migrator := range migrators {\n\t\tif err := outOfBandMigrationRunner.Register(id, migrator, oobmigration.MigratorOptions{Interval: time.Second}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func ApplyMigrations(config *config.Config, createConnector func(*config.Config) db.Connector, createLoader func(*config.Config) loader.Loader) []types.Migration {\n\tdiskMigrations := LoadDiskMigrations(config, createLoader)\n\tdbMigrations := LoadDBMigrations(config, createConnector)\n\tmigrationsToApply := migrations.ComputeMigrationsToApply(diskMigrations, dbMigrations)\n\n\tlog.Printf(\"Found [%d] migrations to apply ==> OK\", len(migrationsToApply))\n\tdoApplyMigrations(migrationsToApply, config, createConnector)\n\n\tnotifier := notifications.CreateNotifier(config)\n\ttext := fmt.Sprintf(\"Migrations applied: %d\", len(migrationsToApply))\n\tresp, err := notifier.Notify(text)\n\n\tif err != nil {\n\t\tlog.Printf(\"Notifier err: %q\", err)\n\t} else {\n\t\tlog.Printf(\"Notifier response: %q\", resp)\n\t}\n\n\treturn migrationsToApply\n}",
"func (m *User) GetManagedAppRegistrations()([]ManagedAppRegistrationable) {\n return m.managedAppRegistrations\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Read MeasurementsGET sends measurements json response | func MeasurementsGET(w http.ResponseWriter, r *http.Request) {
// Get condition GET parameter
condition := r.URL.Query().Get("condition")
// Validate condition
reg, errReg := regexp.Compile("^(\\w+[<>=]+\\w+(\\s(and|or)\\s?)?)*$")
if errReg != nil {
log.Println(errReg)
response.SendError(w, http.StatusInternalServerError, friendlyError)
return
}
if !reg.MatchString(condition) {
log.Println("Wrong condition statement: " + condition)
response.SendError(w, http.StatusInternalServerError, itemsNotFound)
return
}
// Get all items
data, err := feinstaub.ReadMeasurements(condition)
if err != nil {
log.Println(err)
response.SendError(w, http.StatusInternalServerError, friendlyError)
return
}
// Send json data
if len(data) == 0 {
response.Send(w, http.StatusOK, itemsNotFound, 0, nil)
} else {
response.Send(w, http.StatusOK, itemsFound, len(data), data)
}
} | [
"func (a *UnsupportedApiService) MeasurementsGET(ctx context.Context, measurementConfigId string) (MeasurementConfig, *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Get\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue MeasurementConfig\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/measurements/{measurementConfigId}\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"measurementConfigId\"+\"}\", fmt.Sprintf(\"%v\", measurementConfigId), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/problem+json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, 
err\n\t}\n\n\tif localVarHttpResponse.StatusCode < 300 {\n\t\t// If we succeed, return the data, otherwise pass on to decode error.\n\t\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\tif err == nil {\n\t\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t\t}\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v MeasurementConfig\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\n\t\tif localVarHttpResponse.StatusCode == 400 {\n\t\t\tvar v ProblemDetails\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\n\t\tif localVarHttpResponse.StatusCode == 401 {\n\t\t\tvar v ProblemDetails\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\n\t\tif localVarHttpResponse.StatusCode == 403 {\n\t\t\tvar v ProblemDetails\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, 
localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\n\t\tif localVarHttpResponse.StatusCode == 404 {\n\t\t\tvar v ProblemDetails\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\n\t\tif localVarHttpResponse.StatusCode == 406 {\n\t\t\tvar v ProblemDetails\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\n\t\tif localVarHttpResponse.StatusCode == 429 {\n\t\t\tvar v ProblemDetails\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}",
"func (h *Http) Measurements(p Params) (<-chan *Measurement, error) {\n var qstr []string\n var pk string\n\n for k, v := range p {\n switch k {\n case \"page\":\n v, ok := v.(int64)\n if !ok {\n return nil, fmt.Errorf(\"Invalid %s parameter, must be int64\", k)\n }\n qstr = append(qstr, fmt.Sprintf(\"%s=%d\", k, v))\n case \"pk\":\n v, ok := v.(string)\n if !ok {\n return nil, fmt.Errorf(\"Invalid %s parameter, must be string\", k)\n }\n pk = v\n default:\n return nil, fmt.Errorf(\"Invalid parameter %s\", k)\n }\n }\n\n url := MeasurementsUrl\n if pk != \"\" {\n url = fmt.Sprintf(\"%s/%s\", MeasurementsUrl, neturl.PathEscape(pk))\n }\n url += \"?format=json\"\n if len(qstr) > 0 {\n url += \"&\" + strings.Join(qstr, \"&\")\n }\n\n r, err := http.Get(url)\n if err != nil {\n return nil, fmt.Errorf(\"http.Get(%s): %s\", url, err.Error())\n }\n\n ch := make(chan *Measurement)\n go func() {\n d := json.NewDecoder(r.Body)\n defer r.Body.Close()\n\n if pk != \"\" {\n var m Measurement\n if err := d.Decode(&m); err != nil {\n if err != io.EOF {\n m := &Measurement{ParseError: fmt.Errorf(\"json.Decode(%s): %s\", url, err.Error())}\n ch <- m\n }\n } else {\n ch <- &m\n }\n close(ch)\n return\n }\n\n var r struct {\n Results []*Measurement `json:\"results\"`\n }\n if err := d.Decode(&r); err != nil {\n if err != io.EOF {\n m := &Measurement{ParseError: fmt.Errorf(\"json.Decode(%s): %s\", url, err.Error())}\n ch <- m\n }\n } else {\n for _, i := range r.Results {\n ch <- i\n }\n }\n close(ch)\n }()\n\n return ch, nil\n}",
"func HTTPMeasures(w rest.ResponseWriter, r *rest.Request) {\n\n\tfilters := backend.TrafficMeasureFilter{\n\t\tContext: r.PathParam(\"context\"),\n\t\tPool: r.PathParam(\"clientId\"),\n\t\tFromDate: time.Time{},\n\t\tToDate: time.Time{},\n\t}\n\n\tmeasures := backend.GetGraphData(filters)\n\tw.WriteJson(&measures)\n}",
"func PlotlyHTTPMeasures(w rest.ResponseWriter, r *rest.Request) {\n\n\tfilters := backend.TrafficMeasureFilter{\n\t\tContext: r.PathParam(\"context\"),\n\t\tPool: r.PathParam(\"clientId\"),\n\t\tFromDate: time.Time{},\n\t\tToDate: time.Time{},\n\t}\n\n\tmeasures := backend.GetPlotlyGraphData(filters)\n\tw.WriteJson(&measures)\n}",
"func (a *UnsupportedApiService) MeasurementLinkListMeasurementsGET(ctx context.Context) (MeasurementConfigLinkList, *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Get\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue MeasurementConfigLinkList\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/measurements\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\", \"application/problem+json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode < 300 {\n\t\t// If we succeed, return the data, otherwise pass on to decode error.\n\t\terr = 
a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\tif err == nil {\n\t\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t\t}\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v MeasurementConfigLinkList\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\n\t\tif localVarHttpResponse.StatusCode == 400 {\n\t\t\tvar v ProblemDetails\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\n\t\tif localVarHttpResponse.StatusCode == 401 {\n\t\t\tvar v ProblemDetails\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\n\t\tif localVarHttpResponse.StatusCode == 403 {\n\t\t\tvar v ProblemDetails\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, 
newErr\n\t\t}\n\n\t\tif localVarHttpResponse.StatusCode == 404 {\n\t\t\tvar v ProblemDetails\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\n\t\tif localVarHttpResponse.StatusCode == 406 {\n\t\t\tvar v ProblemDetails\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\n\t\tif localVarHttpResponse.StatusCode == 429 {\n\t\t\tvar v ProblemDetails\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}",
"func getMetrics(w http.ResponseWriter, req *http.Request) error {\n\tconst pointsN = 1024\n\tpoints := make([]dataPoint, pointsN)\n\tmultiplier := req.URL.Query().Get(\"multiplier\")\n\tmultiplierInt := 1\n\tif multiplier != \"\" {\n\t\tvar err error\n\t\tmultiplierInt, err = strconv.Atoi(multiplier)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tfor i := 0; i < pointsN; i++ {\n\t\tts := time.Now().Add(time.Second * time.Duration(-i)).UTC()\n\t\tpoints[i].Time = ts\n\t\tpoints[i].Value = math.Sin(float64(ts.Unix())/10) * float64(multiplierInt)\n\t}\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\treturn json.NewEncoder(w).Encode(metrics{\n\t\tDataPoints: points,\n\t})\n}",
"func (s *MeasurementService) GetMeasurements(ctx context.Context, opt *MeasurementCollectionOptions) (*MeasurementCollection, *Response, error) {\n\tdata := new(MeasurementCollection)\n\tresp, err := s.client.SendRequest(ctx, RequestOptions{\n\t\tMethod: \"GET\",\n\t\tPath: \"measurement/measurements\",\n\t\tQuery: opt,\n\t\tResponseData: data,\n\t})\n\treturn data, resp, err\n}",
"func measuresHandler(w http.ResponseWriter, r *http.Request) {\n\n\tif r.Method == \"GET\" {\n\t\turlParts := strings.Split(r.URL.Path, \"/\")\n\t\tif len(urlParts) == 6 {\n\n\t\t\t// Get all the data from the url\n\t\t\taeroport := urlParts[4]\n\t\t\tnature := urlParts[5]\n\t\t\tqueryValues := r.URL.Query()\n\t\t\tbeginDate := queryValues.Get(\"beginDate\")\n\t\t\tbeginTime, err1 := time.Parse(layoutHeure, beginDate)\n\t\t\tendDate := queryValues.Get(\"endDate\")\n\t\t\tendTime, err2 := time.Parse(layoutHeure, endDate)\n\t\t\tdateOk := beginTime.Before(endTime)\n\n\t\t\tif err1 == nil && err2 == nil && dateOk {\n\n\t\t\t\tvar measuresStruct donneestruct.Measures\n\t\t\t\tvar measures []donneestruct.Measure\n\n\t\t\t\t// Get data for each year\n\t\t\t\tfor i := beginTime.Year(); i <= endTime.Year(); i++ {\n\t\t\t\t\tkey := aeroport + \":\" + nature + \":\" + strconv.Itoa(i)\n\n\t\t\t\t\tres := getDataBetweenDates(key, beginTime, endTime)\n\n\t\t\t\t\tfor _, value := range res {\n\t\t\t\t\t\tmeasure := donneestruct.Measure{CaptorID: value.CapteurID, Value: value.Valeur, Date: value.Date}\n\t\t\t\t\t\tmeasures = append(measures, measure)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tmeasuresStruct = donneestruct.Measures{Measures: measures}\n\n\t\t\t\tw.WriteHeader(http.StatusFound)\n\t\t\t\twriteJSON(w, measuresStruct)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\tw.WriteHeader(http.StatusBadRequest)\n}",
"func (r *UnitOfMeasureRequest) Get(ctx context.Context) (resObj *UnitOfMeasure, err error) {\n\tvar query string\n\tif r.query != nil {\n\t\tquery = \"?\" + r.query.Encode()\n\t}\n\terr = r.JSONRequest(ctx, \"GET\", query, nil, &resObj)\n\treturn\n}",
"func (h *Http) MeasurementResults(p Params) (<-chan *measurement.Result, error) {\n var qstr []string\n var pk string\n var fragmented bool\n\n for k, v := range p {\n switch k {\n case \"pk\":\n v, ok := v.(string)\n if !ok {\n return nil, fmt.Errorf(\"Invalid %s parameter, must be string\", k)\n }\n pk = v\n case \"start\":\n fallthrough\n case \"stop\":\n v, ok := v.(int64)\n if !ok {\n return nil, fmt.Errorf(\"Invalid %s parameter, must be int64\", k)\n }\n qstr = append(qstr, fmt.Sprintf(\"%s=%d\", k, v))\n case \"probe_ids\":\n fallthrough\n case \"anchors-only\":\n fallthrough\n case \"public-only\":\n return nil, fmt.Errorf(\"Unimplemented parameter %s\", k)\n case \"fragmented\":\n v, ok := v.(bool)\n if !ok {\n return nil, fmt.Errorf(\"Invalid %s parameter, must be bool\", k)\n }\n fragmented = v\n default:\n return nil, fmt.Errorf(\"Invalid parameter %s\", k)\n }\n }\n\n if pk == \"\" {\n return nil, fmt.Errorf(\"Required parameter pk missing\")\n }\n\n url := fmt.Sprintf(\"%s/%s/results\", MeasurementsUrl, neturl.PathEscape(pk))\n if fragmented {\n url += \"?format=txt\"\n } else {\n url += \"?format=json\"\n }\n if len(qstr) > 0 {\n url += \"&\" + strings.Join(qstr, \"&\")\n }\n\n return h.getMeasurementResults(url, fragmented)\n}",
"func GetMeasurements(deviceEUI string, start *time.Time, end *time.Time, config config.Database) ([]structs.Measurement, error) {\n\n\tdeviceEUIBytes, err := hex.DecodeString(deviceEUI)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdb, err := openConnection(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer db.Close()\n\n\tvar data structs.Measurement\n\tvar dataString string\n\tvar time time.Time\n\n\trows, err := db.Query(sqlStatementGetMeasurements, deviceEUIBytes, start, end)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar measurements []structs.Measurement\n\n\tfor rows.Next() {\n\t\tdata = structs.Measurement{}\n\t\trows.Scan(&dataString, &time)\n\n\t\tjson.Unmarshal([]byte(dataString), &data)\n\t\t// Channels contains the same fields gain, probably filter for only capital letters\n\t\tjson.Unmarshal([]byte(dataString), &data.Channels)\n\n\t\tdata.Time = time\n\t\t// This is a workaround: removes entries that start lowercease, since channels all start uppercase\n\t\tfor key := range data.Channels {\n\t\t\tif !unicode.IsUpper(rune(key[0])) {\n\t\t\t\tdelete(data.Channels, key)\n\t\t\t}\n\t\t}\n\n\t\t// If there are measurements, add it to the list\n\t\tif len(data.Channels) > 0 {\n\t\t\tmeasurements = append(measurements, data)\n\t\t}\n\n\t}\n\n\treturn measurements, nil\n}",
"func GetTempMeasurements(c *gin.Context) {\n\tdeviceName := c.Param(\"device_name\")\n\tstartTime, endTime := helpers.ParamReader(c.Param(\"start_time\"),\n\t\tc.Param(\"end_time\"))\n\ttempMeasurement, err := database.GetTempMeasurements(c, deviceName,\n\t\tstartTime, endTime)\n\tif err != nil {\n\t\tc.Status(http.StatusNotFound)\n\t} else {\n\t\tc.JSON(http.StatusOK, tempMeasurement)\n\t}\n}",
"func (s *MeasurementService) GetMeasurement(ctx context.Context, ID string) (*Measurement, *Response, error) {\n\tdata := new(Measurement)\n\tresp, err := s.client.SendRequest(ctx, RequestOptions{\n\t\tMethod: \"GET\",\n\t\tPath: \"measurement/measurements/\" + ID,\n\t\tResponseData: data,\n\t})\n\treturn data, resp, err\n}",
"func (a *Client) GetMeasuresByDeviceUsingGET(params *GetMeasuresByDeviceUsingGETParams, authInfo runtime.ClientAuthInfoWriter) (*GetMeasuresByDeviceUsingGETOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetMeasuresByDeviceUsingGETParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"getMeasuresByDeviceUsingGET\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/v1/tenant/{tenantId}/devices/{deviceId}/measures\",\n\t\tProducesMediaTypes: []string{\"*/*\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\"},\n\t\tParams: params,\n\t\tReader: &GetMeasuresByDeviceUsingGETReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*GetMeasuresByDeviceUsingGETOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for getMeasuresByDeviceUsingGET: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func getApplicationMeasurement() endpointHandler {\n\treturn func(httpWriter http.ResponseWriter, httpRequest *http.Request) error {\n\t\tlog.Trace(\"resource/measure:getApplicationMeasurement() Entering\")\n\t\tdefer log.Trace(\"resource/measure:getApplicationMeasurement() Leaving\")\n\n\t\tlog.Debugf(\"resource/measure:getApplicationMeasurement() Request: %s\", httpRequest.URL.Path)\n\n\t\tcontentType := httpRequest.Header.Get(\"Content-Type\")\n\t\tif contentType != \"application/xml\" {\n\t\t\tlog.Errorf(\"resource/measure:getApplicationMeasurement() %s - Invalid content-type '%s'\", message.InvalidInputBadParam, contentType)\n\t\t\treturn &endpointError{Message: \"Invalid content-type\", StatusCode: http.StatusBadRequest}\n\t\t}\n\n\t\t// receive a manifest from hvs in the request body\n\t\tmanifestXml, err := ioutil.ReadAll(httpRequest.Body)\n\t\tif err != nil {\n\t\t\tseclog.WithError(err).Errorf(\"resource/measure:getApplicationMeasurement() %s - Error reading manifest xml\", message.InvalidInputBadParam)\n\t\t\treturn &endpointError{Message: \"Error reading manifest xml\", StatusCode: http.StatusBadRequest}\n\t\t}\n\n\t\t// make sure the xml is well formed, all other validation will be\n\t\t// peformed by 'measure' cmd line below\n\t\terr = xml.Unmarshal(manifestXml, new(interface{}))\n\t\tif err != nil {\n\t\t\tsecLog.WithError(err).Errorf(\"resource/measure:getApplicationMeasurement() %s - Invalid xml format\", message.InvalidInputBadParam)\n\t\t\treturn &endpointError{Message: \"Error: Invalid XML format\", StatusCode: http.StatusBadRequest}\n\t\t}\n\n\t\t// this should probably be done in wml --> if the wml log file is not yet created,\n\t\t// 'measure' will fail. 
for now, create the file before calling 'measure'.\n\t\tif _, err := os.Stat(WML_LOG_FILE); os.IsNotExist(err) {\n\t\t\t_, err = os.OpenFile(WML_LOG_FILE, os.O_RDONLY|os.O_CREATE, 0600)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithError(err).Errorf(\"resource/measure:getApplicationMeasurement() - Unable to open file\")\n\t\t\t\treturn &endpointError{Message: \"Error: Unable to open log file\", StatusCode: http.StatusInternalServerError}\n\t\t\t}\n\t\t}\n\n\t\t// make sure 'measure' is not a symbolic link before executing it\n\t\tmeasureExecutable, err := os.Lstat(constants.TBootXmMeasurePath)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Errorf(\"resource/measure:getApplicationMeasurement() - Unable to stat tboot path\")\n\t\t\treturn &endpointError{Message: \"Error: Unable to stat tboot path\", StatusCode: http.StatusInternalServerError}\n\t\t}\n\t\tif measureExecutable.Mode()&os.ModeSymlink == os.ModeSymlink {\n\t\t\tsecLog.WithError(err).Errorf(\"resource/measure:getApplicationMeasurement() %s - 'measure' is a symbolic link\", message.InvalidInputBadParam)\n\t\t\treturn &endpointError{Message: \"Error: Invalid 'measure' file\", StatusCode: http.StatusInternalServerError}\n\t\t}\n\n\t\t// call /opt/tbootxml/bin/measure and return the xml from stdout\n\t\t// 'measure <manifestxml> /'\n\t\tcmd := exec.Command(constants.TBootXmMeasurePath, string(manifestXml), \"/\")\n\t\tcmd.Env = append(os.Environ(), \"WML_LOG_FILE=\"+WML_LOG_FILE)\n\n\t\tstdout, err := cmd.StdoutPipe()\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Errorf(\"resource/measure:getApplicationMeasurement() %s - Error getting measure output\", message.AppRuntimeErr)\n\t\t\treturn &endpointError{Message: \"Error processing request\", StatusCode: http.StatusInternalServerError}\n\t\t}\n\n\t\terr = cmd.Start()\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Errorf(\"resource/measure:getApplicationMeasurement() %s - Failed to run: %s\", message.AppRuntimeErr, constants.TBootXmMeasurePath)\n\t\t\treturn 
&endpointError{Message: \"Error processing request\", StatusCode: http.StatusInternalServerError}\n\n\t\t}\n\n\t\tmeasureBytes, _ := ioutil.ReadAll(stdout)\n\t\terr = cmd.Wait()\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Errorf(\"resource/measure:getApplicationMeasurement() %s - %s returned '%s'\", message.AppRuntimeErr, constants.TBootXmMeasurePath, string(measureBytes))\n\t\t\treturn &endpointError{Message: \"Error processing request\", StatusCode: http.StatusInternalServerError}\n\t\t}\n\n\t\t// make sure we got valid xml from measure\n\t\terr = xml.Unmarshal(measureBytes, new(interface{}))\n\t\tif err != nil {\n\t\t\tseclog.WithError(err).Errorf(\"resource/measure:getApplicationMeasurement() %s - Invalid measurement xml %s: %s\", message.AppRuntimeErr, httpRequest.URL.Path, string(measureBytes))\n\t\t\treturn &endpointError{Message: \"Error processing request\", StatusCode: http.StatusInternalServerError}\n\t\t}\n\n\t\thttpWriter.WriteHeader(http.StatusOK)\n\t\t_, _ = bytes.NewBuffer(measureBytes).WriteTo(httpWriter)\n\t\treturn nil\n\t}\n}",
"func (s *sendClient) readMetrics() ([]byte, error) {\n\tresp, err := http.Get(s.readURL.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbody, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer func() {\n\t\t_, _ = io.Copy(io.Discard, resp.Body)\n\t\t_ = resp.Body.Close()\n\t}()\n\n\treturn body, nil\n}",
"func (loadTimer *workersPool) GetMeasurements(rawurl string, nrOfTries int, thumbnailsDir string) (*PageMeasurements, error) {\n\t_, err := url.ParseRequestURI(rawurl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif nrOfTries < 1 {\n\t\treturn nil, fmt.Errorf(\"You have to specify at least one try to get any result\")\n\t}\n\n\tphantom, err := try(nrOfTries, (*loadTimer).getPhantom)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer (*loadTimer).releasePhantom(phantom)\n\n\tperformance, err := getMeasurementsInternal(phantom, rawurl, thumbnailsDir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn performance, nil\n}",
"func httpGetMetrics(t *testing.T) []string {\n\tresp, err := http.Get(fmt.Sprintf(\"http://localhost%v%v\", addr, endpoint))\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn []string{}\n\t}\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn []string{}\n\t}\n\tlines := strings.Split(string(body), \"\\n\")\n\tif len(lines) == 0 {\n\t\tt.Error(\"httpGetMetrics returned empty response\")\n\t\treturn []string{}\n\t}\n\treturn lines\n}",
"func GetData(w http.ResponseWriter, r *http.Request) {\n\tfrom := r.URL.Query().Get(\"from\")\n\tif from == \"\" {\n\t\tfrom = fmt.Sprintf(\"%d\", time.Now().Add(-10*time.Minute).UnixNano()/1000000000)\n\t}\n\tr.Body.Close()\n\tfromI, err := strconv.ParseInt(from, 10, 64)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(\"BAD 'from' parameter\"))\n\t\treturn\n\t}\n\twindow := r.URL.Query().Get(\"window\")\n\tif window == \"\" {\n\t\twindow = \"300\"\n\t}\n\twindowI, err := strconv.ParseInt(window, 10, 64)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(\"BAD 'window' parameter\"))\n\t\treturn\n\t}\n\trv, err := qei.GetData(time.Unix(fromI, 0), time.Duration(windowI)*time.Second)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(rv.JsonBytes())\n\tr.Body.Close()\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
QueryStringParser converts URL querystring into a slice of `FilteredResult.` Given the querystring `?first_name=john:eq:and&last_name=doe:eq` | func QueryStringParser(queryStr string, filters map[string]string) []FilteredResult {
//define custom map type to allowduplicate keys
type Map struct {
Key string
Value string
}
params := []Map{}
searchFilters := []FilteredResult{}
parts := strings.Split(queryStr, "&")
//build a key/value map of the querystring by
//storing the query as key and the fragment as the value
for _, part := range parts {
split := strings.Split(part, "=")
if len(split) > 1 && split[1] != "" {
params = append(params, Map{
Key: split[0],
Value: split[1],
})
} else {
params = append(params, Map{
Key: split[0],
Value: "",
})
}
}
//
for _, param := range params {
for name, varType := range filters {
if param.Key == name {
esc, _ := url.QueryUnescape(param.Value)
parseValue, operator, condition := RHSParser(esc, varType)
searchFilters = append(searchFilters, FilteredResult{
Field: param.Key,
Type: varType,
Value: parseValue,
Operator: operator,
Condition: condition,
})
break
}
}
}
return searchFilters
} | [
"func QueryStringParser(query string) (*Query, error) {\n\tvar (\n\t\tkeyStart, keyEnd int\n\t\tvalStart, valEnd int\n\t\tfirstInfoHash string\n\n\t\tonKey = true\n\t\thasInfoHash = false\n\n\t\tq = &Query{\n\t\t\tInfoHashes: nil,\n\t\t\tParams: make(map[string]string),\n\t\t}\n\t)\n\n\tfor i, length := 0, len(query); i < length; i++ {\n\t\tseparator := query[i] == '&' || query[i] == ';' || query[i] == '?'\n\t\tif separator || i == length-1 {\n\t\t\tif onKey {\n\t\t\t\tkeyStart = i + 1\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif i == length-1 && !separator {\n\t\t\t\tif query[i] == '=' {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tvalEnd = i\n\t\t\t}\n\t\t\tkeyStr, err := url.QueryUnescape(query[keyStart : keyEnd+1])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t// The start can be greater than the end when the query contains an invalid\n\t\t\t// empty query value\n\t\t\tif valStart > valEnd {\n\t\t\t\treturn nil, errors.New(\"Malformed request\")\n\t\t\t}\n\n\t\t\tvalStr, err := url.QueryUnescape(query[valStart : valEnd+1])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tq.Params[strings.ToLower(keyStr)] = valStr\n\n\t\t\tif keyStr == \"info_hash\" {\n\t\t\t\tif hasInfoHash {\n\t\t\t\t\t// Multiple info hashes\n\t\t\t\t\tif q.InfoHashes == nil {\n\t\t\t\t\t\tq.InfoHashes = []string{firstInfoHash}\n\t\t\t\t\t}\n\n\t\t\t\t\tq.InfoHashes = append(q.InfoHashes, valStr)\n\t\t\t\t} else {\n\t\t\t\t\tfirstInfoHash = valStr\n\t\t\t\t\thasInfoHash = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tonKey = true\n\t\t\tkeyStart = i + 1\n\t\t} else if query[i] == '=' {\n\t\t\tonKey = false\n\t\t\tvalStart = i + 1\n\t\t} else if onKey {\n\t\t\tkeyEnd = i\n\t\t} else {\n\t\t\tvalEnd = i\n\t\t}\n\t}\n\n\treturn q, nil\n}",
"func ExtractPaginationQueryString(qry url.Values) map[string]interface{} {\n\n\tpg := map[string]interface{}{\n\t\t\"Page\": 1,\n\t\t\"PerPage\": 15,\n\t\t\"Filter\": \"\",\n\t\t\"Order\": \"created_at\",\n\t\t\"OrderType\": \"desc\",\n\t\t\"NoPagination\": false,\n\t}\n\n\t// Extract noPagination from query\n\tparamNoPagination := qry.Get(\"noPagination\")\n\tif paramNoPagination == \"0\" || paramNoPagination == \"\" {\n\t\tpg[\"NoPagination\"] = false\n\t} else {\n\t\tpg[\"NoPagination\"] = true\n\t}\n\n\t// Extract Page from query\n\tif paramPage, err := strconv.Atoi(qry.Get(\"page\")); err == nil {\n\t\tpg[\"Page\"] = paramPage\n\t}\n\n\t// Extract item per page\n\tif paramPerPage, err := strconv.Atoi(qry.Get(\"perPage\")); err == nil {\n\t\tpg[\"PerPage\"] = paramPerPage\n\t}\n\n\t// Extract needed filter\n\tif qry.Get(\"filter\") != \"\" {\n\t\tpg[\"Filter\"] = qry.Get(\"filter\")\n\t}\n\n\t// Extract needed filter\n\tif qry.Get(\"IsPaginate\") == \"\" {\n\t\tpg[\"IsPaginate\"] = false\n\t} else if qry.Get(\"IsPaginate\") == \"false\" {\n\t\tpg[\"IsPaginate\"] = false\n\t} else {\n\t\tpg[\"IsPaginate\"] = false\n\t}\n\n\t// Extract order by direction\n\tif qry.Get(\"order\") != \"\" {\n\t\tpg[\"Order\"] = qry.Get(\"order\")\n\t\tswitch qry.Get(\"orderType\") {\n\t\tcase \"asc\":\n\t\t\tpg[\"OrderType\"] = \"asc\"\n\t\tcase \"desc\":\n\t\t\tpg[\"OrderType\"] = \"desc\"\n\t\t}\n\t}\n\n\treturn pg\n}",
"func (f *Filter) QueryString() (q string, args []interface{}) {\n\tvar filters []string\n\n\tif len(f.AddedAfter) > 0 {\n\t\tfilter, newArgs := filterAddedAfter(f.AddedAfter)\n\t\tif filter != \"\" {\n\t\t\tfilters = append(filters, filter)\n\t\t\targs = append(args, newArgs...)\n\t\t}\n\t}\n\n\tif len(f.Versions) > 0 {\n\t\tfilter, newArgs := filterVersion(f.Versions)\n\t\tif filter != \"\" {\n\t\t\tfilters = append(filters, filter)\n\t\t\targs = append(args, newArgs...)\n\t\t}\n\t}\n\n\tfor field, raws := range filterMapFieldToRawStrings(f) {\n\t\tif len(raws) > 0 {\n\t\t\tfilter, newArgs := filterCreator(raws, field)\n\t\t\tfilters = append(filters, filter)\n\t\t\targs = append(args, newArgs...)\n\t\t}\n\t}\n\n\treturn strings.Join(filters, \" and \"), args\n}",
"func (f *Filter) QueryString() (q string, args []interface{}) {\n\tvar filters []string\n\n\tif !f.AddedAfter.IsZero() {\n\t\tfilter, newArgs := filterAddedAfter(f.AddedAfter.String())\n\t\tif filter != \"\" {\n\t\t\tfilters = append(filters, filter)\n\t\t\targs = append(args, newArgs...)\n\t\t}\n\t}\n\n\tif len(f.Versions) > 0 {\n\t\tfilter, newArgs := filterVersion(f.Versions)\n\t\tif filter != \"\" {\n\t\t\tfilters = append(filters, filter)\n\t\t\targs = append(args, newArgs...)\n\t\t}\n\t}\n\n\tfor field, raws := range filterMapFieldToRawStrings(f) {\n\t\tif len(raws) > 0 {\n\t\t\tfilter, newArgs := filterCreator(raws, field)\n\t\t\tfilters = append(filters, filter)\n\t\t\targs = append(args, newArgs...)\n\t\t}\n\t}\n\n\treturn strings.Join(filters, \" and \"), args\n}",
"func ParseQueryString(q string) (map[string]interface{}, error) {\n\tuv, err := url.ParseQuery(q)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmp := map[string]interface{}{}\n\tfor k, v := range uv {\n\t\tmp[k] = v[0]\n\t}\n\treturn mp, err\n}",
"func ParseQueryString(values map[string][]string) *NgGrid {\n\tg := &NgGrid{}\n\tif value := values[QS_SORT_DIRECTION]; len(value) != 0 {\n\t\tg.SortDirection = value[0]\n\t}\n\tif value := values[QS_SORT_FIELD]; len(value) != 0 {\n\t\tg.SortField = value[0]\n\t}\n\tif value := values[QS_QUERY]; len(value) != 0 {\n\t\tg.Query = value[0]\n\t}\n\tif value := values[QS_PAGE_NUMBER]; len(value) != 0 {\n\t\tpn, err := strconv.Atoi(value[0])\n\t\tif err == nil {\n\t\t\tg.PageNumber = int64(pn)\n\t\t}\n\t}\n\tif value := values[QS_PAGE_SIZE]; len(value) != 0 {\n\t\tpn, err := strconv.Atoi(value[0])\n\t\tif err == nil {\n\t\t\tg.PageSize = int64(pn)\n\t\t}\n\t}\n\n\tif g.PageNumber < 1 {\n\t\tg.PageNumber = 1\n\t}\n\n\treturn g\n}",
"func ParseQuery(ulrString string) *domain.ListOptions {\n}",
"func ParseQuery(ulrString string) *domain.ListOptions {\n}",
"func (lo *ListOptions) FromQuery(q url.Values) error {\n\tcidq := q.Get(\"cid\")\n\tif len(cidq) > 0 {\n\t\tfor _, cstr := range strings.Split(cidq, \",\") {\n\t\t\tc, err := types.DecodeCid(cstr)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"error decoding cid %s: %w\", cstr, err)\n\t\t\t}\n\t\t\tlo.Cids = append(lo.Cids, c)\n\t\t}\n\t}\n\n\tn := q.Get(\"name\")\n\tif len(n) > 255 {\n\t\treturn fmt.Errorf(\"error in 'name' query param: longer than 255 chars\")\n\t}\n\tlo.Name = n\n\n\tlo.MatchingStrategy = MatchingStrategyFromString(q.Get(\"match\"))\n\tif lo.MatchingStrategy == MatchingStrategyUndefined {\n\t\tlo.MatchingStrategy = MatchingStrategyExact // default\n\t}\n\tstatusStr := q.Get(\"status\")\n\tlo.Status = StatusFromString(statusStr)\n\t// FIXME: This is a bit lazy, as \"invalidxx,pinned\" would result in a\n\t// valid \"pinned\" filter.\n\tif statusStr != \"\" && lo.Status == StatusUndefined {\n\t\treturn fmt.Errorf(\"error decoding 'status' query param: no valid filter\")\n\t}\n\n\tif bef := q.Get(\"before\"); bef != \"\" {\n\t\terr := lo.Before.UnmarshalText([]byte(bef))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error decoding 'before' query param: %s: %w\", bef, err)\n\t\t}\n\t}\n\n\tif after := q.Get(\"after\"); after != \"\" {\n\t\terr := lo.After.UnmarshalText([]byte(after))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error decoding 'after' query param: %s: %w\", after, err)\n\t\t}\n\t}\n\n\tif v := q.Get(\"limit\"); v != \"\" {\n\t\tlim, err := strconv.ParseUint(v, 10, 64)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error parsing 'limit' query param: %s: %w\", v, err)\n\t\t}\n\t\tlo.Limit = lim\n\t} else {\n\t\tlo.Limit = 10 // implicit default\n\t}\n\n\tif meta := q.Get(\"meta\"); meta != \"\" {\n\t\terr := json.Unmarshal([]byte(meta), &lo.Meta)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error unmarshalling 'meta' query param: %s: %w\", meta, err)\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (f *GeoSearch) ParseQueryString() error {\n\terr := ParseQueryString(f)\n\n\tif f.Path == \"\" && f.Folder != \"\" {\n\t\tf.Path = f.Folder\n\t}\n\n\treturn err\n}",
"func ParseQueryString(param string, request *http.Request, params imageserver.Params) {\n\ts := request.URL.Query().Get(param)\n\tif s != \"\" {\n\t\tparams.Set(param, s)\n\t}\n}",
"func AddFiltersFromQueryParams(r *http.Request, filterDetails ...string) ([]QueryProcessor, error) {\n\tqueryParams := r.URL.Query()\n\tfilters := make([]QueryProcessor, 0)\n\tfor _, filterNameAndTypeStr := range filterDetails {\n\t\tfilterNameAndType := strings.Split(filterNameAndTypeStr, \":\")\n\t\tfilterValueAsStr := queryParams.Get(filterNameAndType[0])\n\t\tif filterValueAsStr != \"\" {\n\t\t\tif len(filterNameAndType) > 1 && filterNameAndType[1] == \"datetime\" {\n\t\t\t\tfilterValueAsTime, err := time.Parse(time.RFC3339, filterValueAsStr)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, microappError.NewValidationError(\"Key_InvalidFields\", map[string]string{filterNameAndType[0]: \"Key_InvalidValue\"})\n\t\t\t\t}\n\t\t\t\tfilters = append(filters, Filter(fmt.Sprintf(\"%v = ?\", filterNameAndType[0]), filterValueAsTime))\n\t\t\t} else {\n\t\t\t\tfilters = append(filters, Filter(fmt.Sprintf(\"%v = ?\", filterNameAndType[0]), filterValueAsStr))\n\t\t\t}\n\t\t}\n\t}\n\n\treturn filters, nil\n}",
"func ParsePathAndQuery (r *http.Request, path string, pathVars map[int]string) ([]string, map[string][]string) {\n\n\tvar pathParts []string\n\tqueryParams := r.URL.Query()\n\n\tif len(path) > 0 {\n\t\tpathParts = strings.Split(path, \"/\")\n\t}\n\n\t// pull out any vars\n\ti := 0\t// this keeps track of the number of vars we've pulled out so we can\n\t\t\t// offset the now-smaller remaining index since we extract as we go\n\tfor k, v := range pathVars {\n\t\tif len(pathParts) > (k-i) {\n\t\t\tqueryParams.Add(v, pathParts[k-i])\n\t\t\tpathParts = append(pathParts[:k-i], pathParts[k-i+1:]...)\n\t\t\ti++\n\t\t}\n\t}\n\n\treturn pathParts, queryParams\n\n}",
"func (a *AuthenticationRequest) FromQueryString(u url.Values) error {\n\treturn querystring.Decode(u, a)\n}",
"func ReparseQuery(r *http.Request) {\n\tif !strings.ContainsRune(r.URL.Path, '?') {\n\t\treturn\n\t}\n\tq := r.URL.Query()\n\ttmpURL, err := url.Parse(r.URL.Path)\n\tdebug.AssertNoErr(err)\n\tfor k, v := range tmpURL.Query() {\n\t\tq.Add(k, strings.Join(v, \",\"))\n\t}\n\tr.URL.Path = tmpURL.Path\n\tr.URL.RawQuery = q.Encode()\n}",
"func NewQueryParams(q, p, pp, sort, filter string) QueryParam {\n\tvar qp QueryParam\n\n\tif q != \"\" {\n\t\tqp.Query = q\n\t}\n\n\tpage, err := strconv.Atoi(p)\n\tif err != nil {\n\t\tpage = pageDef\n\t}\n\tqp.Page = page\n\n\tperPage, err := strconv.Atoi(pp)\n\tif err != nil {\n\t\tperPage = perPageDef\n\t}\n\tqp.PerPage = perPage\n\n\tif sortVals := strings.Split(sort, sortFltrSeparator); len(sortVals) == 2 {\n\t\tqp.Sort = map[string]string{sortVals[0]: sortVals[1]}\n\t}\n\n\tif ftrVal := strings.Split(filter, fltrSeparator); len(ftrVal) >= 1 {\n\t\tfilters := make(map[string]string, len(ftrVal))\n\t\tfor _, fltr := range ftrVal {\n\t\t\tif f := strings.Split(fltr, sortFltrSeparator); len(f) == 2 {\n\t\t\t\tfilters[f[0]] = f[1]\n\t\t\t}\n\t\t}\n\t\tqp.Filter = filters\n\t}\n\n\treturn qp\n}",
"func (a *API) ParseQuery(ctx *fasthttp.RequestCtx) map[string]string {\n\tqs, _ := url.ParseQuery(string(ctx.URI().QueryString()))\n\tvalues := make(map[string]string)\n\tfor key, val := range qs {\n\t\tvalues[key] = val[0]\n\t}\n\n\treturn values\n}",
"func FromQuery(key string) TokenExtractor {\n\treturn func(r *http.Request) (string, error) {\n\t\treturn r.URL.Query().Get(key), nil\n\t}\n}",
"func (ri *RequestInfo) PopulateQuery(qs url.Values) {\n\tfor key, value := range ri.queryParams {\n\t\tif slice, isSlice := value.([]string); isSlice {\n\t\t\tqs[key] = slice\n\t\t} else {\n\t\t\tqs.Add(key, value.(string))\n\t\t}\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
RHSParser separates the fragment part of the query string into three parts value, comparison operator (=, >, , =, LIKE) and logical operator (AND/OR). | func RHSParser(queryStrValue string, valueType string) (value interface{}, comparisonOperator string, logicOperator string) {
var val interface{}
var cOperator string = " = "
var lOperator string = " AND "
parts := strings.Split(queryStrValue, ":")
len := len(parts)
if valueType == "int" {
var number int64
number, _ = strconv.ParseInt(parts[0], 10, 64)
val = number
} else if valueType == "float" {
number := 0.0
number, _ = strconv.ParseFloat(parts[0], 64)
val = number
} else {
val = parts[0]
}
if len == 1 {
cOperator = comparisonOperators["eq"]
lOperator = " AND "
return val, cOperator, lOperator
}
if comparisonOperators[parts[1]] != "" {
cOperator = comparisonOperators[parts[1]]
}
if len == 3 {
if logicalOperators[parts[2]] != "" {
lOperator = logicalOperators[parts[2]]
}
}
return val, cOperator, lOperator
} | [
"func QueryStringParser(queryStr string, filters map[string]string) []FilteredResult {\n\t//define custom map type to allowduplicate keys\n\ttype Map struct {\n\t\tKey string\n\t\tValue string\n\t}\n\n\tparams := []Map{}\n\tsearchFilters := []FilteredResult{}\n\n\tparts := strings.Split(queryStr, \"&\")\n\n\t//build a key/value map of the querystring by\n\t//storing the query as key and the fragment as the value\n\tfor _, part := range parts {\n\t\tsplit := strings.Split(part, \"=\")\n\n\t\tif len(split) > 1 && split[1] != \"\" {\n\t\t\tparams = append(params, Map{\n\t\t\t\tKey: split[0],\n\t\t\t\tValue: split[1],\n\t\t\t})\n\t\t} else {\n\t\t\tparams = append(params, Map{\n\t\t\t\tKey: split[0],\n\t\t\t\tValue: \"\",\n\t\t\t})\n\t\t}\n\t}\n\n\t//\n\tfor _, param := range params {\n\t\tfor name, varType := range filters {\n\t\t\tif param.Key == name {\n\t\t\t\tesc, _ := url.QueryUnescape(param.Value)\n\t\t\t\tparseValue, operator, condition := RHSParser(esc, varType)\n\n\t\t\t\tsearchFilters = append(searchFilters, FilteredResult{\n\t\t\t\t\tField: param.Key,\n\t\t\t\t\tType: varType,\n\t\t\t\t\tValue: parseValue,\n\t\t\t\t\tOperator: operator,\n\t\t\t\t\tCondition: condition,\n\t\t\t\t})\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\treturn searchFilters\n}",
"func (r Rule) parse() (match, cond, result string) {\n\ts := strings.Split(r.rule, \"->\")\n\tif len(s) != 2 {\n\t\tlog.Fatalf(\"no arrow in %s\", r)\n\t}\n\tmatch = normalizeSpaces(s[0])\n\tresult = normalizeSpaces(s[1])\n\tcond = \"\"\n\tif i := strings.Index(match, \"&&\"); i >= 0 {\n\t\tcond = normalizeSpaces(match[i+2:])\n\t\tmatch = normalizeSpaces(match[:i])\n\t}\n\treturn match, cond, result\n}",
"func TestRevParsing04(t *testing.T) {\n\tvar q = \"(*Ops;AUTHORS , *Vigith Maurice;AUTHORS) ,& (ops-prod-vpc1-range, ops-prod-vpc1-mon)\"\n\tvar r = &RangeExpr{Buffer: q}\n\tr.Init()\n\tr.Expression.Init(q)\n\terr := r.Parse()\n\tif err != nil {\n\t\tt.Errorf(\"Expected NO Error, (Query: %s) should BE parsed [reverse lookup with attr and hint]\", q)\n\t}\n\n\tr.Execute()\n\tresult, errs := r.Evaluate(store)\n\tvar expected = []string{\"ops-prod-vpc1-mon\", \"ops-prod-vpc1-range\"}\n\tif len(errs) != 0 || !compare(*result, expected) {\n\t\tt.Errorf(\"Expected NO Evaluate Error, (Query: %s) should BE %s [Got: %s]\", q, expected, *result)\n\t}\n}",
"func newParserForIdentifierSubcondition(ctx RuleContext, identifier string) (predicate.Parser, error) {\n\tbinaryPred := func(predFn func(a, b interface{}) predicate.BoolPredicate, exprFn func(a, b types.WhereExpr) types.WhereExpr) func(a, b interface{}) types.WhereExpr {\n\t\treturn func(a, b interface{}) types.WhereExpr {\n\t\t\tan, aOK := a.(types.WhereExpr)\n\t\t\tif !aOK {\n\t\t\t\tan = types.WhereExpr{Literal: a}\n\t\t\t}\n\t\t\tbn, bOK := b.(types.WhereExpr)\n\t\t\tif !bOK {\n\t\t\t\tbn = types.WhereExpr{Literal: b}\n\t\t\t}\n\t\t\tif an.Literal != nil && bn.Literal != nil {\n\t\t\t\treturn types.WhereExpr{Literal: predFn(an.Literal, bn.Literal)()}\n\t\t\t}\n\t\t\treturn exprFn(an, bn)\n\t\t}\n\t}\n\treturn predicate.NewParser(predicate.Def{\n\t\tOperators: predicate.Operators{\n\t\t\tAND: func(a, b types.WhereExpr) types.WhereExpr {\n\t\t\t\taVal, aOK := a.Literal.(bool)\n\t\t\t\tbVal, bOK := b.Literal.(bool)\n\t\t\t\tswitch {\n\t\t\t\tcase aOK && bOK:\n\t\t\t\t\treturn types.WhereExpr{Literal: aVal && bVal}\n\t\t\t\tcase aVal:\n\t\t\t\t\treturn b\n\t\t\t\tcase bVal:\n\t\t\t\t\treturn a\n\t\t\t\tcase aOK || bOK:\n\t\t\t\t\treturn types.WhereExpr{Literal: false}\n\t\t\t\tdefault:\n\t\t\t\t\treturn types.WhereExpr{And: types.WhereExpr2{L: &a, R: &b}}\n\t\t\t\t}\n\t\t\t},\n\t\t\tOR: func(a, b types.WhereExpr) types.WhereExpr {\n\t\t\t\taVal, aOK := a.Literal.(bool)\n\t\t\t\tbVal, bOK := b.Literal.(bool)\n\t\t\t\tswitch {\n\t\t\t\tcase aOK && bOK:\n\t\t\t\t\treturn types.WhereExpr{Literal: aVal || bVal}\n\t\t\t\tcase aVal || bVal:\n\t\t\t\t\treturn types.WhereExpr{Literal: true}\n\t\t\t\tcase aOK:\n\t\t\t\t\treturn b\n\t\t\t\tcase bOK:\n\t\t\t\t\treturn a\n\t\t\t\tdefault:\n\t\t\t\t\treturn types.WhereExpr{Or: types.WhereExpr2{L: &a, R: &b}}\n\t\t\t\t}\n\t\t\t},\n\t\t\tNOT: func(expr types.WhereExpr) types.WhereExpr {\n\t\t\t\tif val, ok := expr.Literal.(bool); ok {\n\t\t\t\t\treturn types.WhereExpr{Literal: !val}\n\t\t\t\t}\n\t\t\t\treturn 
types.WhereExpr{Not: &expr}\n\t\t\t},\n\t\t},\n\t\tFunctions: map[string]interface{}{\n\t\t\t\"equals\": binaryPred(predicate.Equals, func(a, b types.WhereExpr) types.WhereExpr {\n\t\t\t\treturn types.WhereExpr{Equals: types.WhereExpr2{L: &a, R: &b}}\n\t\t\t}),\n\t\t\t\"contains\": binaryPred(predicate.Contains, func(a, b types.WhereExpr) types.WhereExpr {\n\t\t\t\treturn types.WhereExpr{Contains: types.WhereExpr2{L: &a, R: &b}}\n\t\t\t}),\n\t\t},\n\t\tGetIdentifier: func(fields []string) (interface{}, error) {\n\t\t\tif fields[0] == identifier {\n\t\t\t\t// TODO: Session events have only one level of attributes. Support for\n\t\t\t\t// more nested levels may be added when needed for other objects.\n\t\t\t\tif len(fields) != 2 {\n\t\t\t\t\treturn nil, trace.BadParameter(\"only exactly two fields are supported with identifier %q, got %d: %v\", identifier, len(fields), fields)\n\t\t\t\t}\n\t\t\t\treturn types.WhereExpr{Field: fields[1]}, nil\n\t\t\t}\n\t\t\tlit, err := ctx.GetIdentifier(fields)\n\t\t\treturn types.WhereExpr{Literal: lit}, trace.Wrap(err)\n\t\t},\n\t\tGetProperty: func(mapVal, keyVal interface{}) (interface{}, error) {\n\t\t\tmapExpr, mapOK := mapVal.(types.WhereExpr)\n\t\t\tif !mapOK {\n\t\t\t\tmapExpr = types.WhereExpr{Literal: mapVal}\n\t\t\t}\n\t\t\tkeyExpr, keyOK := keyVal.(types.WhereExpr)\n\t\t\tif !keyOK {\n\t\t\t\tkeyExpr = types.WhereExpr{Literal: keyVal}\n\t\t\t}\n\t\t\tif mapExpr.Literal == nil || keyExpr.Literal == nil {\n\t\t\t\t// TODO: Add support for general WhereExpr.\n\t\t\t\treturn nil, trace.BadParameter(\"GetProperty is implemented only for literals\")\n\t\t\t}\n\t\t\treturn GetStringMapValue(mapExpr.Literal, keyExpr.Literal)\n\t\t},\n\t})\n}",
"func parseRHS (e * Equation) ([]string, error) {\n eString := string (*e)\n\n // Split equation tin arrays; LHS and RHS\n sides := strings.Split (eString, \"=\")\n rhs := sides[1]\n\n // Remove all spaces\n rhs = strings.Replace (rhs, \" \", \"\", -1)\n\n // Split string to array around \"+\" operator\n contents := strings.Split (rhs, \"+\")\n\n if len (contents) == 0 {\n return nil, fmt.Errorf (\"Invalid equation: %s. RHS cannot be empty\", eString)\n }\n\n // Iterate through each element of the slice and check that it's either an\n // unsigned integer or a well formed variable\n for _, elem := range contents {\n _, numErr := strconv.ParseUint (elem, 10, 64)\n if !isAlpha (elem) && numErr != nil {\n return nil, fmt.Errorf (\"Malformed equation: %s. Only '+' operators are accepted\", eString)\n }\n }\n\n return contents, nil\n}",
"func (r Rule) parse() (match, cond, result string) {\n\ts := strings.Split(r.rule, \"->\")\n\tif len(s) != 2 {\n\t\tlog.Fatalf(\"no arrow in %s\", r)\n\t}\n\tmatch = strings.TrimSpace(s[0])\n\tresult = strings.TrimSpace(s[1])\n\tcond = \"\"\n\tif i := strings.Index(match, \"&&\"); i >= 0 {\n\t\tcond = strings.TrimSpace(match[i+2:])\n\t\tmatch = strings.TrimSpace(match[:i])\n\t}\n\treturn match, cond, result\n}",
"func formulaCriteriaParser(exp string) (fc *formulaCriteria) {\n\tfc = &formulaCriteria{}\n\tif exp == \"\" {\n\t\treturn\n\t}\n\tif match := regexp.MustCompile(`^(\\d+)$`).FindStringSubmatch(exp); len(match) > 1 {\n\t\tfc.Type, fc.Condition = criteriaEq, match[1]\n\t\treturn\n\t}\n\tif match := regexp.MustCompile(`^=(.*)$`).FindStringSubmatch(exp); len(match) > 1 {\n\t\tfc.Type, fc.Condition = criteriaEq, match[1]\n\t\treturn\n\t}\n\tif match := regexp.MustCompile(`^<>(.*)$`).FindStringSubmatch(exp); len(match) > 1 {\n\t\tfc.Type, fc.Condition = criteriaNe, match[1]\n\t\treturn\n\t}\n\tif match := regexp.MustCompile(`^<=(.*)$`).FindStringSubmatch(exp); len(match) > 1 {\n\t\tfc.Type, fc.Condition = criteriaLe, match[1]\n\t\treturn\n\t}\n\tif match := regexp.MustCompile(`^>=(.*)$`).FindStringSubmatch(exp); len(match) > 1 {\n\t\tfc.Type, fc.Condition = criteriaGe, match[1]\n\t\treturn\n\t}\n\tif match := regexp.MustCompile(`^<(.*)$`).FindStringSubmatch(exp); len(match) > 1 {\n\t\tfc.Type, fc.Condition = criteriaL, match[1]\n\t\treturn\n\t}\n\tif match := regexp.MustCompile(`^>(.*)$`).FindStringSubmatch(exp); len(match) > 1 {\n\t\tfc.Type, fc.Condition = criteriaG, match[1]\n\t\treturn\n\t}\n\tif strings.Contains(exp, \"?\") {\n\t\texp = strings.ReplaceAll(exp, \"?\", \".\")\n\t}\n\tif strings.Contains(exp, \"*\") {\n\t\texp = strings.ReplaceAll(exp, \"*\", \".*\")\n\t}\n\tfc.Type, fc.Condition = criteriaRegexp, exp\n\treturn\n}",
"func QueryStringParser(query string) (*Query, error) {\n\tvar (\n\t\tkeyStart, keyEnd int\n\t\tvalStart, valEnd int\n\t\tfirstInfoHash string\n\n\t\tonKey = true\n\t\thasInfoHash = false\n\n\t\tq = &Query{\n\t\t\tInfoHashes: nil,\n\t\t\tParams: make(map[string]string),\n\t\t}\n\t)\n\n\tfor i, length := 0, len(query); i < length; i++ {\n\t\tseparator := query[i] == '&' || query[i] == ';' || query[i] == '?'\n\t\tif separator || i == length-1 {\n\t\t\tif onKey {\n\t\t\t\tkeyStart = i + 1\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif i == length-1 && !separator {\n\t\t\t\tif query[i] == '=' {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tvalEnd = i\n\t\t\t}\n\t\t\tkeyStr, err := url.QueryUnescape(query[keyStart : keyEnd+1])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t// The start can be greater than the end when the query contains an invalid\n\t\t\t// empty query value\n\t\t\tif valStart > valEnd {\n\t\t\t\treturn nil, errors.New(\"Malformed request\")\n\t\t\t}\n\n\t\t\tvalStr, err := url.QueryUnescape(query[valStart : valEnd+1])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tq.Params[strings.ToLower(keyStr)] = valStr\n\n\t\t\tif keyStr == \"info_hash\" {\n\t\t\t\tif hasInfoHash {\n\t\t\t\t\t// Multiple info hashes\n\t\t\t\t\tif q.InfoHashes == nil {\n\t\t\t\t\t\tq.InfoHashes = []string{firstInfoHash}\n\t\t\t\t\t}\n\n\t\t\t\t\tq.InfoHashes = append(q.InfoHashes, valStr)\n\t\t\t\t} else {\n\t\t\t\t\tfirstInfoHash = valStr\n\t\t\t\t\thasInfoHash = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tonKey = true\n\t\t\tkeyStart = i + 1\n\t\t} else if query[i] == '=' {\n\t\t\tonKey = false\n\t\t\tvalStart = i + 1\n\t\t} else if onKey {\n\t\t\tkeyEnd = i\n\t\t} else {\n\t\t\tvalEnd = i\n\t\t}\n\t}\n\n\treturn q, nil\n}",
"func (p *parser) parseBinaryOpRHS(exprPrec int, lhs node) node {\n\tpos := p.token.pos\n\tfor {\n\t\tif p.token.kind < tokUserUnaryOp {\n\t\t\treturn lhs // an expression like '5' will get sent back up to parseTopLevelExpr or parseDefinition from here.\n\t\t}\n\t\ttokenPrec := p.getTokenPrecedence(p.token.val)\n\t\tif tokenPrec < exprPrec {\n\t\t\treturn lhs\n\t\t}\n\t\tbinOp := p.token.val\n\t\tp.next()\n\n\t\trhs := p.parseUnarty()\n\t\tif rhs == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tnextPrec := p.getTokenPrecedence(p.token.val)\n\t\tif tokenPrec < nextPrec {\n\t\t\trhs = p.parseBinaryOpRHS(tokenPrec+1, rhs)\n\t\t\tif rhs == nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\tlhs = &binaryNode{nodeBinary, pos, binOp, lhs, rhs}\n\t}\n}",
"func parseAfterQuery(p *parser) parserStateFn {\n\ttok := p.next()\n\tswitch tok.typ {\n\tcase tokTypeError:\n\t\tp.backup(tok)\n\t\treturn parseErrorTok\n\tcase tokTypeEOF:\n\t\tp.backup(tok)\n\t\treturn parseEOFTok\n\tcase tokTypeCloseParen:\n\t\tif p.incompleteBoolOp {\n\t\t\t// E.g.: \"(foo and)\"\n\t\t\t// Dev Note: I can't trigger this in tests.\n\t\t\treturn p.errorfAt(tok.pos, \"incomplete boolean operator\")\n\t\t}\n\t\t// Pop ops up to, and including, the matching rpnOpenParen.\n\t\tfor {\n\t\t\tif p.stagedOps.Len() == 0 {\n\t\t\t\treturn p.errorfAt(tok.pos, \"unmatched close parenthesis\")\n\t\t\t}\n\t\t\topTok := p.stagedOps.Pop()\n\t\t\tif opTok.typ == tokTypeOpenParen {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tp.filter.addBoolOp(opTok)\n\t\t\t}\n\t\t}\n\t\treturn parseAfterQuery\n\tcase tokTypeAnd:\n\t\tp.stageBoolOp(tok)\n\t\tp.incompleteBoolOp = true\n\t\treturn parseBeforeQuery\n\tcase tokTypeOr:\n\t\tp.stageBoolOp(tok)\n\t\tp.incompleteBoolOp = true\n\t\treturn parseBeforeQuery\n\tdefault:\n\t\treturn p.errorfAt(tok.pos, \"expect 'and', 'or', or ')'; got %s\",\n\t\t\ttok.typ)\n\t}\n}",
"func parseMatcherWithoutLogicalOperator(s string) (m MatcherWithoutLogicalOperator, err error) {\n\tunParsed := s // the characters of s which are yet to be parsed\n\n\tswitch {\n\tcase strings.HasPrefix(unParsed, \"data1\"):\n\t\tm.LeftOperand = Data1\n\tcase strings.HasPrefix(unParsed, \"data2\"):\n\t\tm.LeftOperand = Data2\n\tdefault:\n\t\terr = fmt.Errorf(\"matcher %q: no valid left operand\", s)\n\t\treturn\n\t}\n\tunParsed = unParsed[len(\"datax\"):] // Discard parsed leftOperand\n\n\tskipToNonSpaceCharacter(&unParsed)\n\tvar operatorLength int\n\tswitch {\n\tcase strings.HasPrefix(unParsed, \"==\"):\n\t\tm.Operator = EqualToOperator\n\t\toperatorLength = 2\n\tcase strings.HasPrefix(unParsed, \"!=\"):\n\t\tm.Operator = UnequalToOperator\n\t\toperatorLength = 2\n\tcase strings.HasPrefix(unParsed, \"<=\"):\n\t\tm.Operator = LessThanOrEqualToOperator\n\t\toperatorLength = 2\n\tcase strings.HasPrefix(unParsed, \">=\"):\n\t\tm.Operator = GreaterThanOrEqualToOperator\n\t\toperatorLength = 2\n\tcase strings.HasPrefix(unParsed, \"<\"):\n\t\tm.Operator = LessThanOperator\n\t\toperatorLength = 1\n\tcase strings.HasPrefix(unParsed, \">\"):\n\t\tm.Operator = GreaterThanOperator\n\t\toperatorLength = 1\n\tdefault:\n\t\terr = fmt.Errorf(\"matcher %q: no valid comparison operator\", s)\n\t\treturn\n\t}\n\tunParsed = unParsed[operatorLength:] // Discard parsed operator\n\n\tskipToNonSpaceCharacter(&unParsed)\n\tn, err := strconv.ParseInt(unParsed, 10, 64)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"matcher %q: no valid right operand\", s)\n\t\treturn\n\t}\n\tm.RightOperand = int64(n)\n\n\treturn\n}",
"func parseEquation (e * Equation) (string, []string, error) {\n eString := string (*e)\n\n if strings.Count (eString, \"=\") != 1 {\n return \"\", nil, fmt.Errorf (\"Invalid format: %s\\n\", eString)\n }\n\n lhs, err := parseLHS (e)\n if err != nil { return \"\", nil, err }\n\n rhs, err := parseRHS (e)\n if err != nil { return \"\", nil, err }\n\n return lhs, rhs, nil\n}",
"func (p *Parser) parseRelationalExpr() *ast.ASTNode {\n\tnode := p.parseShiftExpr()\n\tfor {\n\t\tif p.lookAhead(\"<\") {\n\t\t\tnode = &ast.ASTNode{\n\t\t\t\tKind: ast.RIGHT_INEQUALITY,\n\t\t\t\tLeft: node,\n\t\t\t\tRight: p.parseRelationalExpr(),\n\t\t\t}\n\t\t} else if p.lookAhead(\">\") {\n\t\t\tnode = &ast.ASTNode{\n\t\t\t\tKind: ast.LEFT_INEQUALITY,\n\t\t\t\tLeft: node,\n\t\t\t\tRight: p.parseRelationalExpr(),\n\t\t\t}\n\t\t} else if p.lookAhead(\"<=\") {\n\t\t\tnode = &ast.ASTNode{\n\t\t\t\tKind: ast.RIGHT_INEQUALITY_EQ,\n\t\t\t\tLeft: node,\n\t\t\t\tRight: p.parseRelationalExpr(),\n\t\t\t}\n\t\t} else if p.lookAhead(\">=\") {\n\t\t\tnode = &ast.ASTNode{\n\t\t\t\tKind: ast.LEFT_INEQUALITY_EQ,\n\t\t\t\tLeft: node,\n\t\t\t\tRight: p.parseRelationalExpr(),\n\t\t\t}\n\t\t} else if p.lookAhead(\"==\") {\n\t\t\tnode = &ast.ASTNode{\n\t\t\t\tKind: ast.EQUAL,\n\t\t\t\tLeft: node,\n\t\t\t\tRight: p.parseRelationalExpr(),\n\t\t\t}\n\t\t} else if p.lookAhead(\"!=\") {\n\t\t\tnode = &ast.ASTNode{\n\t\t\t\tKind: ast.NOT_EQUAL,\n\t\t\t\tLeft: node,\n\t\t\t\tRight: p.parseRelationalExpr(),\n\t\t\t}\n\t\t} else {\n\t\t\treturn node\n\t\t}\n\t}\n}",
"func parseBeforeQuery(p *parser) parserStateFn {\n\ttok := p.next()\n\tswitch tok.typ {\n\tcase tokTypeError:\n\t\tp.backup(tok)\n\t\treturn parseErrorTok\n\tcase tokTypeEOF:\n\t\tp.backup(tok)\n\t\treturn parseEOFTok\n\tcase tokTypeOpenParen:\n\t\t// Push the '(' onto the ops stack. It will be the marker at which to\n\t\t// stop when the ')' token is parsed.\n\t\tp.stagedOps.Push(tok)\n\t\treturn parseBeforeQuery\n\tcase tokTypeNot:\n\t\tp.stageBoolOp(tok)\n\t\tp.incompleteBoolOp = true\n\t\treturn parseBeforeQuery\n\tcase tokTypeUnquotedLiteral, tokTypeQuotedLiteral:\n\t\tp.incompleteBoolOp = false\n\t\tswitch tok2 := p.peek(); tok2.typ {\n\t\tcase tokTypeError:\n\t\t\treturn parseErrorTok\n\t\tcase tokTypeGt, tokTypeGte, tokTypeLt, tokTypeLte:\n\t\t\t// E.g.: `a.field >= 100`, `some.date.field < \"2021-02\"`\n\t\t\tif tok.typ == tokTypeQuotedLiteral {\n\t\t\t\treturn p.errorfAt(tok.pos, \"a *quoted* field for a range query is not yet supported\")\n\t\t\t}\n\t\t\tp.field = &tok\n\t\t\treturn parseRangeQuery\n\t\tcase tokTypeColon:\n\t\t\t// E.g.: `foo:value1 value2`, `foo:(a or b)`, `foo:(a and b and c)`,\n\t\t\t// `foo:*`\n\t\t\tif tok.typ == tokTypeQuotedLiteral {\n\t\t\t\treturn p.errorfAt(tok.pos, \"a *quoted* field for a term query is not yet supported\")\n\t\t\t}\n\t\t\tp.field = &tok\n\t\t\treturn parseTermsQuery\n\t\tdefault:\n\t\t\t// E.g.: `foo bar baz`\n\t\t\t// No range operator and no colon means this is a query without\n\t\t\t// a field name. 
In Kibana, this matches against \"default fields\".\n\t\t\ttermTok := tok\n\t\t\tvar terms []term\n\t\t\tfor {\n\t\t\t\tif termTok.typ == tokTypeUnquotedLiteral {\n\t\t\t\t\tterms = append(terms, newTerm(termTok.val))\n\t\t\t\t} else if termTok.typ == tokTypeQuotedLiteral {\n\t\t\t\t\tterms = append(terms, newQuotedTerm(termTok.val))\n\t\t\t\t} else {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\ttermTok = p.next()\n\t\t\t}\n\t\t\tp.backup(termTok)\n\t\t\tp.filter.addStep(&rpnDefaultFieldsTermsQuery{terms: terms})\n\t\t\treturn parseAfterQuery\n\t\t}\n\tdefault:\n\t\treturn p.errorfAt(tok.pos,\n\t\t\t\"expecting a literal, 'not', or '('; got %s\", tok.typ)\n\t}\n}",
"func parseLHS (e * Equation) (string, error) {\n var err error\n\n eString := string (*e)\n\n // Split equation into two separate arrays; LHS and RHS\n sides := strings.Split (eString, \"=\")\n lhs := sides[0] // Save LHS only for parsing\n\n contents := strings.Fields (lhs) // Partition string around single space\n\n // Check to see that not only should there be one variable but it's also\n // part of the alphabet. Otherwise, throw an error.\n if len (contents) == 1 {\n if isAlpha (contents[0]) {\n return contents[0], nil\n }\n err = fmt.Errorf (\"LHS variable must be alphebetic: %s\", contents[0])\n } else if len (contents) > 1 {\n err = fmt.Errorf (\"Too many LHS variables\")\n } else {\n err = fmt.Errorf (\"LHS is empty!\")\n }\n\n return \"\", err\n}",
"func ParseQuery(ulrString string) *domain.ListOptions {\n}",
"func ParseQuery(ulrString string) *domain.ListOptions {\n}",
"func parseRangeQuery(p *parser) parserStateFn {\n\topTok := p.next() // Already checked to be the range operator token.\n\tvalTok := p.next()\n\tswitch valTok.typ {\n\tcase tokTypeError:\n\t\tp.backup(valTok)\n\t\treturn parseErrorTok\n\tcase tokTypeUnquotedLiteral, tokTypeQuotedLiteral:\n\t\tvar trm term\n\t\tif valTok.typ == tokTypeUnquotedLiteral {\n\t\t\ttrm = newTerm(valTok.val)\n\t\t} else {\n\t\t\ttrm = newQuotedTerm(valTok.val)\n\t\t}\n\t\tif trm.Wildcard {\n\t\t\treturn p.errorfAt(valTok.pos, \"cannot have a wildcard in range query token\")\n\t\t}\n\t\tvar q rpnStep\n\t\tswitch opTok.typ {\n\t\tcase tokTypeGt:\n\t\t\tq = &rpnGtRangeQuery{\n\t\t\t\tfield: p.field.val,\n\t\t\t\tterm: trm,\n\t\t\t\tlogLevelLess: p.logLevelLess,\n\t\t\t}\n\t\tcase tokTypeGte:\n\t\t\tq = &rpnGteRangeQuery{\n\t\t\t\tfield: p.field.val,\n\t\t\t\tterm: trm,\n\t\t\t\tlogLevelLess: p.logLevelLess,\n\t\t\t}\n\t\tcase tokTypeLt:\n\t\t\tq = &rpnLtRangeQuery{\n\t\t\t\tfield: p.field.val,\n\t\t\t\tterm: trm,\n\t\t\t\tlogLevelLess: p.logLevelLess,\n\t\t\t}\n\t\tcase tokTypeLte:\n\t\t\tq = &rpnLteRangeQuery{\n\t\t\t\tfield: p.field.val,\n\t\t\t\tterm: trm,\n\t\t\t\tlogLevelLess: p.logLevelLess,\n\t\t\t}\n\t\tdefault:\n\t\t\tlg.Fatalf(\"invalid opTok.typ=%v while parsing range query\", opTok.typ)\n\t\t}\n\t\tp.filter.addStep(q)\n\t\tp.field = nil\n\t\treturn parseAfterQuery\n\tdefault:\n\t\treturn p.errorfAt(valTok.pos, \"expected a literal after '%s'; got %s\",\n\t\t\topTok.val, valTok.typ)\n\t}\n}",
"func parsePipe(p *parser, lhs expr) (e expr, err error) {\n\tp.next()\n\tif lhs.isEmpty() {\n\t\terr = fmt.Errorf(\"invalid empty expression to the left of %c at pos %d %#v %#v\", p.peek(), p.pos, lhs.start, lhs.end)\n\t\treturn\n\t}\n\tif e, err = parseClause(p); err != nil {\n\t\treturn\n\t}\n\te.start.addEpsilon(lhs.start)\n\tn := &pipeNode{}\n\te = concatNode(e, n)\n\tlhs = concatNode(lhs, n)\n\treturn\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DailyRule. | func (in *DailyRule) DeepCopy() *DailyRule {
if in == nil {
return nil
}
out := new(DailyRule)
in.DeepCopyInto(out)
return out
} | [
"func (in *Rule) DeepCopy() *Rule {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Rule)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *Day) DeepCopy() *Day {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Day)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *TriggerRule) DeepCopy() *TriggerRule {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(TriggerRule)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *Day_ARM) DeepCopy() *Day_ARM {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Day_ARM)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ClbStatefulSetRule) DeepCopy() *ClbStatefulSetRule {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ClbStatefulSetRule)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *DailyMaintenanceWindow) DeepCopy() *DailyMaintenanceWindow {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(DailyMaintenanceWindow)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScaleRule) DeepCopy() *ScaleRule {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScaleRule)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *PathRule) DeepCopy() *PathRule {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(PathRule)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *MetricRule) DeepCopy() *MetricRule {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(MetricRule)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (rule *Rule) Copy() *Rule {\n\tcpy := *rule\n\tcpy.Key = rule.Key.Copy()\n\tcpy.Value = rule.Value.Copy()\n\tcpy.Body = rule.Body.Copy()\n\treturn &cpy\n}",
"func (in *RewriteRule) DeepCopy() *RewriteRule {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(RewriteRule)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *DnsRule) DeepCopy() *DnsRule {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(DnsRule)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (o ConfigurationBackupRetentionPolicyOutput) DailySchedule() ConfigurationBackupRetentionPolicyDailySchedulePtrOutput {\n\treturn o.ApplyT(func(v ConfigurationBackupRetentionPolicy) *ConfigurationBackupRetentionPolicyDailySchedule {\n\t\treturn v.DailySchedule\n\t}).(ConfigurationBackupRetentionPolicyDailySchedulePtrOutput)\n}",
"func (in *Weekly) DeepCopy() *Weekly {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Weekly)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *MatchRule) DeepCopy() *MatchRule {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(MatchRule)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *Day_STATUS) DeepCopy() *Day_STATUS {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Day_STATUS)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *RDBACLRule) DeepCopy() *RDBACLRule {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(RDBACLRule)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *EntityRule) DeepCopy() *EntityRule {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(EntityRule)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *WorkloadRule) DeepCopy() *WorkloadRule {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(WorkloadRule)\n\tin.DeepCopyInto(out)\n\treturn out\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleRule. | func (in *ScaleRule) DeepCopy() *ScaleRule {
if in == nil {
return nil
}
out := new(ScaleRule)
in.DeepCopyInto(out)
return out
} | [
"func (in *Scale) DeepCopy() *Scale {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Scale)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScaleTarget) DeepCopy() *ScaleTarget {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScaleTarget)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *MemcachedScalingRules) DeepCopy() *MemcachedScalingRules {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(MemcachedScalingRules)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *WorkloadRule) DeepCopy() *WorkloadRule {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(WorkloadRule)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (c *LinehaulCostComputation) Scale(factor float64) {\n\tc.BaseLinehaul = c.BaseLinehaul.MultiplyFloat64(factor)\n\tc.OriginLinehaulFactor = c.OriginLinehaulFactor.MultiplyFloat64(factor)\n\tc.DestinationLinehaulFactor = c.DestinationLinehaulFactor.MultiplyFloat64(factor)\n\tc.ShorthaulCharge = c.ShorthaulCharge.MultiplyFloat64(factor)\n\tc.LinehaulChargeTotal = c.LinehaulChargeTotal.MultiplyFloat64(factor)\n}",
"func (in *ScaleList) DeepCopy() *ScaleList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScaleList)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (fn *LinearDecayFunction) Scale(scale interface{}) *LinearDecayFunction {\r\n\tfn.scale = scale\r\n\treturn fn\r\n}",
"func (in *ScaleSpec) DeepCopy() *ScaleSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScaleSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *PgBouncerScalingRules) DeepCopy() *PgBouncerScalingRules {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(PgBouncerScalingRules)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (self *Affine2) Scale(scaleX, scaleY float32) *Affine2 {\n\tself.m00 *= scaleX\n\tself.m01 *= scaleY\n\tself.m10 *= scaleX\n\tself.m11 *= scaleY\n\treturn self\n}",
"func (in *Rule) DeepCopy() *Rule {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Rule)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (a Polynomial) Scale(s byte) Polynomial {\n\tif s == 0 {\n\t\treturn Polynomial{a.field, nil}\n\t}\n\tif s == 1 {\n\t\treturn a\n\t}\n\tcoefficients := make([]byte, len(a.coefficients))\n\tfor i, coeff_i := range a.coefficients {\n\t\tcoefficients[i] = a.field.Mul(coeff_i, s)\n\t}\n\treturn NewPolynomial(a.field, coefficients...)\n}",
"func (in *MetricRule) DeepCopy() *MetricRule {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(MetricRule)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (r Rectangle) Scale(factor float64) Rectangle {\n\treturn Rectangle{\n\t\tMin: r.Min.Mul(factor),\n\t\tMax: r.Max.Mul(factor),\n\t}\n}",
"func (p *Point) Scale(factor float64) {\n\tif p == nil {\n\t\treturn\n\t}\n\tp.X = p.X * factor\n\tp.Y = p.Y * factor\n}",
"func (in *EtcdScalingRules) DeepCopy() *EtcdScalingRules {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(EtcdScalingRules)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (p Period) Scale(factor float64) *Period {\n\tresult, _ := p.ScaleWithOverflowCheck(factor)\n\treturn result\n}",
"func (p Point) Scale(s float64) Point {\n\treturn NewPoint(p.X*s, p.Y*s)\n}",
"func (self *T) Scale(f float64) *T {\n\tself[0] *= f\n\tself[1] *= f\n\treturn self\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec. | func (in *ScaleSpec) DeepCopy() *ScaleSpec {
if in == nil {
return nil
}
out := new(ScaleSpec)
in.DeepCopyInto(out)
return out
} | [
"func (in *ScalewaySpec) DeepCopy() *ScalewaySpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScalewaySpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *Scale) DeepCopy() *Scale {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Scale)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScalableTargetSpec) DeepCopy() *ScalableTargetSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScalableTargetSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScalingPolicySpec) DeepCopy() *ScalingPolicySpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScalingPolicySpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScheduledPodScalerSpec) DeepCopy() *ScheduledPodScalerSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScheduledPodScalerSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *MetricsSpec) DeepCopy() *MetricsSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(MetricsSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *Spec) DeepCopy() *Spec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Spec)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *BandwidthSliceSpec) DeepCopy() *BandwidthSliceSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(BandwidthSliceSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *SchedulingScalerSpec) DeepCopy() *SchedulingScalerSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SchedulingScalerSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in ChartSpec) DeepCopy() ChartSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ChartSpec)\n\tin.DeepCopyInto(out)\n\treturn *out\n}",
"func (in *ChartSpec) DeepCopy() *ChartSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ChartSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *StoragePoolSpec) DeepCopy() *StoragePoolSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(StoragePoolSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScaleRule) DeepCopy() *ScaleRule {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScaleRule)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (r Rectangle) Scale(factor float64) Rectangle {\n\treturn Rectangle{\n\t\tMin: r.Min.Mul(factor),\n\t\tMax: r.Max.Mul(factor),\n\t}\n}",
"func (self *T) Scale(f float64) *T {\n\tself[0] *= f\n\tself[1] *= f\n\treturn self\n}",
"func (in *MinMaxSpec) DeepCopy() *MinMaxSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(MinMaxSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScaleTarget) DeepCopy() *ScaleTarget {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScaleTarget)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (wv *Spectrum) Scale(s float32) {\n\twv.C[0] *= s\n\twv.C[1] *= s\n\twv.C[2] *= s\n\twv.C[3] *= s\n}",
"func (in *RatioMetricSpec) DeepCopy() *RatioMetricSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(RatioMetricSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleTarget. | func (in *ScaleTarget) DeepCopy() *ScaleTarget {
if in == nil {
return nil
}
out := new(ScaleTarget)
in.DeepCopyInto(out)
return out
} | [
"func (in *Scale) DeepCopy() *Scale {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Scale)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScalableTarget) DeepCopy() *ScalableTarget {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScalableTarget)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScaleRule) DeepCopy() *ScaleRule {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScaleRule)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (self *T) Scale(f float64) *T {\n\tself[0] *= f\n\tself[1] *= f\n\treturn self\n}",
"func (o HorizontalPodAutoscalerSpecOutput) ScaleTargetRef() CrossVersionObjectReferenceOutput {\n\treturn o.ApplyT(func(v HorizontalPodAutoscalerSpec) CrossVersionObjectReference { return v.ScaleTargetRef }).(CrossVersionObjectReferenceOutput)\n}",
"func (t *Transform) Scale(sx, sy float64) {\n\tout := fmt.Sprintf(\"scale(%g,%g)\", sx, sy)\n\n\tt.transforms = append(t.transforms, out)\n}",
"func (in *ScaleList) DeepCopy() *ScaleList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScaleList)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (s *UpdateTaskSetInput) SetScale(v *Scale) *UpdateTaskSetInput {\n\ts.Scale = v\n\treturn s\n}",
"func (in *ScalableTargetList) DeepCopy() *ScalableTargetList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScalableTargetList)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (self *T) Scale(f float32) *T {\n\tself[0][0] *= f\n\tself[1][1] *= f\n\treturn self\n}",
"func (in *ScaleSpec) DeepCopy() *ScaleSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScaleSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (t *Transform) SetScale(sx, sy float64) *Transform {\n\tt.Scale1.X = sx - 1\n\tt.Scale1.Y = sy - 1\n\treturn t\n}",
"func (self Transform) Scale(scaleX, scaleY float32) {\n\tC.sfTransform_scale(self.Cref, C.float(scaleX), C.float(scaleY))\n}",
"func (this *Transformable) Scale(factor Vector2f) {\n\tC.sfTransformable_scale(this.cptr, factor.toC())\n}",
"func (in *ElasticsearchDataSetScaling) DeepCopy() *ElasticsearchDataSetScaling {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ElasticsearchDataSetScaling)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (c *canvasRenderer) Scale(amount sprec.Vec2) {\n\tc.currentLayer.Transform = sprec.Mat4Prod(\n\t\tc.currentLayer.Transform,\n\t\tsprec.ScaleMat4(amount.X, amount.Y, 1.0),\n\t)\n}",
"func (t *Transform) Scale() lmath.Vec3 {\n\tt.access.RLock()\n\ts := t.scale\n\tt.access.RUnlock()\n\treturn s\n}",
"func (fn *LinearDecayFunction) Scale(scale interface{}) *LinearDecayFunction {\r\n\tfn.scale = scale\r\n\treturn fn\r\n}",
"func (p *Point) Scale(factor float64) {\n\tif p == nil {\n\t\treturn\n\t}\n\tp.X = p.X * factor\n\tp.Y = p.Y * factor\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledPodScaler. | func (in *ScheduledPodScaler) DeepCopy() *ScheduledPodScaler {
if in == nil {
return nil
}
out := new(ScheduledPodScaler)
in.DeepCopyInto(out)
return out
} | [
"func (in *ScheduledPodScalerSpec) DeepCopy() *ScheduledPodScalerSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScheduledPodScalerSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *SchedulingScaler) DeepCopy() *SchedulingScaler {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SchedulingScaler)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScheduledPodScalerList) DeepCopy() *ScheduledPodScalerList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScheduledPodScalerList)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScheduledPodScalerStatus) DeepCopy() *ScheduledPodScalerStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScheduledPodScalerStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *SchedulingScalerSchedule) DeepCopy() *SchedulingScalerSchedule {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SchedulingScalerSchedule)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *SchedulingScalerStatus) DeepCopy() *SchedulingScalerStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SchedulingScalerStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *SchedulingScalerList) DeepCopy() *SchedulingScalerList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SchedulingScalerList)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *SchedulingPolicy) DeepCopy() *SchedulingPolicy {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SchedulingPolicy)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScheduledInstance) DeepCopy() *ScheduledInstance {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScheduledInstance)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *HorizontalPodAutoscaling) DeepCopy() *HorizontalPodAutoscaling {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(HorizontalPodAutoscaling)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *SchedulingScalerSpec) DeepCopy() *SchedulingScalerSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SchedulingScalerSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScalingPolicy) DeepCopy() *ScalingPolicy {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScalingPolicy)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *WatermarkPodAutoscaler) DeepCopy() *WatermarkPodAutoscaler {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(WatermarkPodAutoscaler)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *AutoScheduling) DeepCopy() *AutoScheduling {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(AutoScheduling)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func NewPodScheduling(ctx *pulumi.Context,\n\tname string, args *PodSchedulingArgs, opts ...pulumi.ResourceOption) (*PodScheduling, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.Spec == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Spec'\")\n\t}\n\targs.ApiVersion = pulumi.StringPtr(\"resource.k8s.io/v1alpha1\")\n\targs.Kind = pulumi.StringPtr(\"PodScheduling\")\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource PodScheduling\n\terr := ctx.RegisterResource(\"kubernetes:resource.k8s.io/v1alpha1:PodScheduling\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func podScheduledState() *corev1.Pod {\n\treturn mustLoadPodRecording(podStatePath(\"scheduled\"))\n}",
"func (in *SchedulingPolicyStatus) DeepCopy() *SchedulingPolicyStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SchedulingPolicyStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScheduledInstanceRecurrence) DeepCopy() *ScheduledInstanceRecurrence {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScheduledInstanceRecurrence)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *Scheduler) DeepCopy() *Scheduler {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(Scheduler)\n\tin.DeepCopyInto(out)\n\treturn out\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledPodScalerList. | func (in *ScheduledPodScalerList) DeepCopy() *ScheduledPodScalerList {
if in == nil {
return nil
}
out := new(ScheduledPodScalerList)
in.DeepCopyInto(out)
return out
} | [
"func (in *ScheduledPodScaler) DeepCopy() *ScheduledPodScaler {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScheduledPodScaler)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *SchedulingScalerList) DeepCopy() *SchedulingScalerList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SchedulingScalerList)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScheduledPodScalerStatus) DeepCopy() *ScheduledPodScalerStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScheduledPodScalerStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScheduledPodScalerSpec) DeepCopy() *ScheduledPodScalerSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScheduledPodScalerSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ProxySQLAutoscalerList) DeepCopy() *ProxySQLAutoscalerList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ProxySQLAutoscalerList)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *SchedulingPolicyList) DeepCopy() *SchedulingPolicyList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SchedulingPolicyList)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *MySQLAutoscalerList) DeepCopy() *MySQLAutoscalerList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(MySQLAutoscalerList)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *SchedulingScaler) DeepCopy() *SchedulingScaler {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SchedulingScaler)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *KafkaConnectAutoScalerList) DeepCopy() *KafkaConnectAutoScalerList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(KafkaConnectAutoScalerList)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScalingPolicyList) DeepCopy() *ScalingPolicyList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScalingPolicyList)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *AlamedaScalerList) DeepCopy() *AlamedaScalerList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(AlamedaScalerList)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *SchedulerList) DeepCopy() *SchedulerList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SchedulerList)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *SchedulingScalerStatus) DeepCopy() *SchedulingScalerStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SchedulingScalerStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *WatermarkPodAutoscalerList) DeepCopy() *WatermarkPodAutoscalerList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(WatermarkPodAutoscalerList)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScheduleList) DeepCopy() *ScheduleList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScheduleList)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *PerconaXtraDBAutoscalerList) DeepCopy() *PerconaXtraDBAutoscalerList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(PerconaXtraDBAutoscalerList)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScheduledSparkApplicationList) DeepCopy() *ScheduledSparkApplicationList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScheduledSparkApplicationList)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *WatermarkHorizontalPodAutoscalerList) DeepCopy() *WatermarkHorizontalPodAutoscalerList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(WatermarkHorizontalPodAutoscalerList)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *SchedulerEnhancerList) DeepCopy() *SchedulerEnhancerList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SchedulerEnhancerList)\n\tin.DeepCopyInto(out)\n\treturn out\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledPodScalerSpec. | func (in *ScheduledPodScalerSpec) DeepCopy() *ScheduledPodScalerSpec {
if in == nil {
return nil
}
out := new(ScheduledPodScalerSpec)
in.DeepCopyInto(out)
return out
} | [
"func (in *ScheduledPodScaler) DeepCopy() *ScheduledPodScaler {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScheduledPodScaler)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *SchedulingScalerSpec) DeepCopy() *SchedulingScalerSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SchedulingScalerSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *SchedulingScaler) DeepCopy() *SchedulingScaler {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SchedulingScaler)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScheduledPodScalerList) DeepCopy() *ScheduledPodScalerList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScheduledPodScalerList)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *SchedulingScalerSchedule) DeepCopy() *SchedulingScalerSchedule {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SchedulingScalerSchedule)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScheduledPodScalerStatus) DeepCopy() *ScheduledPodScalerStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScheduledPodScalerStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *SchedulingPolicySpec) DeepCopy() *SchedulingPolicySpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SchedulingPolicySpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScalingPolicySpec) DeepCopy() *ScalingPolicySpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScalingPolicySpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *RSSPodSpec) DeepCopy() *RSSPodSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(RSSPodSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *SchedulingScalerList) DeepCopy() *SchedulingScalerList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SchedulingScalerList)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *SchedulerSpec) DeepCopy() *SchedulerSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SchedulerSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *SchedulingScalerStatus) DeepCopy() *SchedulingScalerStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SchedulingScalerStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ServingRuntimePodSpec) DeepCopy() *ServingRuntimePodSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ServingRuntimePodSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScheduleSpec) DeepCopy() *ScheduleSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScheduleSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *SchedulerEnhancerSpec) DeepCopy() *SchedulerEnhancerSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SchedulerEnhancerSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScheduledInstance) DeepCopy() *ScheduledInstance {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScheduledInstance)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *DeploymentV1PodSpec) DeepCopy() *DeploymentV1PodSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(DeploymentV1PodSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *SchedulingPolicy) DeepCopy() *SchedulingPolicy {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SchedulingPolicy)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *PagerdutyServiceSpec) DeepCopy() *PagerdutyServiceSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(PagerdutyServiceSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduledPodScalerStatus. | func (in *ScheduledPodScalerStatus) DeepCopy() *ScheduledPodScalerStatus {
if in == nil {
return nil
}
out := new(ScheduledPodScalerStatus)
in.DeepCopyInto(out)
return out
} | [
"func (in *SchedulingScalerStatus) DeepCopy() *SchedulingScalerStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SchedulingScalerStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScheduledPodScaler) DeepCopy() *ScheduledPodScaler {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScheduledPodScaler)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *SchedulingPolicyStatus) DeepCopy() *SchedulingPolicyStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SchedulingPolicyStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScalingPolicyStatus) DeepCopy() *ScalingPolicyStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScalingPolicyStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *WatermarkPodAutoscalerStatus) DeepCopy() *WatermarkPodAutoscalerStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(WatermarkPodAutoscalerStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScheduledPodScalerList) DeepCopy() *ScheduledPodScalerList {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScheduledPodScalerList)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *KafkaConnectAutoScalerStatus) DeepCopy() *KafkaConnectAutoScalerStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(KafkaConnectAutoScalerStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *SchedulerEnhancerStatus) DeepCopy() *SchedulerEnhancerStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(SchedulerEnhancerStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *InstanceMaintenanceScheduleStatus) DeepCopy() *InstanceMaintenanceScheduleStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(InstanceMaintenanceScheduleStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScheduledSparkApplicationStatus) DeepCopy() *ScheduledSparkApplicationStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScheduledSparkApplicationStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *WatermarkHorizontalPodAutoscalerStatus) DeepCopy() *WatermarkHorizontalPodAutoscalerStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(WatermarkHorizontalPodAutoscalerStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScheduledPodScalerSpec) DeepCopy() *ScheduledPodScalerSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScheduledPodScalerSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *ScheduleStatus) DeepCopy() *ScheduleStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(ScheduleStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *TaskScheduleStatus) DeepCopy() *TaskScheduleStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(TaskScheduleStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *AlamedaScalerStatus) DeepCopy() *AlamedaScalerStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(AlamedaScalerStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *AutoscalingLifecycleHookStatus) DeepCopy() *AutoscalingLifecycleHookStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(AutoscalingLifecycleHookStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *AutoscalerStatus) DeepCopy() *AutoscalerStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(AutoscalerStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *RegistrationServiceResourcesStatus) DeepCopy() *RegistrationServiceResourcesStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(RegistrationServiceResourcesStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func (in *AutoscalingRuleStatus) DeepCopy() *AutoscalingRuleStatus {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(AutoscalingRuleStatus)\n\tin.DeepCopyInto(out)\n\treturn out\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
PodmanImage provides access to an image reference from podman. same as github.com/google/gocontainerregistry/pkg/v1/daemon | func PodmanImage(ref name.Reference, options ...interface{}) (v1.Image, error) {
var img v1.Image
pr, pw := io.Pipe()
go func() {
opener := func() (io.ReadCloser, error) {
return pr, nil
}
var err error
tag := ref.(name.Digest).Tag()
img, err = tarball.Image(opener, &tag)
_ = pr.CloseWithError(err)
}()
// write the image in docker save format first, then load it
cmd := exec.Command("sudo", "podman", "image", "save", strings.Split(ref.Name(), "@")[0])
cmd.Stdout = pw
err := cmd.Run()
if err != nil {
return nil, fmt.Errorf("error loading image: %v", err)
}
return img, nil
} | [
"func (f *FakeRunner) podman(args []string, _ bool) (string, error) {\n\tswitch cmd := args[0]; cmd {\n\tcase \"--version\":\n\t\treturn \"podman version 1.6.4\", nil\n\n\tcase \"image\":\n\n\t\tif args[1] == \"inspect\" && args[2] == \"--format\" && args[3] == \"{{.Id}}\" {\n\t\t\tif args[3] == \"missing\" {\n\t\t\t\treturn \"\", &exec.ExitError{Stderr: []byte(\"Error: error getting image \\\"missing\\\": unable to find a name and tag match for missing in repotags: no such image\")}\n\t\t\t}\n\t\t\treturn \"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\", nil\n\t\t}\n\n\t}\n\treturn \"\", nil\n}",
"func getDaemonSetImagePatch(containerName, containerImage string) string {\n\treturn fmt.Sprintf(`{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"%s\",\"image\":\"%s\"}]}}}}`, containerName, containerImage)\n}",
"func PodmanWrite(ref name.Reference, img v1.Image, opts ...tarball.WriteOption) (string, error) {\n\tpr, pw := io.Pipe()\n\tgo func() {\n\t\t_ = pw.CloseWithError(tarball.Write(ref, img, pw, opts...))\n\t}()\n\n\t// write the image in docker save format first, then load it\n\tcmd := exec.Command(\"sudo\", \"podman\", \"image\", \"load\")\n\tcmd.Stdin = pr\n\toutput, err := cmd.Output()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error loading image: %v\", err)\n\t}\n\t// pull the image from the registry, to get the digest too\n\t// podman: \"Docker references with both a tag and digest are currently not supported\"\n\tcmd = exec.Command(\"sudo\", \"podman\", \"image\", \"pull\", strings.Split(ref.Name(), \"@\")[0])\n\terr = cmd.Run()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error pulling image: %v\", err)\n\t}\n\treturn string(output), nil\n}",
"func (e *podmanEngine) InspectImage(image string) (t types.ImageInspect, err error) {\n\tinspectcmd := exec.Command(\"podman\", \"inspect\", image)\n\tlogrus.Debugf(\"Inspecting image %s\", image)\n\toutput, err := inspectcmd.CombinedOutput()\n\tif err != nil {\n\t\tlogrus.Debugf(\"Unable to inspect image %s : %s, %s\", image, err, output)\n\t\treturn t, err\n\t}\n\tt = types.ImageInspect{}\n\terr = json.Unmarshal(output, &t)\n\tif err != nil {\n\t\tlogrus.Debugf(\"Error in unmarshalling json %s: %s.\", output, err)\n\t}\n\treturn t, err\n}",
"func ExecPodman(dest entities.ImageScpOptions, podman string, command []string) (string, error) {\n\tcmd := exec.Command(podman)\n\tCreateSCPCommand(cmd, command[1:])\n\tlogrus.Debugf(\"Executing podman command: %q\", cmd)\n\tif strings.Contains(strings.Join(command, \" \"), \"load\") { // need to tag\n\t\tif len(dest.Tag) > 0 {\n\t\t\treturn \"\", ScpTag(cmd, podman, dest)\n\t\t}\n\t\tcmd.Stdout = nil\n\t\tout, err := cmd.Output()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\timg := ExtractImage(out)\n\t\treturn img, nil\n\t}\n\treturn \"\", cmd.Run()\n}",
"func PullImage(\n\tctx context.Context,\n\treq docker.PullImage,\n\tp events.Publisher[docker.Event],\n\twg *waitgroupx.Group,\n\tlog *logrus.Entry,\n\tgetPullCommand func(docker.PullImage, string) (string, []string),\n) (err error) {\n\tif err = p.Publish(ctx, docker.NewBeginStatsEvent(docker.ImagePullStatsKind)); err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\tif err = p.Publish(ctx, docker.NewEndStatsEvent(docker.ImagePullStatsKind)); err != nil {\n\t\t\tlog.WithError(err).Warn(\"did not send image pull done stats\")\n\t\t}\n\t}()\n\n\timage := CanonicalizeImage(req.Name)\n\n\turi, err := url.Parse(image)\n\tif err != nil || uri.Scheme == \"\" {\n\t\tif err = p.Publish(ctx, docker.NewLogEvent(\n\t\t\tmodel.LogLevelInfo,\n\t\t\tfmt.Sprintf(\"image %s isn't a pullable URI; skipping pull\", image),\n\t\t)); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\t// TODO(DET-9078): Support registry auth. Investigate other auth mechanisms\n\t// with singularity & podman.\n\tcommand, args := getPullCommand(req, image)\n\n\tif err = PprintCommand(ctx, command, args, p, log); err != nil {\n\t\treturn err\n\t}\n\n\tcmd := exec.CommandContext(ctx, command, args...) 
// #nosec G204 'command' is under our control\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating stdout pipe: %w\", err)\n\t}\n\tstderr, err := cmd.StderrPipe()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating stderr pipe: %w\", err)\n\t}\n\n\t// The return codes from `podman pull` aren't super helpful in determining the error, so we\n\t// wrap the publisher and skim logs to see what happened as we ship them.\n\tignoreErrorsSig := make(chan bool)\n\tcheckIgnoreErrors := events.FuncPublisher[docker.Event](\n\t\tfunc(ctx context.Context, t docker.Event) error {\n\t\t\tif t.Log != nil && strings.Contains(t.Log.Message, \"Image file already exists\") {\n\t\t\t\tignoreErrorsSig <- true\n\t\t\t}\n\t\t\treturn p.Publish(ctx, t)\n\t\t},\n\t)\n\twg.Go(func(ctx context.Context) { ShipContainerCommandLogs(ctx, stdout, stdcopy.Stdout, p) })\n\twg.Go(func(ctx context.Context) {\n\t\tdefer close(ignoreErrorsSig)\n\t\tShipContainerCommandLogs(ctx, stderr, stdcopy.Stderr, checkIgnoreErrors)\n\t})\n\n\tif err = cmd.Start(); err != nil {\n\t\treturn fmt.Errorf(\"starting pull command: %w\", err)\n\t}\n\n\tvar ignoreErrors bool\n\tselect {\n\tcase ignoreErrors = <-ignoreErrorsSig:\n\tcase <-ctx.Done():\n\t\treturn ctx.Err()\n\t}\n\n\tif err = cmd.Wait(); err != nil && !ignoreErrors {\n\t\treturn fmt.Errorf(\"pulling %s: %w\", image, err)\n\t}\n\treturn nil\n}",
"func GetPodman(ch chan<- SourceReturn, conf *Conf) {\n\tc := conf.Podman\n\t// Check for warnOnly override\n\tif c.WarnOnly == nil {\n\t\tc.WarnOnly = &conf.WarnOnly\n\t}\n\tsr := NewSourceReturn(conf.debug)\n\tdefer func() {\n\t\tch <- sr.Return(&c.ConfBase)\n\t}()\n\t// Check if we are root\n\trunningUser, err := user.Current()\n\tif err == nil && runningUser.Uid == \"0\" {\n\t\t// Do not run sudo as root, there's no point\n\t\tc.IncludeSudo = false\n\t\tc.Sudo = false\n\t}\n\tif !c.IncludeSudo {\n\t\tcl, err := getContainersExec(true, c.Sudo)\n\t\tif err != nil {\n\t\t\terr = &ModuleNotAvailable{\"podman\", err}\n\t\t\tsr.Header = fmt.Sprintf(\"%s: %s\\n\", utils.Wrap(\"Podman\", c.padL, c.padR), utils.Warn(\"unavailable\"))\n\t\t} else {\n\t\t\tsr.Header, sr.Content, sr.Error = cl.toHeaderContent(c.Ignore, *c.WarnOnly, c.padL, c.padR)\n\t\t}\n\t} else {\n\t\tclUser, errUser := getContainersExec(true, false)\n\t\tclRoot, errRoot := getContainersExec(true, true)\n\t\t// Combine lists for now\n\t\tcl := containerList{Runtime: \"Podman\", Root: true}\n\t\t// Add # in front of root containers\n\t\tfor _, c := range clRoot.Containers {\n\t\t\tcl.Containers = append(cl.Containers, containerStatus{\n\t\t\t\tName: \"# \" + c.Name,\n\t\t\t\tStatus: c.Status,\n\t\t\t})\n\t\t}\n\t\t// Add $ in front of user containers\n\t\tfor _, c := range clUser.Containers {\n\t\t\tcl.Containers = append(cl.Containers, containerStatus{\n\t\t\t\tName: \"$ \" + c.Name,\n\t\t\t\tStatus: c.Status,\n\t\t\t})\n\t\t}\n\t\tif len(cl.Containers) == 0 && (errUser != nil || errRoot != nil) {\n\t\t\terr = &ModuleNotAvailable{\"podman\", err}\n\t\t\tsr.Header = fmt.Sprintf(\"%s: %s\\n\", utils.Wrap(\"Podman\", c.padL, c.padR), utils.Warn(\"unavailable\"))\n\t\t} else {\n\t\t\tsr.Header, sr.Content, sr.Error = cl.toHeaderContent(c.Ignore, *c.WarnOnly, c.padL, c.padR)\n\t\t}\n\t}\n\treturn\n}",
"func (daemon *Daemon) ImageService() ImageService {\n\treturn daemon.imageService\n}",
"func componentImage(name string, v semver.Version, mirror string) string {\n\treturn fmt.Sprintf(\"%s:v%s\", path.Join(kubernetesRepo(mirror), name), v)\n}",
"func formatPodImage(raw string) (result string) {\n\ts := strings.Split(raw, \":\")\n\tif len(s) == 3 {\n\t\tresult = s[2]\n\t}\n\treturn\n}",
"func RunPodmanContainer(ctx context.Context, jt drmaa2interface.JobTemplate, disablePull bool) (string, error) {\n\tspec, err := CreateContainerSpec(jt)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tr, err := containers.CreateWithSpec(ctx, spec, &containers.CreateOptions{})\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn \"\", err\n\t}\n\n\tif jt.OutputPath == \"\" && jt.ErrorPath == \"\" {\n\t\treturn r.ID, containers.Start(ctx, r.ID, &containers.StartOptions{})\n\t}\n\n\t// if stdout and stderr is set attach to container\n\terr = containers.Start(ctx, r.ID, &containers.StartOptions{})\n\tif err != nil {\n\t\treturn r.ID, err\n\t}\n\n\tvar stdoutCh chan string = nil\n\tvar stderrCh chan string = nil\n\n\tuseStdout := false\n\tstdout, stdoutOpened, err := setWriterOrNot(jt.OutputPath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif stdout != nil {\n\t\tstdoutCh = make(chan string, 512)\n\t\tuseStdout = true\n\t}\n\tuseStderr := false\n\tstderr, stderrOpened, err := setWriterOrNot(jt.ErrorPath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif stderr != nil {\n\t\tstderrCh = make(chan string, 512)\n\t\tuseStderr = true\n\t}\n\tt := true\n\tgo func() {\n\t\terr = containers.Logs(ctx, r.ID, &containers.LogOptions{Follow: &t, Stderr: &useStderr, Stdout: &useStdout},\n\t\t\tstdoutCh, stderrCh)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"failed attaching to logs: %v\\n\", err)\n\t\t}\n\t}()\n\tif useStdout {\n\t\tgo func() {\n\t\t\tfor line := range stdoutCh {\n\t\t\t\tfmt.Fprintf(stdout, \"%s\\n\", line)\n\t\t\t}\n\t\t\tif stdoutOpened {\n\t\t\t\tstdout.Close()\n\t\t\t}\n\t\t}()\n\t}\n\n\tif useStderr {\n\t\tgo func() {\n\t\t\tfor line := range stderrCh {\n\t\t\t\tfmt.Fprintf(stderr, \"%s\\n\", line)\n\t\t\t}\n\t\t\tif stderrOpened {\n\t\t\t\tstderr.Close()\n\t\t\t}\n\t\t}()\n\t}\n\n\treturn r.ID, nil\n}",
"func (s *VarlinkInterface) VarlinkGetDescription() string {\n\treturn `# Podman Service Interface and API description. The master version of this document can be found\n# in the [API.md](https://github.com/containers/libpod/blob/master/API.md) file in the upstream libpod repository.\ninterface io.podman\n\ntype Volume (\n name: string,\n labels: [string]string,\n mountPoint: string,\n driver: string,\n options: [string]string,\n scope: string\n)\n\ntype NotImplemented (\n comment: string\n)\n\ntype StringResponse (\n message: string\n)\n\ntype LogLine (\n device: string,\n parseLogType : string,\n time: string,\n msg: string,\n cid: string\n)\n\n# ContainerChanges describes the return struct for ListContainerChanges\ntype ContainerChanges (\n changed: []string,\n added: []string,\n deleted: []string\n)\n\ntype ImageSaveOptions (\n name: string,\n format: string,\n output: string,\n outputType: string,\n moreTags: []string,\n quiet: bool,\n compress: bool\n)\n\ntype VolumeCreateOpts (\n volumeName: string,\n driver: string,\n labels: [string]string,\n options: [string]string\n)\n\ntype VolumeRemoveOpts (\n volumes: []string,\n all: bool,\n force: bool\n)\n\ntype Image (\n id: string,\n digest: string,\n parentId: string,\n repoTags: []string,\n repoDigests: []string,\n created: string, # as RFC3339\n size: int,\n virtualSize: int,\n containers: int,\n labels: [string]string,\n isParent: bool,\n topLayer: string,\n readOnly: bool\n)\n\n# ImageHistory describes the returned structure from ImageHistory.\ntype ImageHistory (\n id: string,\n created: string, # as RFC3339\n createdBy: string,\n tags: []string,\n size: int,\n comment: string\n)\n\n# Represents a single search result from SearchImages\ntype ImageSearchResult (\n description: string,\n is_official: bool,\n is_automated: bool,\n registry: string,\n name: string,\n star_count: int\n)\n\ntype ImageSearchFilter (\n is_official: ?bool,\n is_automated: ?bool,\n star_count: int\n)\n\ntype KubePodService (\n pod: 
string,\n service: string\n)\n\ntype Container (\n id: string,\n image: string,\n imageid: string,\n command: []string,\n createdat: string, # as RFC3339\n runningfor: string,\n status: string,\n ports: []ContainerPortMappings,\n rootfssize: int,\n rwsize: int,\n names: string,\n labels: [string]string,\n mounts: []ContainerMount,\n containerrunning: bool,\n namespaces: ContainerNameSpace\n)\n\n# ContainerStats is the return struct for the stats of a container\ntype ContainerStats (\n id: string,\n name: string,\n cpu: float,\n cpu_nano: int,\n system_nano: int,\n mem_usage: int,\n mem_limit: int,\n mem_perc: float,\n net_input: int,\n net_output: int,\n block_output: int,\n block_input: int,\n pids: int\n)\n\ntype PsOpts (\n all: bool,\n filters: ?[]string,\n last: ?int,\n latest: ?bool,\n noTrunc: ?bool,\n pod: ?bool,\n quiet: ?bool,\n size: ?bool,\n sort: ?string,\n sync: ?bool\n)\n\ntype PsContainer (\n id: string,\n image: string,\n command: string,\n created: string,\n ports: string,\n names: string,\n isInfra: bool,\n status: string,\n state: string,\n pidNum: int,\n rootFsSize: int,\n rwSize: int,\n pod: string,\n createdAt: string,\n exitedAt: string,\n startedAt: string,\n labels: [string]string,\n nsPid: string,\n cgroup: string,\n ipc: string,\n mnt: string,\n net: string,\n pidNs: string,\n user: string,\n uts: string,\n mounts: string\n)\n\n# ContainerMount describes the struct for mounts in a container\ntype ContainerMount (\n destination: string,\n type: string,\n source: string,\n options: []string\n)\n\n# ContainerPortMappings describes the struct for portmappings in an existing container\ntype ContainerPortMappings (\n host_port: string,\n host_ip: string,\n protocol: string,\n container_port: string\n)\n\n# ContainerNamespace describes the namespace structure for an existing container\ntype ContainerNameSpace (\n user: string,\n uts: string,\n pidns: string,\n pid: string,\n cgroup: string,\n net: string,\n mnt: string,\n ipc: string\n)\n\n# 
InfoDistribution describes the host's distribution\ntype InfoDistribution (\n distribution: string,\n version: string\n)\n\n# InfoHost describes the host stats portion of PodmanInfo\ntype InfoHost (\n buildah_version: string,\n distribution: InfoDistribution,\n mem_free: int,\n mem_total: int,\n swap_free: int,\n swap_total: int,\n arch: string,\n cpus: int,\n hostname: string,\n kernel: string,\n os: string,\n uptime: string,\n eventlogger: string\n)\n\n# InfoGraphStatus describes the detailed status of the storage driver\ntype InfoGraphStatus (\n backing_filesystem: string,\n native_overlay_diff: string,\n supports_d_type: string\n)\n\n# InfoStore describes the host's storage informatoin\ntype InfoStore (\n containers: int,\n images: int,\n graph_driver_name: string,\n graph_driver_options: string,\n graph_root: string,\n graph_status: InfoGraphStatus,\n run_root: string\n)\n\n# InfoPodman provides details on the podman binary\ntype InfoPodmanBinary (\n compiler: string,\n go_version: string,\n podman_version: string,\n git_commit: string\n)\n\n# PodmanInfo describes the Podman host and build\ntype PodmanInfo (\n host: InfoHost,\n registries: []string,\n insecure_registries: []string,\n store: InfoStore,\n podman: InfoPodmanBinary\n)\n\n# Sockets describes sockets location for a container\ntype Sockets(\n container_id: string,\n io_socket: string,\n control_socket: string\n)\n\n# Create is an input structure for creating containers.\ntype Create (\n args: []string,\n addHost: ?[]string,\n annotation: ?[]string,\n attach: ?[]string,\n blkioWeight: ?string,\n blkioWeightDevice: ?[]string,\n capAdd: ?[]string,\n capDrop: ?[]string,\n cgroupParent: ?string,\n cidFile: ?string,\n conmonPidfile: ?string,\n command: ?[]string,\n cpuPeriod: ?int,\n cpuQuota: ?int,\n cpuRtPeriod: ?int,\n cpuRtRuntime: ?int,\n cpuShares: ?int,\n cpus: ?float,\n cpuSetCpus: ?string,\n cpuSetMems: ?string,\n detach: ?bool,\n detachKeys: ?string,\n device: ?[]string,\n deviceReadBps: 
?[]string,\n deviceReadIops: ?[]string,\n deviceWriteBps: ?[]string,\n deviceWriteIops: ?[]string,\n dns: ?[]string,\n dnsOpt: ?[]string,\n dnsSearch: ?[]string,\n dnsServers: ?[]string,\n entrypoint: ?string,\n env: ?[]string,\n envFile: ?[]string,\n expose: ?[]string,\n gidmap: ?[]string,\n groupadd: ?[]string,\n healthcheckCommand: ?string,\n healthcheckInterval: ?string,\n healthcheckRetries: ?int,\n healthcheckStartPeriod: ?string,\n healthcheckTimeout:?string,\n hostname: ?string,\n imageVolume: ?string,\n init: ?bool,\n initPath: ?string,\n interactive: ?bool,\n ip: ?string,\n ipc: ?string,\n kernelMemory: ?string,\n label: ?[]string,\n labelFile: ?[]string,\n logDriver: ?string,\n logOpt: ?[]string,\n macAddress: ?string,\n memory: ?string,\n memoryReservation: ?string,\n memorySwap: ?string,\n memorySwappiness: ?int,\n name: ?string,\n net: ?string,\n network: ?string,\n noHosts: ?bool,\n oomKillDisable: ?bool,\n oomScoreAdj: ?int,\n pid: ?string,\n pidsLimit: ?int,\n pod: ?string,\n privileged: ?bool,\n publish: ?[]string,\n publishAll: ?bool,\n quiet: ?bool,\n readonly: ?bool,\n readonlytmpfs: ?bool,\n restart: ?string,\n rm: ?bool,\n rootfs: ?bool,\n securityOpt: ?[]string,\n shmSize: ?string,\n stopSignal: ?string,\n stopTimeout: ?int,\n storageOpt: ?[]string,\n subuidname: ?string,\n subgidname: ?string,\n sysctl: ?[]string,\n systemd: ?bool,\n tmpfs: ?[]string,\n tty: ?bool,\n uidmap: ?[]string,\n ulimit: ?[]string,\n user: ?string,\n userns: ?string,\n uts: ?string,\n mount: ?[]string,\n volume: ?[]string,\n volumesFrom: ?[]string,\n workDir: ?string\n)\n\n# BuildOptions are are used to describe describe physical attributes of the build\ntype BuildOptions (\n addHosts: []string,\n cgroupParent: string,\n cpuPeriod: int,\n cpuQuota: int,\n cpuShares: int,\n cpusetCpus: string,\n cpusetMems: string,\n memory: int,\n memorySwap: int,\n shmSize: string,\n ulimit: []string,\n volume: []string\n)\n\n# BuildInfo is used to describe user input for building 
images\ntype BuildInfo (\n additionalTags: []string,\n annotations: []string,\n buildArgs: [string]string,\n buildOptions: BuildOptions,\n cniConfigDir: string,\n cniPluginDir: string,\n compression: string,\n contextDir: string,\n defaultsMountFilePath: string,\n dockerfiles: []string,\n err: string,\n forceRmIntermediateCtrs: bool,\n iidfile: string,\n label: []string,\n layers: bool,\n nocache: bool,\n out: string,\n output: string,\n outputFormat: string,\n pullPolicy: string,\n quiet: bool,\n remoteIntermediateCtrs: bool,\n reportWriter: string,\n runtimeArgs: []string,\n squash: bool\n)\n\n# MoreResponse is a struct for when responses from varlink requires longer output\ntype MoreResponse (\n logs: []string,\n id: string\n)\n\n# ListPodContainerInfo is a returned struct for describing containers\n# in a pod.\ntype ListPodContainerInfo (\n name: string,\n id: string,\n status: string\n)\n\n# PodCreate is an input structure for creating pods.\n# It emulates options to podman pod create. 
The infraCommand and\n# infraImage options are currently NotSupported.\ntype PodCreate (\n name: string,\n cgroupParent: string,\n labels: [string]string,\n share: []string,\n infra: bool,\n infraCommand: string,\n infraImage: string,\n publish: []string\n)\n\n# ListPodData is the returned struct for an individual pod\ntype ListPodData (\n id: string,\n name: string,\n createdat: string,\n cgroup: string,\n status: string,\n labels: [string]string,\n numberofcontainers: string,\n containersinfo: []ListPodContainerInfo\n)\n\ntype PodContainerErrorData (\n containerid: string,\n reason: string\n)\n\n# Runlabel describes the required input for container runlabel\ntype Runlabel(\n image: string,\n authfile: string,\n display: bool,\n name: string,\n pull: bool,\n label: string,\n extraArgs: []string,\n opts: [string]string\n)\n\n# Event describes a libpod struct\ntype Event(\n # TODO: make status and type a enum at some point?\n # id is the container, volume, pod, image ID\n id: string,\n # image is the image name where applicable\n image: string,\n # name is the name of the pod, container, image\n name: string,\n # status describes the event that happened (i.e. 
create, remove, ...)\n status: string,\n # time the event happened\n time: string,\n # type describes object the event happened with (image, container...)\n type: string\n)\n\ntype DiffInfo(\n # path that is different\n path: string,\n # Add, Delete, Modify\n changeType: string\n)\n\ntype ExecOpts(\n # container name or id\n name: string,\n # Create pseudo tty\n tty: bool,\n # privileged access in container\n privileged: bool,\n # command to execute in container\n cmd: []string,\n # user to use in container\n user: ?string,\n # workdir to run command in container\n workdir: ?string,\n # slice of keyword=value environment variables\n env: ?[]string,\n # string of detach keys\n detachKeys: ?string\n)\n\n# GetVersion returns version and build information of the podman service\nmethod GetVersion() -> (\n version: string,\n go_version: string,\n git_commit: string,\n built: string, # as RFC3339\n os_arch: string,\n remote_api_version: int\n)\n\n# GetInfo returns a [PodmanInfo](#PodmanInfo) struct that describes podman and its host such as storage stats,\n# build information of Podman, and system-wide registries.\nmethod GetInfo() -> (info: PodmanInfo)\n\n# ListContainers returns information about all containers.\n# See also [GetContainer](#GetContainer).\nmethod ListContainers() -> (containers: []Container)\n\nmethod Ps(opts: PsOpts) -> (containers: []PsContainer)\n\nmethod GetContainersByStatus(status: []string) -> (containerS: []Container)\n\nmethod Top (nameOrID: string, descriptors: []string) -> (top: []string)\n\n# GetContainer returns information about a single container. If a container\n# with the given id doesn't exist, a [ContainerNotFound](#ContainerNotFound)\n# error will be returned. See also [ListContainers](ListContainers) and\n# [InspectContainer](#InspectContainer).\nmethod GetContainer(id: string) -> (container: Container)\n\n# GetContainersByContext allows you to get a list of container ids depending on all, latest, or a list of\n# container names. 
The definition of latest container means the latest by creation date. In a multi-\n# user environment, results might differ from what you expect.\nmethod GetContainersByContext(all: bool, latest: bool, args: []string) -> (containers: []string)\n\n# CreateContainer creates a new container from an image. It uses a [Create](#Create) type for input.\nmethod CreateContainer(create: Create) -> (container: string)\n\n# InspectContainer data takes a name or ID of a container returns the inspection\n# data in string format. You can then serialize the string into JSON. A [ContainerNotFound](#ContainerNotFound)\n# error will be returned if the container cannot be found. See also [InspectImage](#InspectImage).\nmethod InspectContainer(name: string) -> (container: string)\n\n# ListContainerProcesses takes a name or ID of a container and returns the processes\n# running inside the container as array of strings. It will accept an array of string\n# arguments that represent ps options. If the container cannot be found, a [ContainerNotFound](#ContainerNotFound)\n# error will be returned.\n# #### Example\n# ~~~\n# $ varlink call -m unix:/run/podman/io.podman/io.podman.ListContainerProcesses '{\"name\": \"135d71b9495f\", \"opts\": []}'\n# {\n# \"container\": [\n# \" UID PID PPID C STIME TTY TIME CMD\",\n# \" 0 21220 21210 0 09:05 pts/0 00:00:00 /bin/sh\",\n# \" 0 21232 21220 0 09:05 pts/0 00:00:00 top\",\n# \" 0 21284 21220 0 09:05 pts/0 00:00:00 vi /etc/hosts\"\n# ]\n# }\n# ~~~\nmethod ListContainerProcesses(name: string, opts: []string) -> (container: []string)\n\n# GetContainerLogs takes a name or ID of a container and returns the logs of that container.\n# If the container cannot be found, a [ContainerNotFound](#ContainerNotFound) error will be returned.\n# The container logs are returned as an array of strings. 
GetContainerLogs will honor the streaming\n# capability of varlink if the client invokes it.\nmethod GetContainerLogs(name: string) -> (container: []string)\n\nmethod GetContainersLogs(names: []string, follow: bool, latest: bool, since: string, tail: int, timestamps: bool) -> (log: LogLine)\n\n# ListContainerChanges takes a name or ID of a container and returns changes between the container and\n# its base image. It returns a struct of changed, deleted, and added path names.\nmethod ListContainerChanges(name: string) -> (container: ContainerChanges)\n\n# ExportContainer creates an image from a container. It takes the name or ID of a container and a\n# path representing the target tarfile. If the container cannot be found, a [ContainerNotFound](#ContainerNotFound)\n# error will be returned.\n# The return value is the written tarfile.\n# #### Example\n# ~~~\n# $ varlink call -m unix:/run/podman/io.podman/io.podman.ExportContainer '{\"name\": \"flamboyant_payne\", \"path\": \"/tmp/payne.tar\" }'\n# {\n# \"tarfile\": \"/tmp/payne.tar\"\n# }\n# ~~~\nmethod ExportContainer(name: string, path: string) -> (tarfile: string)\n\n# GetContainerStats takes the name or ID of a container and returns a single ContainerStats structure which\n# contains attributes like memory and cpu usage. If the container cannot be found, a\n# [ContainerNotFound](#ContainerNotFound) error will be returned. 
If the container is not running, a [NoContainerRunning](#NoContainerRunning)\n# error will be returned\n# #### Example\n# ~~~\n# $ varlink call -m unix:/run/podman/io.podman/io.podman.GetContainerStats '{\"name\": \"c33e4164f384\"}'\n# {\n# \"container\": {\n# \"block_input\": 0,\n# \"block_output\": 0,\n# \"cpu\": 2.571123918839990154678e-08,\n# \"cpu_nano\": 49037378,\n# \"id\": \"c33e4164f384aa9d979072a63319d66b74fd7a128be71fa68ede24f33ec6cfee\",\n# \"mem_limit\": 33080606720,\n# \"mem_perc\": 2.166828456524753747370e-03,\n# \"mem_usage\": 716800,\n# \"name\": \"competent_wozniak\",\n# \"net_input\": 768,\n# \"net_output\": 5910,\n# \"pids\": 1,\n# \"system_nano\": 10000000\n# }\n# }\n# ~~~\nmethod GetContainerStats(name: string) -> (container: ContainerStats)\n\n# GetContainerStatsWithHistory takes a previous set of container statistics and uses libpod functions\n# to calculate the containers statistics based on current and previous measurements.\nmethod GetContainerStatsWithHistory(previousStats: ContainerStats) -> (container: ContainerStats)\n\n# This method has not be implemented yet.\n# method ResizeContainerTty() -> (notimplemented: NotImplemented)\n\n# StartContainer starts a created or stopped container. It takes the name or ID of container. It returns\n# the container ID once started. If the container cannot be found, a [ContainerNotFound](#ContainerNotFound)\n# error will be returned. See also [CreateContainer](#CreateContainer).\nmethod StartContainer(name: string) -> (container: string)\n\n# StopContainer stops a container given a timeout. It takes the name or ID of a container as well as a\n# timeout value. The timeout value the time before a forcible stop to the container is applied. It\n# returns the container ID once stopped. If the container cannot be found, a [ContainerNotFound](#ContainerNotFound)\n# error will be returned instead. 
See also [KillContainer](KillContainer).\n# #### Error\n# ~~~\n# $ varlink call -m unix:/run/podman/io.podman/io.podman.StopContainer '{\"name\": \"135d71b9495f\", \"timeout\": 5}'\n# {\n# \"container\": \"135d71b9495f7c3967f536edad57750bfdb569336cd107d8aabab45565ffcfb6\"\n# }\n# ~~~\nmethod StopContainer(name: string, timeout: int) -> (container: string)\n\n# InitContainer initializes the given container. It accepts a container name or\n# ID, and will initialize the container matching that ID if possible, and error\n# if not. Containers can only be initialized when they are in the Created or\n# Exited states. Initialization prepares a container to be started, but does not\n# start the container. It is intended to be used to debug a container's state\n# prior to starting it.\nmethod InitContainer(name: string) -> (container: string)\n\n# RestartContainer will restart a running container given a container name or ID and timeout value. The timeout\n# value is the time before a forcible stop is used to stop the container. If the container cannot be found by\n# name or ID, a [ContainerNotFound](#ContainerNotFound) error will be returned; otherwise, the ID of the\n# container will be returned.\nmethod RestartContainer(name: string, timeout: int) -> (container: string)\n\n# KillContainer takes the name or ID of a container as well as a signal to be applied to the container. Once the\n# container has been killed, the container's ID is returned. If the container cannot be found, a\n# [ContainerNotFound](#ContainerNotFound) error is returned. See also [StopContainer](StopContainer).\nmethod KillContainer(name: string, signal: int) -> (container: string)\n\n# This method has not be implemented yet.\n# method UpdateContainer() -> (notimplemented: NotImplemented)\n\n# This method has not be implemented yet.\n# method RenameContainer() -> (notimplemented: NotImplemented)\n\n# PauseContainer takes the name or ID of container and pauses it. 
If the container cannot be found,\n# a [ContainerNotFound](#ContainerNotFound) error will be returned; otherwise the ID of the container is returned.\n# See also [UnpauseContainer](#UnpauseContainer).\nmethod PauseContainer(name: string) -> (container: string)\n\n# UnpauseContainer takes the name or ID of container and unpauses a paused container. If the container cannot be\n# found, a [ContainerNotFound](#ContainerNotFound) error will be returned; otherwise the ID of the container is returned.\n# See also [PauseContainer](#PauseContainer).\nmethod UnpauseContainer(name: string) -> (container: string)\n\n# Attach takes the name or ID of a container and sets up the ability to remotely attach to its console. The start\n# bool is whether you wish to start the container in question first.\nmethod Attach(name: string, detachKeys: string, start: bool) -> ()\n\nmethod AttachControl(name: string) -> ()\n\n# GetAttachSockets takes the name or ID of an existing container. It returns file paths for two sockets needed\n# to properly communicate with a container. The first is the actual I/O socket that the container uses. The\n# second is a \"control\" socket where things like resizing the TTY events are sent. 
If the container cannot be\n# found, a [ContainerNotFound](#ContainerNotFound) error will be returned.\n# #### Example\n# ~~~\n# $ varlink call -m unix:/run/io.podman/io.podman.GetAttachSockets '{\"name\": \"b7624e775431219161\"}'\n# {\n# \"sockets\": {\n# \"container_id\": \"b7624e7754312191613245ce1a46844abee60025818fe3c3f3203435623a1eca\",\n# \"control_socket\": \"/var/lib/containers/storage/overlay-containers/b7624e7754312191613245ce1a46844abee60025818fe3c3f3203435623a1eca/userdata/ctl\",\n# \"io_socket\": \"/var/run/libpod/socket/b7624e7754312191613245ce1a46844abee60025818fe3c3f3203435623a1eca/attach\"\n# }\n# }\n# ~~~\nmethod GetAttachSockets(name: string) -> (sockets: Sockets)\n\n# WaitContainer takes the name or ID of a container and waits the given interval in milliseconds until the container\n# stops. Upon stopping, the return code of the container is returned. If the container container cannot be found by ID\n# or name, a [ContainerNotFound](#ContainerNotFound) error is returned.\nmethod WaitContainer(name: string, interval: int) -> (exitcode: int)\n\n# RemoveContainer requires the name or ID of container as well a boolean representing whether a running container can be stopped and removed, and a boolean\n# indicating whether to remove builtin volumes. Upon successful removal of the\n# container, its ID is returned. If the\n# container cannot be found by name or ID, a [ContainerNotFound](#ContainerNotFound) error will be returned.\n# #### Example\n# ~~~\n# $ varlink call -m unix:/run/podman/io.podman/io.podman.RemoveContainer '{\"name\": \"62f4fd98cb57\"}'\n# {\n# \"container\": \"62f4fd98cb57f529831e8f90610e54bba74bd6f02920ffb485e15376ed365c20\"\n# }\n# ~~~\nmethod RemoveContainer(name: string, force: bool, removeVolumes: bool) -> (container: string)\n\n# DeleteStoppedContainers will delete all containers that are not running. It will return a list the deleted\n# container IDs. 
See also [RemoveContainer](RemoveContainer).\n# #### Example\n# ~~~\n# $ varlink call -m unix:/run/podman/io.podman/io.podman.DeleteStoppedContainers\n# {\n# \"containers\": [\n# \"451410b931d00def8aa9b4f8084e4d4a39e5e04ea61f358cf53a5cf95afcdcee\",\n# \"8b60f754a3e01389494a9581ade97d35c2765b6e2f19acd2d3040c82a32d1bc0\",\n# \"cf2e99d4d3cad6073df199ed32bbe64b124f3e1aba6d78821aa8460e70d30084\",\n# \"db901a329587312366e5ecff583d08f0875b4b79294322df67d90fc6eed08fc1\"\n# ]\n# }\n# ~~~\nmethod DeleteStoppedContainers() -> (containers: []string)\n\n# ListImages returns information about the images that are currently in storage.\n# See also [InspectImage](#InspectImage).\nmethod ListImages() -> (images: []Image)\n\n# GetImage returns information about a single image in storage.\n# If the image caGetImage returns be found, [ImageNotFound](#ImageNotFound) will be returned.\nmethod GetImage(id: string) -> (image: Image)\n\n# BuildImage takes a [BuildInfo](#BuildInfo) structure and builds an image. At a minimum, you must provide the\n# 'dockerfile' and 'tags' options in the BuildInfo structure. It will return a [MoreResponse](#MoreResponse) structure\n# that contains the build logs and resulting image ID.\nmethod BuildImage(build: BuildInfo) -> (image: MoreResponse)\n\n# This function is not implemented yet.\n# method CreateImage() -> (notimplemented: NotImplemented)\n\n# InspectImage takes the name or ID of an image and returns a string representation of data associated with the\n#image. You must serialize the string into JSON to use it further. An [ImageNotFound](#ImageNotFound) error will\n# be returned if the image cannot be found.\nmethod InspectImage(name: string) -> (image: string)\n\n# HistoryImage takes the name or ID of an image and returns information about its history and layers. The returned\n# history is in the form of an array of ImageHistory structures. 
If the image cannot be found, an\n# [ImageNotFound](#ImageNotFound) error is returned.\nmethod HistoryImage(name: string) -> (history: []ImageHistory)\n\n# PushImage takes two input arguments: the name or ID of an image, the fully-qualified destination name of the image,\n# It will return an [ImageNotFound](#ImageNotFound) error if\n# the image cannot be found in local storage; otherwise it will return a [MoreResponse](#MoreResponse)\nmethod PushImage(name: string, tag: string, compress: bool, format: string, removeSignatures: bool, signBy: string) -> (reply: MoreResponse)\n\n# TagImage takes the name or ID of an image in local storage as well as the desired tag name. If the image cannot\n# be found, an [ImageNotFound](#ImageNotFound) error will be returned; otherwise, the ID of the image is returned on success.\nmethod TagImage(name: string, tagged: string) -> (image: string)\n\n# RemoveImage takes the name or ID of an image as well as a boolean that determines if containers using that image\n# should be deleted. If the image cannot be found, an [ImageNotFound](#ImageNotFound) error will be returned. The\n# ID of the removed image is returned when complete. See also [DeleteUnusedImages](DeleteUnusedImages).\n# #### Example\n# ~~~\n# varlink call -m unix:/run/podman/io.podman/io.podman.RemoveImage '{\"name\": \"registry.fedoraproject.org/fedora\", \"force\": true}'\n# {\n# \"image\": \"426866d6fa419873f97e5cbd320eeb22778244c1dfffa01c944db3114f55772e\"\n# }\n# ~~~\nmethod RemoveImage(name: string, force: bool) -> (image: string)\n\n# SearchImages searches available registries for images that contain the\n# contents of \"query\" in their name. If \"limit\" is given, limits the amount of\n# search results per registry.\nmethod SearchImages(query: string, limit: ?int, filter: ImageSearchFilter) -> (results: []ImageSearchResult)\n\n# DeleteUnusedImages deletes any images not associated with a container. 
The IDs of the deleted images are returned\n# in a string array.\n# #### Example\n# ~~~\n# $ varlink call -m unix:/run/podman/io.podman/io.podman.DeleteUnusedImages\n# {\n# \"images\": [\n# \"166ea6588079559c724c15223f52927f514f73dd5c5cf2ae2d143e3b2e6e9b52\",\n# \"da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e\",\n# \"3ef70f7291f47dfe2b82931a993e16f5a44a0e7a68034c3e0e086d77f5829adc\",\n# \"59788edf1f3e78cd0ebe6ce1446e9d10788225db3dedcfd1a59f764bad2b2690\"\n# ]\n# }\n# ~~~\nmethod DeleteUnusedImages() -> (images: []string)\n\n# Commit, creates an image from an existing container. It requires the name or\n# ID of the container as well as the resulting image name. Optionally, you can define an author and message\n# to be added to the resulting image. You can also define changes to the resulting image for the following\n# attributes: _CMD, ENTRYPOINT, ENV, EXPOSE, LABEL, ONBUILD, STOPSIGNAL, USER, VOLUME, and WORKDIR_. To pause the\n# container while it is being committed, pass a _true_ bool for the pause argument. If the container cannot\n# be found by the ID or name provided, a (ContainerNotFound)[#ContainerNotFound] error will be returned; otherwise,\n# the resulting image's ID will be returned as a string inside a MoreResponse.\nmethod Commit(name: string, image_name: string, changes: []string, author: string, message: string, pause: bool, manifestType: string) -> (reply: MoreResponse)\n\n# ImportImage imports an image from a source (like tarball) into local storage. The image can have additional\n# descriptions added to it using the message and changes options. See also [ExportImage](ExportImage).\nmethod ImportImage(source: string, reference: string, message: string, changes: []string, delete: bool) -> (image: string)\n\n# ExportImage takes the name or ID of an image and exports it to a destination like a tarball. There is also\n# a boolean option to force compression. 
It also takes in a string array of tags to be able to save multiple\n# tags of the same image to a tarball (each tag should be of the form <image>:<tag>). Upon completion, the ID\n# of the image is returned. If the image cannot be found in local storage, an [ImageNotFound](#ImageNotFound)\n# error will be returned. See also [ImportImage](ImportImage).\nmethod ExportImage(name: string, destination: string, compress: bool, tags: []string) -> (image: string)\n\n# PullImage pulls an image from a repository to local storage. After a successful pull, the image id and logs\n# are returned as a [MoreResponse](#MoreResponse). This connection also will handle a WantsMores request to send\n# status as it occurs.\nmethod PullImage(name: string) -> (reply: MoreResponse)\n\n# CreatePod creates a new empty pod. It uses a [PodCreate](#PodCreate) type for input.\n# On success, the ID of the newly created pod will be returned.\n# #### Example\n# ~~~\n# $ varlink call unix:/run/podman/io.podman/io.podman.CreatePod '{\"create\": {\"name\": \"test\"}}'\n# {\n# \"pod\": \"b05dee7bd4ccfee688099fe1588a7a898d6ddd6897de9251d4671c9b0feacb2a\"\n# }\n#\n# $ varlink call unix:/run/podman/io.podman/io.podman.CreatePod '{\"create\": {\"infra\": true, \"share\": [\"ipc\", \"net\", \"uts\"]}}'\n# {\n# \"pod\": \"d7697449a8035f613c1a8891286502aca68fff7d5d49a85279b3bda229af3b28\"\n# }\n# ~~~\nmethod CreatePod(create: PodCreate) -> (pod: string)\n\n# ListPods returns a list of pods in no particular order. They are\n# returned as an array of ListPodData structs. 
See also [GetPod](#GetPod).\n# #### Example\n# ~~~\n# $ varlink call -m unix:/run/podman/io.podman/io.podman.ListPods\n# {\n# \"pods\": [\n# {\n# \"cgroup\": \"machine.slice\",\n# \"containersinfo\": [\n# {\n# \"id\": \"00c130a45de0411f109f1a0cfea2e298df71db20fa939de5cab8b2160a36be45\",\n# \"name\": \"1840835294cf-infra\",\n# \"status\": \"running\"\n# },\n# {\n# \"id\": \"49a5cce72093a5ca47c6de86f10ad7bb36391e2d89cef765f807e460865a0ec6\",\n# \"name\": \"upbeat_murdock\",\n# \"status\": \"running\"\n# }\n# ],\n# \"createdat\": \"2018-12-07 13:10:15.014139258 -0600 CST\",\n# \"id\": \"1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f\",\n# \"name\": \"foobar\",\n# \"numberofcontainers\": \"2\",\n# \"status\": \"Running\"\n# },\n# {\n# \"cgroup\": \"machine.slice\",\n# \"containersinfo\": [\n# {\n# \"id\": \"1ca4b7bbba14a75ba00072d4b705c77f3df87db0109afaa44d50cb37c04a477e\",\n# \"name\": \"784306f655c6-infra\",\n# \"status\": \"running\"\n# }\n# ],\n# \"createdat\": \"2018-12-07 13:09:57.105112457 -0600 CST\",\n# \"id\": \"784306f655c6200aea321dd430ba685e9b2cc1f7d7528a72f3ff74ffb29485a2\",\n# \"name\": \"nostalgic_pike\",\n# \"numberofcontainers\": \"1\",\n# \"status\": \"Running\"\n# }\n# ]\n# }\n# ~~~\nmethod ListPods() -> (pods: []ListPodData)\n\n# GetPod takes a name or ID of a pod and returns single [ListPodData](#ListPodData)\n# structure. 
A [PodNotFound](#PodNotFound) error will be returned if the pod cannot be found.\n# See also [ListPods](ListPods).\n# #### Example\n# ~~~\n# $ varlink call -m unix:/run/podman/io.podman/io.podman.GetPod '{\"name\": \"foobar\"}'\n# {\n# \"pod\": {\n# \"cgroup\": \"machine.slice\",\n# \"containersinfo\": [\n# {\n# \"id\": \"00c130a45de0411f109f1a0cfea2e298df71db20fa939de5cab8b2160a36be45\",\n# \"name\": \"1840835294cf-infra\",\n# \"status\": \"running\"\n# },\n# {\n# \"id\": \"49a5cce72093a5ca47c6de86f10ad7bb36391e2d89cef765f807e460865a0ec6\",\n# \"name\": \"upbeat_murdock\",\n# \"status\": \"running\"\n# }\n# ],\n# \"createdat\": \"2018-12-07 13:10:15.014139258 -0600 CST\",\n# \"id\": \"1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f\",\n# \"name\": \"foobar\",\n# \"numberofcontainers\": \"2\",\n# \"status\": \"Running\"\n# }\n# }\n# ~~~\nmethod GetPod(name: string) -> (pod: ListPodData)\n\n# InspectPod takes the name or ID of an image and returns a string representation of data associated with the\n# pod. You must serialize the string into JSON to use it further. A [PodNotFound](#PodNotFound) error will\n# be returned if the pod cannot be found.\nmethod InspectPod(name: string) -> (pod: string)\n\n# StartPod starts containers in a pod. It takes the name or ID of pod. If the pod cannot be found, a [PodNotFound](#PodNotFound)\n# error will be returned. Containers in a pod are started independently. 
If there is an error starting one container, the ID of those containers\n# will be returned in a list, along with the ID of the pod in a [PodContainerError](#PodContainerError).\n# If the pod was started with no errors, the pod ID is returned.\n# See also [CreatePod](#CreatePod).\n# #### Example\n# ~~~\n# $ varlink call -m unix:/run/podman/io.podman/io.podman.StartPod '{\"name\": \"135d71b9495f\"}'\n# {\n# \"pod\": \"135d71b9495f7c3967f536edad57750bfdb569336cd107d8aabab45565ffcfb6\",\n# }\n# ~~~\nmethod StartPod(name: string) -> (pod: string)\n\n# StopPod stops containers in a pod. It takes the name or ID of a pod and a timeout.\n# If the pod cannot be found, a [PodNotFound](#PodNotFound) error will be returned instead.\n# Containers in a pod are stopped independently. If there is an error stopping one container, the ID of those containers\n# will be returned in a list, along with the ID of the pod in a [PodContainerError](#PodContainerError).\n# If the pod was stopped with no errors, the pod ID is returned.\n# See also [KillPod](KillPod).\n# #### Example\n# ~~~\n# $ varlink call -m unix:/run/podman/io.podman/io.podman.StopPod '{\"name\": \"135d71b9495f\"}'\n# {\n# \"pod\": \"135d71b9495f7c3967f536edad57750bfdb569336cd107d8aabab45565ffcfb6\"\n# }\n# ~~~\nmethod StopPod(name: string, timeout: int) -> (pod: string)\n\n# RestartPod will restart containers in a pod given a pod name or ID. Containers in\n# the pod that are running will be stopped, then all stopped containers will be run.\n# If the pod cannot be found by name or ID, a [PodNotFound](#PodNotFound) error will be returned.\n# Containers in a pod are restarted independently. 
If there is an error restarting one container, the ID of those containers\n# will be returned in a list, along with the ID of the pod in a [PodContainerError](#PodContainerError).\n# If the pod was restarted with no errors, the pod ID is returned.\n# #### Example\n# ~~~\n# $ varlink call -m unix:/run/podman/io.podman/io.podman.RestartPod '{\"name\": \"135d71b9495f\"}'\n# {\n# \"pod\": \"135d71b9495f7c3967f536edad57750bfdb569336cd107d8aabab45565ffcfb6\"\n# }\n# ~~~\nmethod RestartPod(name: string) -> (pod: string)\n\n# KillPod takes the name or ID of a pod as well as a signal to be applied to the pod. If the pod cannot be found, a\n# [PodNotFound](#PodNotFound) error is returned.\n# Containers in a pod are killed independently. If there is an error killing one container, the ID of those containers\n# will be returned in a list, along with the ID of the pod in a [PodContainerError](#PodContainerError).\n# If the pod was killed with no errors, the pod ID is returned.\n# See also [StopPod](StopPod).\n# #### Example\n# ~~~\n# $ varlink call -m unix:/run/podman/io.podman/io.podman.KillPod '{\"name\": \"foobar\", \"signal\": 15}'\n# {\n# \"pod\": \"1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f\"\n# }\n# ~~~\nmethod KillPod(name: string, signal: int) -> (pod: string)\n\n# PausePod takes the name or ID of a pod and pauses the running containers associated with it. If the pod cannot be found,\n# a [PodNotFound](#PodNotFound) error will be returned.\n# Containers in a pod are paused independently. 
If there is an error pausing one container, the ID of those containers\n# will be returned in a list, along with the ID of the pod in a [PodContainerError](#PodContainerError).\n# If the pod was paused with no errors, the pod ID is returned.\n# See also [UnpausePod](#UnpausePod).\n# #### Example\n# ~~~\n# $ varlink call -m unix:/run/podman/io.podman/io.podman.PausePod '{\"name\": \"foobar\"}'\n# {\n# \"pod\": \"1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f\"\n# }\n# ~~~\nmethod PausePod(name: string) -> (pod: string)\n\n# UnpausePod takes the name or ID of a pod and unpauses the paused containers associated with it. If the pod cannot be\n# found, a [PodNotFound](#PodNotFound) error will be returned.\n# Containers in a pod are unpaused independently. If there is an error unpausing one container, the ID of those containers\n# will be returned in a list, along with the ID of the pod in a [PodContainerError](#PodContainerError).\n# If the pod was unpaused with no errors, the pod ID is returned.\n# See also [PausePod](#PausePod).\n# #### Example\n# ~~~\n# $ varlink call -m unix:/run/podman/io.podman/io.podman.UnpausePod '{\"name\": \"foobar\"}'\n# {\n# \"pod\": \"1840835294cf076a822e4e12ba4152411f131bd869e7f6a4e8b16df9b0ea5c7f\"\n# }\n# ~~~\nmethod UnpausePod(name: string) -> (pod: string)\n\n# RemovePod takes the name or ID of a pod as well a boolean representing whether a running\n# container in the pod can be stopped and removed. If a pod has containers associated with it, and force is not true,\n# an error will occur.\n# If the pod cannot be found by name or ID, a [PodNotFound](#PodNotFound) error will be returned.\n# Containers in a pod are removed independently. 
If there is an error removing any container, the ID of those containers\n# will be returned in a list, along with the ID of the pod in a [PodContainerError](#PodContainerError).\n# If the pod was removed with no errors, the pod ID is returned.\n# #### Example\n# ~~~\n# $ varlink call -m unix:/run/podman/io.podman/io.podman.RemovePod '{\"name\": \"62f4fd98cb57\", \"force\": \"true\"}'\n# {\n# \"pod\": \"62f4fd98cb57f529831e8f90610e54bba74bd6f02920ffb485e15376ed365c20\"\n# }\n# ~~~\nmethod RemovePod(name: string, force: bool) -> (pod: string)\n\n# This method has not be implemented yet.\n# method WaitPod() -> (notimplemented: NotImplemented)\n\nmethod TopPod(pod: string, latest: bool, descriptors: []string) -> (stats: []string)\n\n# GetPodStats takes the name or ID of a pod and returns a pod name and slice of ContainerStats structure which\n# contains attributes like memory and cpu usage. If the pod cannot be found, a [PodNotFound](#PodNotFound)\n# error will be returned. If the pod has no running containers associated with it, a [NoContainerRunning](#NoContainerRunning)\n# error will be returned.\n# #### Example\n# ~~~\n# $ varlink call unix:/run/podman/io.podman/io.podman.GetPodStats '{\"name\": \"7f62b508b6f12b11d8fe02e\"}'\n# {\n# \"containers\": [\n# {\n# \"block_input\": 0,\n# \"block_output\": 0,\n# \"cpu\": 2.833470544016107524276e-08,\n# \"cpu_nano\": 54363072,\n# \"id\": \"a64b51f805121fe2c5a3dc5112eb61d6ed139e3d1c99110360d08b58d48e4a93\",\n# \"mem_limit\": 12276146176,\n# \"mem_perc\": 7.974359265237864966003e-03,\n# \"mem_usage\": 978944,\n# \"name\": \"quirky_heisenberg\",\n# \"net_input\": 866,\n# \"net_output\": 7388,\n# \"pids\": 1,\n# \"system_nano\": 20000000\n# }\n# ],\n# \"pod\": \"7f62b508b6f12b11d8fe02e0db4de6b9e43a7d7699b33a4fc0d574f6e82b4ebd\"\n# }\n# ~~~\nmethod GetPodStats(name: string) -> (pod: string, containers: []ContainerStats)\n\n# GetPodsByStatus searches for pods whose status is included in statuses\nmethod GetPodsByStatus(statuses: 
[]string) -> (pods: []string)\n\n# ImageExists talks a full or partial image ID or name and returns an int as to whether\n# the image exists in local storage. An int result of 0 means the image does exist in\n# local storage; whereas 1 indicates the image does not exists in local storage.\n# #### Example\n# ~~~\n# $ varlink call -m unix:/run/podman/io.podman/io.podman.ImageExists '{\"name\": \"imageddoesntexist\"}'\n# {\n# \"exists\": 1\n# }\n# ~~~\nmethod ImageExists(name: string) -> (exists: int)\n\n# ContainerExists takes a full or partial container ID or name and returns an int as to\n# whether the container exists in local storage. A result of 0 means the container does\n# exists; whereas a result of 1 means it could not be found.\n# #### Example\n# ~~~\n# $ varlink call -m unix:/run/podman/io.podman/io.podman.ContainerExists '{\"name\": \"flamboyant_payne\"}'{\n# \"exists\": 0\n# }\n# ~~~\nmethod ContainerExists(name: string) -> (exists: int)\n\n# ContainerCheckPoint performs a checkpopint on a container by its name or full/partial container\n# ID. On successful checkpoint, the id of the checkpointed container is returned.\nmethod ContainerCheckpoint(name: string, keep: bool, leaveRunning: bool, tcpEstablished: bool) -> (id: string)\n\n# ContainerRestore restores a container that has been checkpointed. The container to be restored can\n# be identified by its name or full/partial container ID. 
A successful restore will result in the return\n# of the container's ID.\nmethod ContainerRestore(name: string, keep: bool, tcpEstablished: bool) -> (id: string)\n\n# ContainerRunlabel runs executes a command as described by a given container image label.\nmethod ContainerRunlabel(runlabel: Runlabel) -> ()\n\n# ExecContainer executes a command in the given container.\nmethod ExecContainer(opts: ExecOpts) -> ()\n\n# ListContainerMounts gathers all the mounted container mount points and returns them as an array\n# of strings\n# #### Example\n# ~~~\n# $ varlink call unix:/run/podman/io.podman/io.podman.ListContainerMounts\n# {\n# \"mounts\": {\n# \"04e4c255269ed2545e7f8bd1395a75f7949c50c223415c00c1d54bfa20f3b3d9\": \"/var/lib/containers/storage/overlay/a078925828f57e20467ca31cfca8a849210d21ec7e5757332b72b6924f441c17/merged\",\n# \"1d58c319f9e881a644a5122ff84419dccf6d138f744469281446ab243ef38924\": \"/var/lib/containers/storage/overlay/948fcf93f8cb932f0f03fd52e3180a58627d547192ffe3b88e0013b98ddcd0d2/merged\"\n# }\n# }\n# ~~~\nmethod ListContainerMounts() -> (mounts: [string]string)\n\n# MountContainer mounts a container by name or full/partial ID. Upon a successful mount, the destination\n# mount is returned as a string.\n# #### Example\n# ~~~\n# $ varlink call -m unix:/run/podman/io.podman/io.podman.MountContainer '{\"name\": \"jolly_shannon\"}'{\n# \"path\": \"/var/lib/containers/storage/overlay/419eeb04e783ea159149ced67d9fcfc15211084d65e894792a96bedfae0470ca/merged\"\n# }\n# ~~~\nmethod MountContainer(name: string) -> (path: string)\n\n# UnmountContainer umounts a container by its name or full/partial container ID.\n# #### Example\n# ~~~\n# $ varlink call -m unix:/run/podman/io.podman/io.podman.UnmountContainer '{\"name\": \"jolly_shannon\", \"force\": false}'\n# {}\n# ~~~\nmethod UnmountContainer(name: string, force: bool) -> ()\n\n# ImagesPrune removes all unused images from the local store. 
Upon successful pruning,\n# the IDs of the removed images are returned.\nmethod ImagesPrune(all: bool) -> (pruned: []string)\n\n# This function is not implemented yet.\n# method ListContainerPorts(name: string) -> (notimplemented: NotImplemented)\n\n# GenerateKube generates a Kubernetes v1 Pod description of a Podman container or pod\n# and its containers. The description is in YAML. See also [ReplayKube](ReplayKube).\nmethod GenerateKube(name: string, service: bool) -> (pod: KubePodService)\n\n# ReplayKube recreates a pod and its containers based on a Kubernetes v1 Pod description (in YAML)\n# like that created by GenerateKube. See also [GenerateKube](GenerateKube).\n# method ReplayKube() -> (notimplemented: NotImplemented)\n\n# ContainerConfig returns a container's config in string form. This call is for\n# development of Podman only and generally should not be used.\nmethod ContainerConfig(name: string) -> (config: string)\n\n# ContainerArtifacts returns a container's artifacts in string form. This call is for\n# development of Podman only and generally should not be used.\nmethod ContainerArtifacts(name: string, artifactName: string) -> (config: string)\n\n# ContainerInspectData returns a container's inspect data in string form. This call is for\n# development of Podman only and generally should not be used.\nmethod ContainerInspectData(name: string, size: bool) -> (config: string)\n\n# ContainerStateData returns a container's state config in string form. This call is for\n# development of Podman only and generally should not be used.\nmethod ContainerStateData(name: string) -> (config: string)\n\n# PodStateData returns inspectr level information of a given pod in string form. 
This call is for\n# development of Podman only and generally should not be used.\nmethod PodStateData(name: string) -> (config: string)\n\n# This call is for the development of Podman only and should not be used.\nmethod CreateFromCC(in: []string) -> (id: string)\n\n# Spec returns the oci spec for a container. This call is for development of Podman only and generally should not be used.\nmethod Spec(name: string) -> (config: string)\n\n# Sendfile allows a remote client to send a file to the host\nmethod SendFile(type: string, length: int) -> (file_handle: string)\n\n# ReceiveFile allows the host to send a remote client a file\nmethod ReceiveFile(path: string, delete: bool) -> (len: int)\n\n# VolumeCreate creates a volume on a remote host\nmethod VolumeCreate(options: VolumeCreateOpts) -> (volumeName: string)\n\n# VolumeRemove removes a volume on a remote host\nmethod VolumeRemove(options: VolumeRemoveOpts) -> (volumeNames: []string)\n\n# GetVolumes gets slice of the volumes on a remote host\nmethod GetVolumes(args: []string, all: bool) -> (volumes: []Volume)\n\n# VolumesPrune removes unused volumes on the host\nmethod VolumesPrune() -> (prunedNames: []string, prunedErrors: []string)\n\n# ImageSave allows you to save an image from the local image storage to a tarball\nmethod ImageSave(options: ImageSaveOptions) -> (reply: MoreResponse)\n\n# GetPodsByContext allows you to get a list pod ids depending on all, latest, or a list of\n# pod names. The definition of latest pod means the latest by creation date. 
In a multi-\n# user environment, results might differ from what you expect.\nmethod GetPodsByContext(all: bool, latest: bool, args: []string) -> (pods: []string)\n\n# LoadImage allows you to load an image into local storage from a tarball.\nmethod LoadImage(name: string, inputFile: string, quiet: bool, deleteFile: bool) -> (reply: MoreResponse)\n\n# GetEvents returns known libpod events filtered by the options provided.\nmethod GetEvents(filter: []string, since: string, until: string) -> (events: Event)\n\n# Diff returns a diff between libpod objects\nmethod Diff(name: string) -> (diffs: []DiffInfo)\n\n# GetLayersMapWithImageInfo is for the development of Podman and should not be used.\nmethod GetLayersMapWithImageInfo() -> (layerMap: string)\n\n# BuildImageHierarchyMap is for the development of Podman and should not be used.\nmethod BuildImageHierarchyMap(name: string) -> (imageInfo: string)\n\nmethod GenerateSystemd(name: string, restart: string, timeout: int, useName: bool) -> (unit: string)\n\n# ImageNotFound means the image could not be found by the provided name or ID in local storage.\nerror ImageNotFound (id: string, reason: string)\n\n# ContainerNotFound means the container could not be found by the provided name or ID in local storage.\nerror ContainerNotFound (id: string, reason: string)\n\n# NoContainerRunning means none of the containers requested are running in a command that requires a running container.\nerror NoContainerRunning ()\n\n# PodNotFound means the pod could not be found by the provided name or ID in local storage.\nerror PodNotFound (name: string, reason: string)\n\n# VolumeNotFound means the volume could not be found by the name or ID in local storage.\nerror VolumeNotFound (id: string, reason: string)\n\n# PodContainerError means a container associated with a pod failed to perform an operation. 
It contains\n# a container ID of the container that failed.\nerror PodContainerError (podname: string, errors: []PodContainerErrorData)\n\n# NoContainersInPod means a pod has no containers on which to perform the operation. It contains\n# the pod ID.\nerror NoContainersInPod (name: string)\n\n# InvalidState indicates that a container or pod was in an improper state for the requested operation\nerror InvalidState (id: string, reason: string)\n\n# ErrorOccurred is a generic error for an error that occurs during the execution. The actual error message\n# is includes as part of the error's text.\nerror ErrorOccurred (reason: string)\n\n# RuntimeErrors generally means a runtime could not be found or gotten.\nerror RuntimeError (reason: string)\n\n# The Podman endpoint requires that you use a streaming connection.\nerror WantsMoreRequired (reason: string)\n\n# Container is already stopped\nerror ErrCtrStopped (id: string)\n\n# This function requires CGroupsV2 to run in rootless mode.\nerror ErrRequiresCgroupsV2ForRootless(reason: string)\n`\n}",
"func (d *Descriptor) Image() (v1.Image, error) {\n\tswitch d.MediaType {\n\tcase types.DockerManifestSchema1, types.DockerManifestSchema1Signed:\n\t\t// We don't care to support schema 1 images:\n\t\t// https://github.com/google/go-containerregistry/issues/377\n\t\treturn nil, newErrSchema1(d.MediaType)\n\tcase types.OCIImageIndex, types.DockerManifestList:\n\t\t// We want an image but the registry has an index, resolve it to an image.\n\t\treturn d.remoteIndex().imageByPlatform(d.platform)\n\tcase types.OCIManifestSchema1, types.DockerManifestSchema2:\n\t\t// These are expected. Enumerated here to allow a default case.\n\tdefault:\n\t\t// We could just return an error here, but some registries (e.g. static\n\t\t// registries) don't set the Content-Type headers correctly, so instead...\n\t\tlogs.Warn.Printf(\"Unexpected media type for Image(): %s\", d.MediaType)\n\t}\n\n\t// Wrap the v1.Layers returned by this v1.Image in a hint for downstream\n\t// remote.Write calls to facilitate cross-repo \"mounting\".\n\timgCore, err := partial.CompressedToImage(d.remoteImage())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &mountableImage{\n\t\tImage: imgCore,\n\t\tReference: d.Ref,\n\t}, nil\n}",
"func podmanWorkflow(c cluster.TestCluster) {\n\tm := c.Machines()[0]\n\n\t// Test: Verify container can run with volume mount and port forwarding\n\timage := \"docker.io/library/nginx\"\n\twwwRoot := \"/usr/share/nginx/html\"\n\tvar id string\n\n\tc.Run(\"run\", func(c cluster.TestCluster) {\n\t\tdir := c.MustSSH(m, `mktemp -d`)\n\t\tcmd := fmt.Sprintf(\"echo TEST PAGE > %s/index.html\", string(dir))\n\t\tc.RunCmdSync(m, cmd)\n\n\t\tcmd = fmt.Sprintf(\"sudo podman run -d -p 80:80 -v %s/index.html:%s/index.html:z %s\", string(dir), wwwRoot, image)\n\t\tout := c.MustSSH(m, cmd)\n\t\tid = string(out)[0:64]\n\n\t\tpodIsRunning := func() error {\n\t\t\tb, err := c.SSH(m, `curl -f http://localhost 2>/dev/null`)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif !bytes.Contains(b, []byte(\"TEST PAGE\")) {\n\t\t\t\treturn fmt.Errorf(\"nginx pod is not running %s\", b)\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\n\t\tif err := util.Retry(6, 5*time.Second, podIsRunning); err != nil {\n\t\t\tc.Fatal(\"Pod is not running\")\n\t\t}\n\t})\n\n\t// Test: Execute command in container\n\tc.Run(\"exec\", func(c cluster.TestCluster) {\n\t\tcmd := fmt.Sprintf(\"sudo podman exec %s echo hello\", id)\n\t\tc.AssertCmdOutputContains(m, cmd, \"hello\")\n\t})\n\n\t// Test: Stop container\n\tc.Run(\"stop\", func(c cluster.TestCluster) {\n\t\tcmd := fmt.Sprintf(\"sudo podman stop %s\", id)\n\t\tc.RunCmdSync(m, cmd)\n\t\tpsInfo, err := getSimplifiedPsInfo(c, m)\n\t\tif err != nil {\n\t\t\tc.Fatal(err)\n\t\t}\n\n\t\tfound := false\n\t\tfor _, container := range psInfo.containers {\n\t\t\t// Sometime between podman 1.x and 2.x podman started putting\n\t\t\t// full 64 character IDs into the json output. Dynamically detect\n\t\t\t// the length of the ID and compare that number of characters.\n\t\t\tif container.ID == id[0:len(container.ID)] {\n\t\t\t\tfound = true\n\t\t\t\tif !strings.Contains(strings.ToLower(container.Status), \"exited\") {\n\t\t\t\t\tc.Fatalf(\"Container %s was not stopped. 
Current status: %s\", id, container.Status)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif !found {\n\t\t\tc.Fatalf(\"Unable to find container %s in podman ps -a output\", id)\n\t\t}\n\t})\n\n\t// Test: Remove container\n\tc.Run(\"remove\", func(c cluster.TestCluster) {\n\t\tcmd := fmt.Sprintf(\"sudo podman rm %s\", id)\n\t\tc.RunCmdSync(m, cmd)\n\t\tpsInfo, err := getSimplifiedPsInfo(c, m)\n\t\tif err != nil {\n\t\t\tc.Fatal(err)\n\t\t}\n\n\t\tfound := false\n\t\tfor _, container := range psInfo.containers {\n\t\t\tif container.ID == id {\n\t\t\t\tfound = true\n\t\t\t}\n\t\t}\n\n\t\tif found {\n\t\t\tc.Fatalf(\"Container %s should be removed. %v\", id, psInfo.containers)\n\t\t}\n\t})\n\n\t// Test: Delete container\n\tc.Run(\"delete\", func(c cluster.TestCluster) {\n\t\tcmd := fmt.Sprintf(\"sudo podman rmi %s\", image)\n\t\tout := c.MustSSH(m, cmd)\n\t\timageID := string(out)\n\n\t\tcmd = fmt.Sprintf(\"sudo podman images | grep %s\", imageID)\n\t\tout, err := c.SSH(m, cmd)\n\t\tif err == nil {\n\t\t\tc.Fatalf(\"Image should be deleted but found %s\", string(out))\n\t\t}\n\t})\n}",
"func (d *Descriptor) Image() (v1.Image, error) {\n\tswitch d.MediaType {\n\tcase types.DockerManifestSchema1, types.DockerManifestSchema1Signed:\n\t\t// We don't care to support schema 1 images:\n\t\t// https://github.com/google/go-containerregistry/issues/377\n\t\treturn nil, newErrSchema1(d.MediaType)\n\tcase types.OCIImageIndex, types.DockerManifestList:\n\t\t// We want an image but the registry has an index, resolve it to an image.\n\t\treturn d.remoteIndex().imageByPlatform(d.platform)\n\tcase types.OCIManifestSchema1, types.DockerManifestSchema2:\n\t\t// These are expected. Enumerated here to allow a default case.\n\tdefault:\n\t\t// We could just return an error here, but some registries (e.g. static\n\t\t// registries) don't set the Content-Type headers correctly, so instead...\n\t\tlogs.Warn.Printf(\"Unexpected media type for Image(): %s\", d.MediaType)\n\t}\n\n\t// Wrap the v1.Layers returned by this v1.Image in a hint for downstream\n\t// remote.Write calls to facilitate cross-repo \"mounting\".\n\timgCore, err := partial.CompressedToImage(d.remoteImage())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &mountableImage{\n\t\tImage: imgCore,\n\t\tReference: d.ref,\n\t}, nil\n}",
"func Image(reference string) func(*latest.ServiceConfig) {\n\treturn func(c *latest.ServiceConfig) {\n\t\tc.Image = reference\n\t}\n}",
"func RunPod(ctx context.Context, pod corev1.Pod,\n\tts oauth2.TokenSource, watcherImage, project, zone, machineType string) error {\n\n\tsvc, err := compute.NewService(ctx, option.WithTokenSource(ts))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb, err := yaml.Marshal(pod)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpodstr := string(b)\n\tlog.Println(\"POD MANIFEST:\\n\", podstr) // TODO remove\n\n\tregion := zone[:strings.LastIndex(zone, \"-\")]\n\n\twatcherPod := fmt.Sprintf(watcherPodFmt, watcherImage)\n\n\tname := \"instance-\" + uuid.New().String()[:4]\n\tlog.Printf(\"creating %q...\", name)\n\top, err := svc.Instances.Insert(project, zone, &compute.Instance{\n\t\tName: name,\n\t\tZone: zone,\n\t\tMachineType: fmt.Sprintf(\"projects/%s/zones/%s/machineTypes/%s\", project, zone, machineType),\n\t\tDisks: []*compute.AttachedDisk{{\n\t\t\tInitializeParams: &compute.AttachedDiskInitializeParams{\n\t\t\t\tSourceImage: \"projects/cos-cloud/global/images/family/cos-stable\",\n\t\t\t},\n\t\t\tBoot: true,\n\t\t}},\n\t\tNetworkInterfaces: []*compute.NetworkInterface{{\n\t\t\tSubnetwork: fmt.Sprintf(\"projects/%s/regions/%s/subnetworks/default\", project, region),\n\t\t\tAccessConfigs: []*compute.AccessConfig{{\n\t\t\t\tName: \"External NAT\",\n\t\t\t\tType: \"ONE_TO_ONE_NAT\",\n\t\t\t\tNetworkTier: \"PREMIUM\",\n\t\t\t}},\n\t\t}},\n\t\tServiceAccounts: []*compute.ServiceAccount{{\n\t\t\tEmail: \"[email protected]\",\n\t\t\tScopes: []string{\n\t\t\t\t// Permiission to pull private images (watcher)\n\t\t\t\t\"https://www.googleapis.com/auth/devstorage.read_only\",\n\n\t\t\t\t// Permission to write logs and metrics (google-fluentd)\n\t\t\t\t\"https://www.googleapis.com/auth/logging.write\",\n\t\t\t\t\"https://www.googleapis.com/auth/monitoring.write\",\n\t\t\t},\n\t\t}},\n\t\tMetadata: &compute.Metadata{\n\t\t\tItems: []*compute.MetadataItems{{\n\t\t\t\tKey: \"user-data\",\n\t\t\t\tValue: &cloudConfig,\n\t\t\t}, {\n\t\t\t\tKey: \"watcher\",\n\t\t\t\tValue: 
&watcherPod,\n\t\t\t}, {\n\t\t\t\tKey: \"pod\",\n\t\t\t\tValue: &podstr,\n\t\t\t}, {\n\t\t\t\tKey: \"ca-cert\",\n\t\t\t\tValue: &caCert,\n\t\t\t}, {\n\t\t\t\tKey: \"ca-cert-key\",\n\t\t\t\tValue: &caCertKey,\n\t\t\t}, {\n\t\t\t\tKey: \"cos-metrics-enabled\",\n\t\t\t\tValue: &trueString,\n\t\t\t}},\n\t\t},\n\t\tTags: &compute.Tags{Items: []string{\"https-server\"}},\n\t\tShieldedInstanceConfig: &compute.ShieldedInstanceConfig{\n\t\t\tEnableSecureBoot: true,\n\t\t},\n\t}).Do()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstart := time.Now()\n\tfor ; ; time.Sleep(time.Second) {\n\t\top, err = svc.ZoneOperations.Get(project, zone, op.Name).Do()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Printf(\"operation is %q...\", op.Status)\n\t\tif op.Status == \"DONE\" {\n\t\t\tbreak\n\t\t}\n\t}\n\tlog.Println(\"startup took\", time.Since(start))\n\treturn nil\n}",
"func podInfo(pod v1.Pod) (result string) {\n\tpodname := pod.Name\n\tpodstatus := strings.ToLower(string(pod.Status.Phase))\n\timages := \"\"\n\tfor _, container := range pod.Spec.Containers {\n\t\timages += fmt.Sprintf(\"%v \", container.Image)\n\t}\n\tresult += fmt.Sprintf(\"pod [%v] is %v and uses image(s) %v\\n\", podname, podstatus, images)\n\treturn result\n}",
"func BuildImagePackage(imgName, packName, targetUri, creds, artHome string) error {\n\tpName, err := core.ParseName(packName)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"invalid package name: %s, ensure the container image name fully specify host and user/group if from docker.io\", err)\n\t}\n\t// if a target has been specified\n\tif len(targetUri) > 0 {\n\t\t// if a final slash does not exist add it\n\t\tif targetUri[len(targetUri)-1] != '/' {\n\t\t\ttargetUri = fmt.Sprintf(\"%s/\", targetUri)\n\t\t}\n\t\t// automatically adds a tar filename to the URI based on the package name:tag\n\t\ttargetUri = fmt.Sprintf(\"%s%s\", targetUri, pkgFilename(*pName))\n\t}\n\t// should we use docker or podman?\n\tcontainerCli, err := containerCmd()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot create image archive: %s\", err)\n\t}\n\t// create a build file to build the package containing the image tar\n\tpbf := data.BuildFile{\n\t\tLabels: map[string]string{\n\t\t\t\"image\": imgName,\n\t\t},\n\t\tProfiles: []*data.Profile{\n\t\t\t{\n\t\t\t\tName: \"package-image\",\n\t\t\t\tTarget: \"./build\",\n\t\t\t\tType: \"content/image\",\n\t\t\t},\n\t\t},\n\t}\n\tpbfBytes, err := yaml.Marshal(pbf)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot marshall packaging build file: %s\", err)\n\t}\n\t// create a build file to import image tar in package\n\texport := true\n\tbf := data.BuildFile{\n\t\tLabels: map[string]string{\n\t\t\t\"image\": imgName,\n\t\t},\n\t\tFunctions: []*data.Function{\n\t\t\t{\n\t\t\t\tName: \"import\",\n\t\t\t\tDescription: \"imports docker image in local docker registry\",\n\t\t\t\tExport: &export,\n\t\t\t\tRun: []string{\n\t\t\t\t\tfmt.Sprintf(\"%s load -i %s.tar\", containerCli, pkgName(imgName)),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tbfBytes, err := yaml.Marshal(bf)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot marshall package build file: %s\", err)\n\t}\n\n\ttmp, err := core.NewTempDir(artHome)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot 
create temp folder for processing image archive: %s\", err)\n\t}\n\t// create a target folder for the artisan package\n\ttargetFolder := filepath.Join(tmp, \"build\")\n\terr = os.MkdirAll(targetFolder, 0755)\n\t// workout the docker save command\n\tcmd := fmt.Sprintf(\"%s save %s -o %s/%s.tar\", containerCli, imgName, targetFolder, imgFilename(imgName))\n\t// execute the command synchronously\n\tcore.InfoLogger.Printf(\"exporting image %s to tarball file\", imgName)\n\t_, err = build.Exe(cmd, tmp, merge.NewEnVarFromSlice([]string{}), false)\n\tif err != nil {\n\t\tos.RemoveAll(tmp)\n\t\treturn fmt.Errorf(\"cannot execute archive command: %s\", err)\n\t}\n\tcore.InfoLogger.Println(\"packaging image tarball file\")\n\terr = os.WriteFile(filepath.Join(tmp, \"build.yaml\"), pbfBytes, 0755)\n\tif err != nil {\n\t\tos.RemoveAll(tmp)\n\t\treturn fmt.Errorf(\"cannot save packaging build file: %s\", err)\n\t}\n\terr = os.WriteFile(filepath.Join(targetFolder, \"build.yaml\"), bfBytes, 0755)\n\tif err != nil {\n\t\tos.RemoveAll(tmp)\n\t\treturn fmt.Errorf(\"cannot save package build file: %s\", err)\n\t}\n\tb := build.NewBuilder(artHome)\n\tb.Build(tmp, \"\", \"\", pName, \"\", false, false, \"\")\n\tr := registry.NewLocalRegistry(artHome)\n\t// export package\n\tcore.InfoLogger.Printf(\"exporting image package to tarball file\")\n\tif len(targetUri) > 0 {\n\t\terr = r.ExportPackage([]core.PackageName{*pName}, \"\", targetUri, creds)\n\t\tif err != nil {\n\t\t\tos.RemoveAll(tmp)\n\t\t\treturn fmt.Errorf(\"cannot save package to destination: %s\", err)\n\t\t}\n\t}\n\treturn nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
PodmanWrite saves the image into podman as the given tag. same as github.com/google/gocontainerregistry/pkg/v1/daemon | func PodmanWrite(ref name.Reference, img v1.Image, opts ...tarball.WriteOption) (string, error) {
pr, pw := io.Pipe()
go func() {
_ = pw.CloseWithError(tarball.Write(ref, img, pw, opts...))
}()
// write the image in docker save format first, then load it
cmd := exec.Command("sudo", "podman", "image", "load")
cmd.Stdin = pr
output, err := cmd.Output()
if err != nil {
return "", fmt.Errorf("error loading image: %v", err)
}
// pull the image from the registry, to get the digest too
// podman: "Docker references with both a tag and digest are currently not supported"
cmd = exec.Command("sudo", "podman", "image", "pull", strings.Split(ref.Name(), "@")[0])
err = cmd.Run()
if err != nil {
return "", fmt.Errorf("error pulling image: %v", err)
}
return string(output), nil
} | [
"func PodmanImage(ref name.Reference, options ...interface{}) (v1.Image, error) {\n\tvar img v1.Image\n\tpr, pw := io.Pipe()\n\tgo func() {\n\t\topener := func() (io.ReadCloser, error) {\n\t\t\treturn pr, nil\n\t\t}\n\t\tvar err error\n\t\ttag := ref.(name.Digest).Tag()\n\t\timg, err = tarball.Image(opener, &tag)\n\t\t_ = pr.CloseWithError(err)\n\t}()\n\n\t// write the image in docker save format first, then load it\n\tcmd := exec.Command(\"sudo\", \"podman\", \"image\", \"save\", strings.Split(ref.Name(), \"@\")[0])\n\tcmd.Stdout = pw\n\terr := cmd.Run()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error loading image: %v\", err)\n\t}\n\treturn img, nil\n}",
"func RunPod(ctx context.Context, pod corev1.Pod,\n\tts oauth2.TokenSource, watcherImage, project, zone, machineType string) error {\n\n\tsvc, err := compute.NewService(ctx, option.WithTokenSource(ts))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb, err := yaml.Marshal(pod)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpodstr := string(b)\n\tlog.Println(\"POD MANIFEST:\\n\", podstr) // TODO remove\n\n\tregion := zone[:strings.LastIndex(zone, \"-\")]\n\n\twatcherPod := fmt.Sprintf(watcherPodFmt, watcherImage)\n\n\tname := \"instance-\" + uuid.New().String()[:4]\n\tlog.Printf(\"creating %q...\", name)\n\top, err := svc.Instances.Insert(project, zone, &compute.Instance{\n\t\tName: name,\n\t\tZone: zone,\n\t\tMachineType: fmt.Sprintf(\"projects/%s/zones/%s/machineTypes/%s\", project, zone, machineType),\n\t\tDisks: []*compute.AttachedDisk{{\n\t\t\tInitializeParams: &compute.AttachedDiskInitializeParams{\n\t\t\t\tSourceImage: \"projects/cos-cloud/global/images/family/cos-stable\",\n\t\t\t},\n\t\t\tBoot: true,\n\t\t}},\n\t\tNetworkInterfaces: []*compute.NetworkInterface{{\n\t\t\tSubnetwork: fmt.Sprintf(\"projects/%s/regions/%s/subnetworks/default\", project, region),\n\t\t\tAccessConfigs: []*compute.AccessConfig{{\n\t\t\t\tName: \"External NAT\",\n\t\t\t\tType: \"ONE_TO_ONE_NAT\",\n\t\t\t\tNetworkTier: \"PREMIUM\",\n\t\t\t}},\n\t\t}},\n\t\tServiceAccounts: []*compute.ServiceAccount{{\n\t\t\tEmail: \"[email protected]\",\n\t\t\tScopes: []string{\n\t\t\t\t// Permiission to pull private images (watcher)\n\t\t\t\t\"https://www.googleapis.com/auth/devstorage.read_only\",\n\n\t\t\t\t// Permission to write logs and metrics (google-fluentd)\n\t\t\t\t\"https://www.googleapis.com/auth/logging.write\",\n\t\t\t\t\"https://www.googleapis.com/auth/monitoring.write\",\n\t\t\t},\n\t\t}},\n\t\tMetadata: &compute.Metadata{\n\t\t\tItems: []*compute.MetadataItems{{\n\t\t\t\tKey: \"user-data\",\n\t\t\t\tValue: &cloudConfig,\n\t\t\t}, {\n\t\t\t\tKey: \"watcher\",\n\t\t\t\tValue: 
&watcherPod,\n\t\t\t}, {\n\t\t\t\tKey: \"pod\",\n\t\t\t\tValue: &podstr,\n\t\t\t}, {\n\t\t\t\tKey: \"ca-cert\",\n\t\t\t\tValue: &caCert,\n\t\t\t}, {\n\t\t\t\tKey: \"ca-cert-key\",\n\t\t\t\tValue: &caCertKey,\n\t\t\t}, {\n\t\t\t\tKey: \"cos-metrics-enabled\",\n\t\t\t\tValue: &trueString,\n\t\t\t}},\n\t\t},\n\t\tTags: &compute.Tags{Items: []string{\"https-server\"}},\n\t\tShieldedInstanceConfig: &compute.ShieldedInstanceConfig{\n\t\t\tEnableSecureBoot: true,\n\t\t},\n\t}).Do()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstart := time.Now()\n\tfor ; ; time.Sleep(time.Second) {\n\t\top, err = svc.ZoneOperations.Get(project, zone, op.Name).Do()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Printf(\"operation is %q...\", op.Status)\n\t\tif op.Status == \"DONE\" {\n\t\t\tbreak\n\t\t}\n\t}\n\tlog.Println(\"startup took\", time.Since(start))\n\treturn nil\n}",
"func Write(tag name.Tag, img v1.Image, options ...Option) (string, error) {\n\to, err := makeOptions(options...)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tpr, pw := io.Pipe()\n\tgo func() {\n\t\tpw.CloseWithError(tarball.Write(tag, img, pw))\n\t}()\n\n\t// write the image in docker save format first, then load it\n\tresp, err := o.client.ImageLoad(o.ctx, pr, false)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error loading image: %w\", err)\n\t}\n\tdefer resp.Body.Close()\n\tb, err := io.ReadAll(resp.Body)\n\tresponse := string(b)\n\tif err != nil {\n\t\treturn response, fmt.Errorf(\"error reading load response body: %w\", err)\n\t}\n\treturn response, nil\n}",
"func ScpTag(cmd *exec.Cmd, podman string, dest entities.ImageScpOptions) error {\n\tcmd.Stdout = nil\n\tout, err := cmd.Output() // this function captures the output temporarily in order to execute the next command\n\tif err != nil {\n\t\treturn err\n\t}\n\timage := ExtractImage(out)\n\tif cmd.Args[0] == \"sudo\" { // transferRootless will need the sudo since we are loading to sudo from a user acct\n\t\tcmd = exec.Command(\"sudo\", podman, \"tag\", image, dest.Tag)\n\t} else {\n\t\tcmd = exec.Command(podman, \"tag\", image, dest.Tag)\n\t}\n\tcmd.Stdout = os.Stdout\n\treturn cmd.Run()\n}",
"func (s *Operations) createPod(secretData map[string][]byte, kustomize bool, imageOverride string) (*v1.Pod, *podimpersonation.PodOptions) {\n\timage := imageOverride\n\tif image == \"\" {\n\t\timage = settings.FullShellImage()\n\t}\n\tsecret := &v1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tGenerateName: \"helm-operation-\",\n\t\t\tNamespace: s.namespace,\n\t\t},\n\t\tData: secretData,\n\t}\n\tpod := &v1.Pod{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tGenerateName: \"helm-operation-\",\n\t\t\tNamespace: s.namespace,\n\t\t},\n\t\tSpec: v1.PodSpec{\n\t\t\tVolumes: []v1.Volume{\n\t\t\t\t{\n\t\t\t\t\tName: \"data\",\n\t\t\t\t\tVolumeSource: v1.VolumeSource{\n\t\t\t\t\t\tSecret: &v1.SecretVolumeSource{\n\t\t\t\t\t\t\tSecretName: \"helm-operation-\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tTerminationGracePeriodSeconds: new(int64),\n\t\t\tRestartPolicy: v1.RestartPolicyNever,\n\t\t\tNodeSelector: map[string]string{\n\t\t\t\t\"kubernetes.io/os\": \"linux\",\n\t\t\t},\n\t\t\tTolerations: []v1.Toleration{\n\t\t\t\t{\n\t\t\t\t\tKey: \"cattle.io/os\",\n\t\t\t\t\tOperator: corev1.TolerationOpEqual,\n\t\t\t\t\tValue: \"linux\",\n\t\t\t\t\tEffect: \"NoSchedule\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tKey: \"node-role.kubernetes.io/controlplane\",\n\t\t\t\t\tOperator: corev1.TolerationOpEqual,\n\t\t\t\t\tValue: \"true\",\n\t\t\t\t\tEffect: \"NoSchedule\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tKey: \"node-role.kubernetes.io/etcd\",\n\t\t\t\t\tOperator: corev1.TolerationOpEqual,\n\t\t\t\t\tValue: \"true\",\n\t\t\t\t\tEffect: \"NoExecute\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tKey: \"node.cloudprovider.kubernetes.io/uninitialized\",\n\t\t\t\t\tOperator: corev1.TolerationOpEqual,\n\t\t\t\t\tValue: \"true\",\n\t\t\t\t\tEffect: \"NoSchedule\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tContainers: []v1.Container{\n\t\t\t\t{\n\t\t\t\t\tName: \"helm\",\n\t\t\t\t\tEnv: []v1.EnvVar{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"KUBECONFIG\",\n\t\t\t\t\t\t\tValue: 
\"/home/shell/.kube/config\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tStdin: true,\n\t\t\t\t\tTTY: true,\n\t\t\t\t\tStdinOnce: true,\n\t\t\t\t\tImage: image,\n\t\t\t\t\tImagePullPolicy: v1.PullIfNotPresent,\n\t\t\t\t\tCommand: []string{\"helm-cmd\"},\n\t\t\t\t\tWorkingDir: helmDataPath,\n\t\t\t\t\tVolumeMounts: []v1.VolumeMount{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"data\",\n\t\t\t\t\t\t\tMountPath: helmDataPath,\n\t\t\t\t\t\t\tReadOnly: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t// if kustomize is false then helmDataPath is an acceptable path for helm to run. If it is true,\n\t// files are copied from helmDataPath to helmRunPath. This is because the kustomize.sh script\n\t// needs write permissions but volumes using a SecretVolumeSource are readOnly. This can not be\n\t// changed with the readOnly field or the defaultMode field.\n\t// See: https://github.com/kubernetes/kubernetes/issues/62099.\n\tif kustomize {\n\t\tpod.Spec.Volumes = append(pod.Spec.Volumes, v1.Volume{\n\t\t\tName: \"helm-run\",\n\t\t\tVolumeSource: v1.VolumeSource{\n\t\t\t\tEmptyDir: &v1.EmptyDirVolumeSource{},\n\t\t\t},\n\t\t})\n\t\tpod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, v1.VolumeMount{\n\t\t\tName: \"helm-run\",\n\t\t\tMountPath: helmRunPath,\n\t\t})\n\t\tpod.Spec.Containers[0].Lifecycle = &v1.Lifecycle{\n\t\t\tPostStart: &v1.LifecycleHandler{\n\t\t\t\tExec: &v1.ExecAction{\n\t\t\t\t\tCommand: []string{\"/bin/sh\", \"-c\", fmt.Sprintf(\"cp -r %s/. %s\", helmDataPath, helmRunPath)},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tpod.Spec.Containers[0].WorkingDir = helmRunPath\n\t}\n\treturn pod, &podimpersonation.PodOptions{\n\t\tSecretsToCreate: []*v1.Secret{\n\t\t\tsecret,\n\t\t},\n\t\tImageOverride: imageOverride,\n\t}\n}",
"func (f *FakeRunner) podman(args []string, _ bool) (string, error) {\n\tswitch cmd := args[0]; cmd {\n\tcase \"--version\":\n\t\treturn \"podman version 1.6.4\", nil\n\n\tcase \"image\":\n\n\t\tif args[1] == \"inspect\" && args[2] == \"--format\" && args[3] == \"{{.Id}}\" {\n\t\t\tif args[3] == \"missing\" {\n\t\t\t\treturn \"\", &exec.ExitError{Stderr: []byte(\"Error: error getting image \\\"missing\\\": unable to find a name and tag match for missing in repotags: no such image\")}\n\t\t\t}\n\t\t\treturn \"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\", nil\n\t\t}\n\n\t}\n\treturn \"\", nil\n}",
"func getDaemonSetImagePatch(containerName, containerImage string) string {\n\treturn fmt.Sprintf(`{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"%s\",\"image\":\"%s\"}]}}}}`, containerName, containerImage)\n}",
"func ExecPodman(dest entities.ImageScpOptions, podman string, command []string) (string, error) {\n\tcmd := exec.Command(podman)\n\tCreateSCPCommand(cmd, command[1:])\n\tlogrus.Debugf(\"Executing podman command: %q\", cmd)\n\tif strings.Contains(strings.Join(command, \" \"), \"load\") { // need to tag\n\t\tif len(dest.Tag) > 0 {\n\t\t\treturn \"\", ScpTag(cmd, podman, dest)\n\t\t}\n\t\tcmd.Stdout = nil\n\t\tout, err := cmd.Output()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\timg := ExtractImage(out)\n\t\treturn img, nil\n\t}\n\treturn \"\", cmd.Run()\n}",
"func generatePod(c *client.Client, podName string, nsName string, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\n\tp := pod.Instance{\n\t\tName: podName,\n\t\tNamespace: nsName,\n\t\tImage: imageSource,\n\t\tLabelKey: \"app\",\n\t\tImagePullPolicy: \"ifnotpresent\",\n\t\tLabelValue: \"podTest\",\n\t}\n\n\ttimeNow := time.Now()\n\tfmt.Printf(\"creating pod %s in namespace %s\\n\", podName, nsName)\n\terr := pod.CreateWaitRunningState(c, &p)\n\t//if err != nil {\n\t//\tfmt.Printf(\"%s\\n\", err)\n\t//\tos.Exit(1)\n\t//}\n\n\tlastTime, err := pod.GetLastTimeConditionHappened(c,\n\t\t\"Ready\",\n\t\tpodName,\n\t\tnsName)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\thour := lastTime.Sub(timeNow).Hours()\n\thour, mf := math.Modf(hour)\n\ttotalHour = totalHour + hour\n\n\tminutes := mf * 60\n\tminutes, sf := math.Modf(minutes)\n\ttotalMinutes = totalMinutes + minutes\n\n\tseconds := sf * 60\n\ttotalSec = totalSec + seconds\n\n\tfmt.Printf(\"\\n- %s is created and responsive in namespace %s ✅\\n\", p.Name, p.Namespace)\n\tfmt.Printf(\"- image used: %s\\n\", imageSource)\n\n\tfmt.Println(\" took:\", hour, \"hours\",\n\t\tminutes, \"minutes\",\n\t\tseconds, \"seconds\")\n\tsumSec = append(sumSec, totalSec)\n\tsumMin = append(sumMin, totalMinutes)\n\tsumHour = append(sumHour, totalHour)\n\ttotalPodsRunning = totalPodsRunning + 1\n\tfmt.Printf(\"TOTAL NUMBER OF PODS RUNNING: %v\\n\", totalPodsRunning)\n\tfmt.Printf(\"TIME NOW: %v\\n\", time.Now().Format(\"2006-01-02 3:4:5 PM\"))\n\n\ttotalHour = 0\n\ttotalMinutes = 0\n\ttotalSec = 0\n}",
"func writePodInformation(pod corev1.Pod) string {\n\tvar buffer strings.Builder\n\tvar containers, readyContainers, restarts int\n\tfor _, conStatus := range pod.Status.ContainerStatuses {\n\t\tcontainers++\n\t\tif conStatus.Ready {\n\t\t\treadyContainers++\n\t\t}\n\n\t\trestarts += int(conStatus.RestartCount)\n\t}\n\n\tbuffer.WriteString(pod.GetName())\n\tbuffer.WriteString(\"\\t\")\n\tbuffer.WriteString(strconv.Itoa(readyContainers))\n\tbuffer.WriteString(\"/\")\n\tbuffer.WriteString(strconv.Itoa(containers))\n\tbuffer.WriteString(\"\\t\")\n\tbuffer.WriteString(string(pod.Status.Phase))\n\n\tif pod.Status.Phase == corev1.PodPending {\n\t\tfor _, condition := range pod.Status.Conditions {\n\t\t\t// Only check the PodScheduled condition.\n\t\t\tif condition.Type != corev1.PodScheduled {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// If the Pod is scheduled we can ignore this condition.\n\t\t\tif condition.Status == corev1.ConditionTrue {\n\t\t\t\tbuffer.WriteString(\"\\t-\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Printout the message, why the Pod is not scheduling.\n\t\t\tbuffer.WriteString(\"\\t\")\n\t\t\tif condition.Message != \"\" {\n\t\t\t\tbuffer.WriteString(condition.Message)\n\t\t\t} else {\n\t\t\t\tbuffer.WriteString(\"-\")\n\t\t\t}\n\t\t}\n\t} else {\n\t\tbuffer.WriteString(\"\\t-\")\n\t}\n\n\tbuffer.WriteString(\"\\t\")\n\tbuffer.WriteString(strconv.Itoa(restarts))\n\n\tif _, ok := pod.Labels[fdbv1beta2.FDBProcessGroupIDLabel]; ok {\n\t\tvar mainTag, sidecarTag string\n\t\tfor _, container := range pod.Spec.Containers {\n\t\t\tif container.Name == fdbv1beta2.MainContainerName {\n\t\t\t\tmainTag = strings.Split(container.Image, \":\")[1]\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif container.Name == fdbv1beta2.SidecarContainerName {\n\t\t\t\tsidecarTag = strings.Split(container.Image, \":\")[1]\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tbuffer.WriteString(\"\\t\")\n\t\tbuffer.WriteString(mainTag)\n\t\tbuffer.WriteString(\"\\t\")\n\t\tbuffer.WriteString(sidecarTag)\n\t} 
else {\n\t\tbuffer.WriteString(\"\\t-\\t-\")\n\t}\n\n\tbuffer.WriteString(\"\\t\")\n\tendIdx := len(pod.Status.PodIPs) - 1\n\tfor idx, ip := range pod.Status.PodIPs {\n\t\tbuffer.WriteString(ip.IP)\n\t\tif endIdx > idx {\n\t\t\tbuffer.WriteString(\",\")\n\t\t}\n\t}\n\n\tbuffer.WriteString(\"\\t\")\n\tbuffer.WriteString(pod.Spec.NodeName)\n\tbuffer.WriteString(\"\\t\")\n\tbuffer.WriteString(duration.HumanDuration(time.Since(pod.CreationTimestamp.Time)))\n\n\treturn buffer.String()\n}",
"func genpod(namespace, name, image string) *corev1.Pod {\n\tvar userID int64 = 65534\n\n\treturn &corev1.Pod{\n\t\tMetadata: &metav1.ObjectMeta{\n\t\t\tName: k8s.String(name),\n\t\t\tNamespace: k8s.String(namespace),\n\t\t\tLabels: map[string]string{\"generator\": \"kboom\"},\n\t\t},\n\t\tSpec: &corev1.PodSpec{\n\t\t\tContainers: []*corev1.Container{\n\t\t\t\t&corev1.Container{\n\t\t\t\t\tName: k8s.String(\"main\"),\n\t\t\t\t\tImage: k8s.String(image),\n\t\t\t\t\tCommand: []string{\"/bin/sh\", \"-ec\", \"sleep 3600\"},\n\t\t\t\t\tSecurityContext: &corev1.SecurityContext{\n\t\t\t\t\t\tRunAsUser: &userID,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}",
"func pushTagImage(idx tagindex, from *os.File, token string) error {\n\t// XXX implement ssl please\n\tconn, err := grpc.Dial(idx.server, grpc.WithInsecure())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)\n\tdefer cancel()\n\n\tclient := pb.NewTagIOServiceClient(conn)\n\tstream, err := client.Push(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// we first send over a communication to indicate we are\n\t// willing to send an image. That will bail out if the\n\t// provided info is wrong.\n\tireq := &pb.PushRequest{\n\t\tTestOneof: &pb.PushRequest_Request{\n\t\t\tRequest: &pb.Request{\n\t\t\t\tName: idx.name,\n\t\t\t\tNamespace: idx.namespace,\n\t\t\t\tToken: token,\n\t\t\t},\n\t\t},\n\t}\n\tif err := stream.Send(ireq); err != nil {\n\t\treturn err\n\t}\n\n\t_, err = pb.SendFileClient(from, stream)\n\treturn err\n}",
"func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, appName, appNodeName, runID, labelSuffix string) error {\n\n\thelperPod := &apiv1.Pod{\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tName: experimentsDetails.ExperimentName + \"-helper-\" + runID,\n\t\t\tNamespace: experimentsDetails.ChaosNamespace,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"app\": experimentsDetails.ExperimentName + \"-helper-\" + labelSuffix,\n\t\t\t\t\"name\": experimentsDetails.ExperimentName + \"-helper-\" + runID,\n\t\t\t\t\"chaosUID\": string(experimentsDetails.ChaosUID),\n\t\t\t\t\"app.kubernetes.io/part-of\": \"litmus\",\n\t\t\t},\n\t\t\tAnnotations: experimentsDetails.Annotations,\n\t\t},\n\t\tSpec: apiv1.PodSpec{\n\t\t\tRestartPolicy: apiv1.RestartPolicyNever,\n\t\t\tImagePullSecrets: experimentsDetails.ImagePullSecrets,\n\t\t\tNodeName: appNodeName,\n\t\t\tVolumes: []apiv1.Volume{\n\t\t\t\t{\n\t\t\t\t\tName: \"dockersocket\",\n\t\t\t\t\tVolumeSource: apiv1.VolumeSource{\n\t\t\t\t\t\tHostPath: &apiv1.HostPathVolumeSource{\n\t\t\t\t\t\t\tPath: experimentsDetails.SocketPath,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tContainers: []apiv1.Container{\n\t\t\t\t{\n\t\t\t\t\tName: experimentsDetails.ExperimentName,\n\t\t\t\t\tImage: experimentsDetails.LIBImage,\n\t\t\t\t\tImagePullPolicy: apiv1.PullPolicy(experimentsDetails.LIBImagePullPolicy),\n\t\t\t\t\tCommand: []string{\n\t\t\t\t\t\t\"sudo\",\n\t\t\t\t\t\t\"-E\",\n\t\t\t\t\t},\n\t\t\t\t\tArgs: []string{\n\t\t\t\t\t\t\"pumba\",\n\t\t\t\t\t\t\"--random\",\n\t\t\t\t\t\t\"--interval\",\n\t\t\t\t\t\tstrconv.Itoa(experimentsDetails.ChaosInterval) + \"s\",\n\t\t\t\t\t\t\"kill\",\n\t\t\t\t\t\t\"--signal\",\n\t\t\t\t\t\texperimentsDetails.Signal,\n\t\t\t\t\t\t\"re2:k8s_\" + experimentsDetails.TargetContainer + \"_\" + appName,\n\t\t\t\t\t},\n\t\t\t\t\tEnv: []apiv1.EnvVar{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"DOCKER_HOST\",\n\t\t\t\t\t\t\tValue: \"unix://\" + 
experimentsDetails.SocketPath,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tResources: experimentsDetails.Resources,\n\t\t\t\t\tVolumeMounts: []apiv1.VolumeMount{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"dockersocket\",\n\t\t\t\t\t\t\tMountPath: experimentsDetails.SocketPath,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t_, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(helperPod)\n\treturn err\n}",
"func formatPodImage(raw string) (result string) {\n\ts := strings.Split(raw, \":\")\n\tif len(s) == 3 {\n\t\tresult = s[2]\n\t}\n\treturn\n}",
"func (r *Resources) createPodForMember(spec api.DeploymentSpec, memberID string, imageNotFoundOnce *sync.Once) error {\n\tlog := r.log\n\tstatus, lastVersion := r.context.GetStatus()\n\n\t// Select image\n\timageInfo, imageFound := r.SelectImage(spec, status)\n\tif !imageFound {\n\t\timageNotFoundOnce.Do(func() {\n\t\t\tlog.Debug().Str(\"image\", spec.GetImage()).Msg(\"Image ID is not known yet for image\")\n\t\t})\n\t\treturn nil\n\t}\n\tstatus.CurrentImage = &imageInfo\n\n\tpod, err := r.RenderPodForMember(spec, status, memberID, imageInfo)\n\tif err != nil {\n\t\treturn maskAny(err)\n\t}\n\n\tkubecli := r.context.GetKubeCli()\n\tapiObject := r.context.GetAPIObject()\n\tns := r.context.GetNamespace()\n\tsecrets := kubecli.CoreV1().Secrets(ns)\n\tm, group, found := status.Members.ElementByID(memberID)\n\tif !found {\n\t\treturn maskAny(fmt.Errorf(\"Member '%s' not found\", memberID))\n\t}\n\tgroupSpec := spec.GetServerGroupSpec(group)\n\n\t// Update pod name\n\trole := group.AsRole()\n\troleAbbr := group.AsRoleAbbreviated()\n\n\tm.PodName = k8sutil.CreatePodName(apiObject.GetName(), roleAbbr, m.ID, CreatePodSuffix(spec))\n\tnewPhase := api.MemberPhaseCreated\n\t// Create pod\n\tif group.IsArangod() {\n\t\t// Prepare arguments\n\t\tautoUpgrade := m.Conditions.IsTrue(api.ConditionTypeAutoUpgrade)\n\t\tif autoUpgrade {\n\t\t\tnewPhase = api.MemberPhaseUpgrading\n\t\t}\n\t\tif spec.IsSecure() {\n\t\t\ttlsKeyfileSecretName := k8sutil.CreateTLSKeyfileSecretName(apiObject.GetName(), role, m.ID)\n\t\t\tserverNames := []string{\n\t\t\t\tk8sutil.CreateDatabaseClientServiceDNSName(apiObject),\n\t\t\t\tk8sutil.CreatePodDNSName(apiObject, role, m.ID),\n\t\t\t}\n\t\t\tif ip := spec.ExternalAccess.GetLoadBalancerIP(); ip != \"\" {\n\t\t\t\tserverNames = append(serverNames, ip)\n\t\t\t}\n\t\t\towner := apiObject.AsOwner()\n\t\t\tif err := createTLSServerCertificate(log, secrets, serverNames, spec.TLS, tlsKeyfileSecretName, &owner); err != nil && !k8sutil.IsAlreadyExists(err) 
{\n\t\t\t\treturn maskAny(errors.Wrapf(err, \"Failed to create TLS keyfile secret\"))\n\t\t\t}\n\t\t}\n\n\t\tuid, checksum, err := CreateArangoPod(kubecli, apiObject, pod)\n\t\tif err != nil {\n\t\t\treturn maskAny(err)\n\t\t}\n\n\t\tm.PodUID = uid\n\t\tm.PodSpecVersion = checksum\n\t\tm.ArangoVersion = status.CurrentImage.ArangoDBVersion\n\t\tm.ImageID = status.CurrentImage.ImageID\n\n\t\t// Check for missing side cars in\n\t\tm.SideCarSpecs = make(map[string]core.Container)\n\t\tfor _, specSidecar := range groupSpec.GetSidecars() {\n\t\t\tm.SideCarSpecs[specSidecar.Name] = *specSidecar.DeepCopy()\n\t\t}\n\n\t\tlog.Debug().Str(\"pod-name\", m.PodName).Msg(\"Created pod\")\n\t} else if group.IsArangosync() {\n\t\t// Check monitoring token secret\n\t\tif group == api.ServerGroupSyncMasters {\n\t\t\t// Create TLS secret\n\t\t\ttlsKeyfileSecretName := k8sutil.CreateTLSKeyfileSecretName(apiObject.GetName(), role, m.ID)\n\t\t\tserverNames := []string{\n\t\t\t\tk8sutil.CreateSyncMasterClientServiceName(apiObject.GetName()),\n\t\t\t\tk8sutil.CreateSyncMasterClientServiceDNSName(apiObject),\n\t\t\t\tk8sutil.CreatePodDNSName(apiObject, role, m.ID),\n\t\t\t}\n\t\t\tmasterEndpoint := spec.Sync.ExternalAccess.ResolveMasterEndpoint(k8sutil.CreateSyncMasterClientServiceDNSName(apiObject), k8sutil.ArangoSyncMasterPort)\n\t\t\tfor _, ep := range masterEndpoint {\n\t\t\t\tif u, err := url.Parse(ep); err == nil {\n\t\t\t\t\tserverNames = append(serverNames, u.Hostname())\n\t\t\t\t}\n\t\t\t}\n\t\t\towner := apiObject.AsOwner()\n\t\t\tif err := createTLSServerCertificate(log, secrets, serverNames, spec.Sync.TLS, tlsKeyfileSecretName, &owner); err != nil && !k8sutil.IsAlreadyExists(err) {\n\t\t\t\treturn maskAny(errors.Wrapf(err, \"Failed to create TLS keyfile secret\"))\n\t\t\t}\n\t\t}\n\n\t\tuid, checksum, err := CreateArangoPod(kubecli, apiObject, pod)\n\t\tif err != nil {\n\t\t\treturn maskAny(err)\n\t\t}\n\t\tlog.Debug().Str(\"pod-name\", m.PodName).Msg(\"Created 
pod\")\n\n\t\tm.PodUID = uid\n\t\tm.PodSpecVersion = checksum\n\t}\n\t// Record new member phase\n\tm.Phase = newPhase\n\tm.Conditions.Remove(api.ConditionTypeReady)\n\tm.Conditions.Remove(api.ConditionTypeTerminated)\n\tm.Conditions.Remove(api.ConditionTypeTerminating)\n\tm.Conditions.Remove(api.ConditionTypeAgentRecoveryNeeded)\n\tm.Conditions.Remove(api.ConditionTypeAutoUpgrade)\n\tif err := status.Members.Update(m, group); err != nil {\n\t\treturn maskAny(err)\n\t}\n\tif err := r.context.UpdateStatus(status, lastVersion); err != nil {\n\t\treturn maskAny(err)\n\t}\n\t// Create event\n\tr.context.CreateEvent(k8sutil.NewPodCreatedEvent(m.PodName, role, apiObject))\n\n\treturn nil\n}",
"func writeDataInPod(app *v1.Pod, f *framework.Framework) error {\n\tapp.Labels = map[string]string{\"app\": \"write-data-in-pod\"}\n\tapp.Namespace = f.UniqueName\n\n\terr := createApp(f.ClientSet, app, deployTimeout)\n\tif err != nil {\n\t\treturn err\n\t}\n\topt := metav1.ListOptions{\n\t\tLabelSelector: \"app=write-data-in-pod\",\n\t}\n\t// write data to PVC. The idea here is to fill some content in the file\n\t// instead of filling and reverifying the md5sum/data integrity\n\tfilePath := app.Spec.Containers[0].VolumeMounts[0].MountPath + \"/test\"\n\t// While writing more data we are encountering issues in E2E timeout, so keeping it low for now\n\t_, writeErr := execCommandInPod(f, fmt.Sprintf(\"dd if=/dev/zero of=%s bs=1M count=10 status=none\", filePath), app.Namespace, &opt)\n\tExpect(writeErr).Should(BeEmpty())\n\treturn nil\n}",
"func setImage(dep *appsv1.Deployment, ctn string, image string) {\n\tfor index, value := range dep.Spec.Template.Spec.Containers {\n\t\tif value.Name == ctn {\n\t\t\tnewImage := \"\"\n\t\t\toriImage := dep.Spec.Template.Spec.Containers[index].Image\n\t\t\timageStrutct := strings.Split(oriImage, \":\")\n\t\t\tif len(imageStrutct) != 0 {\n\t\t\t\tnewImage = fmt.Sprintf(\"%s:%s\", image, imageStrutct[len(imageStrutct)-1])\n\t\t\t\tdep.Spec.Template.Spec.Containers[index].Image = newImage\n\t\t\t}\n\t\t}\n\t}\n}",
"func podInfo(pod v1.Pod) (result string) {\n\tpodname := pod.Name\n\tpodstatus := strings.ToLower(string(pod.Status.Phase))\n\timages := \"\"\n\tfor _, container := range pod.Spec.Containers {\n\t\timages += fmt.Sprintf(\"%v \", container.Image)\n\t}\n\tresult += fmt.Sprintf(\"pod [%v] is %v and uses image(s) %v\\n\", podname, podstatus, images)\n\treturn result\n}",
"func (b *Builder) pushImage(ctx context.Context, writer io.Writer, imageName string) error {\n\tref, err := reference.ParseNormalizedNamed(imageName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tencodedAuth, err := encodeAuthToBase64(*b.authConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tout, err := b.client.ImagePush(ctx, reference.FamiliarString(ref), types.ImagePushOptions{\n\t\tRegistryAuth: encodedAuth,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\toutStream := streams.NewOut(writer)\n\terr = jsonmessage.DisplayJSONMessagesStream(out, outStream, outStream.FD(), outStream.IsTerminal(), nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GenerateTestFile creates a file with the template values inserted into the template | func GenerateTestFile(testFileName string, templateValues *TemplateValues) error {
outFile, err := os.Create(testFileName)
if err != nil {
fmt.Printf("Error creating test file named: %s\n", testFileName)
}
tmpl := template.Must(template.New("out").Parse(outputTemplate))
if err := tmpl.Execute(outFile, templateValues); err != nil {
return err
}
if err := outFile.Close(); err != nil {
return err
}
return nil
} | [
"func CreateTestFile(fileName string) {\n\tif !strings.HasSuffix(fileName, \"_test.go\") {\n\t\treturn\n\t}\n\tcreateDir(fileName)\n\tif err := ioutil.WriteFile(fileName, []byte(mainttpl), 0644); err != nil {\n\t\tfmt.Printf(\"write file [%s] failed:%v\\n\", fileName, err)\n\t}\n}",
"func GenerateWithTestFile(fileSet *token.FileSet, pkgs map[string]*ast.Package, pkgPath string, typeName string) (*ast.File, *ast.File) {\n\tpkg, spec, zeroValue, values, valueStrings, valueStringLits := findDefinitions(fileSet, pkgs, typeName)\n\treturn generateFile(pkg, spec, zeroValue, values, valueStrings), generateTestFile(pkg, pkgPath, spec, zeroValue, values, valueStringLits)\n}",
"func GenerateTestSript(filePath string) error {\n file, err := os.Create(filePath)\n if err != nil {\n log.Fatal(err, filePath)\n }\n defer file.Close()\n\t_, err = file.WriteString(testScript)\n if err != nil {\n log.Fatal(err, filePath)\n }\n\tlog.Info(\"Default test script generated: \", filePath)\n return err\n}",
"func createFile(input map[string]*context,\n\ttemplate string, conf string) error {\n\t// read the template\n\tcontents, err := ioutil.ReadFile(template)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t// replace\n\tfor _, ctx := range input {\n\t\tcontents = bytes.Replace(contents, []byte(ctx.templateKeyword),\n\t\t\t[]byte(ctx.cliInput), -1)\n\t}\n\t// write\n\terr = ioutil.WriteFile(conf, contents, 0644)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn nil\n}",
"func genFile(t *testing.T, dir string, contents string) string {\n\tf, err := ioutil.TempFile(dir, \"tmp*\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer f.Close()\n\tf.Write([]byte(contents))\n\treturn filepath.Base(f.Name())\n}",
"func generateAndRunFile(projectDir, fileName string, tmpl *template.Template) {\n\tprojectPack, err := build.ImportDir(path.Join(projectDir, \"config\"), 0)\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"Error while importing project path: %s\", err))\n\t}\n\n\ttmplData := struct {\n\t\tImports []string\n\t\tConfig string\n\t}{\n\t\tImports: projectPack.Imports,\n\t\tConfig: fmt.Sprintf(\"%#v\", viper.AllSettings()),\n\t}\n\tstartFileName := path.Join(projectDir, fileName)\n\tgenerate.CreateFileFromTemplate(startFileName, tmpl, tmplData)\n\tcmd := exec.Command(\"go\", \"run\", startFileName)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tcmd.Run()\n}",
"func TestGenerator_Generate(t *testing.T) {\n\timportmap := map[string]string{\n\t\t\"policy/v1beta1/value_type.proto\": \"istio.io/api/policy/v1beta1\",\n\t\t\"mixer/adapter/model/v1beta1/extensions.proto\": \"istio.io/api/mixer/adapter/model/v1beta1\",\n\t\t\"gogoproto/gogo.proto\": \"github.com/gogo/protobuf/gogoproto\",\n\t\t\"google/protobuf/duration.proto\": \"github.com/gogo/protobuf/types\",\n\t}\n\n\ttests := []struct {\n\t\tname string\n\t\tfdsFiles map[string]string // FDS and their package import paths\n\t\twant string\n\t}{\n\t\t{\"AllTemplates\", map[string]string{\n\t\t\t\"testdata/check/template.descriptor\": \"istio.io/istio/mixer/template/list\",\n\t\t\t\"testdata/report2/template.descriptor\": \"istio.io/istio/mixer/template/metric\",\n\t\t\t\"testdata/quota/template.descriptor\": \"istio.io/istio/mixer/template/quota\",\n\t\t\t\"testdata/apa/template.descriptor\": \"istio.io/istio/mixer/template/apa\",\n\t\t\t\"testdata/report1/template.descriptor\": \"istio.io/istio/mixer/template/log\"},\n\t\t\t\"testdata/template.gen.go.golden\"},\n\t}\n\tfor _, v := range tests {\n\t\tt.Run(v.name, func(t *testing.T) {\n\t\t\ttestTmpDir := path.Join(os.TempDir(), \"bootstrapTemplateTest\")\n\t\t\t_ = os.MkdirAll(testTmpDir, os.ModeDir|os.ModePerm)\n\t\t\toutFile, err := os.Create(path.Join(testTmpDir, path.Base(v.want)))\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tdefer func() {\n\t\t\t\tif !t.Failed() {\n\t\t\t\t\tif removeErr := os.RemoveAll(testTmpDir); removeErr != nil {\n\t\t\t\t\t\tt.Logf(\"Could not remove temporary folder %s: %v\", testTmpDir, removeErr)\n\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tt.Logf(\"Generated data is located at '%s'\", testTmpDir)\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tg := Generator{OutFilePath: outFile.Name(), ImportMapping: importmap}\n\t\t\tif err := g.Generate(v.fdsFiles); err != nil {\n\t\t\t\tt.Fatalf(\"Generate(%s) produced an error: %v\", v.fdsFiles, err)\n\t\t\t}\n\n\t\t\tif same := 
fileCompare(outFile.Name(), v.want, t.Errorf); !same {\n\t\t\t\tt.Errorf(\"Files %v and %v were not the same.\", outFile.Name(), v.want)\n\t\t\t}\n\t\t})\n\t}\n}",
"func TestUsecaseInteractorFile(t *testing.T){\n\n\t// Build\n\tstatement, err := testInteractorGenerator.File(testEntity)\n\n\t// Return\n\tif err != nil {\n\t\tt.Errorf(`File() failed with error %v`, err)\n\t}\n\n\tf, err := os.Create(\"./testing/usecase/interactor/created/\" + testOutputUsecaseInteractorFileName)\n\tif err != nil {\n\t\tt.Errorf(`File() failed with error %v`, err)\n\t}\n\tbuf := &bytes.Buffer{}\n\terr = statement.Render(buf)\n\tif err != nil {\n\t\tt.Errorf(`File() failed with error %v`, err)\n\t}\n\t_, err = f.Write(buf.Bytes())\n\n\tif buf.String() != testOutputUsecaseInteractorFile {\n\t\tt.Errorf(`File() failed; want \"%s\", got \"%s\"`, testOutputUsecaseInteractorFile, buf.String())\n\t}\n\t\n}",
"func generateFromTemplate(fullpath, templateName, actualTemplate string, data interface{}) {\n\tfullpath = filepath.FromSlash(fullpath)\n\tfile, err := os.Create(fullpath)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error: cannot create component file %s.\\n\"+\n\t\t\t\"The error is: %v\", fullpath, err)\n\t}\n\n\tt := template.Must(template.New(templateName).Parse(actualTemplate))\n\terr = t.Execute(file, data)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error: cannot generate from template %s.\\n\"+\n\t\t\t\"The error is: %v\", fullpath, err)\n\t}\n\n\tlog.Successf(\"Created %v\\n\", fullpath)\n}",
"func TestTemplatingAllVariables(t *testing.T) {\n\n\tsandbox, cleanup := cmdtest.TestSetupWithSandbox(t, true)\n\tdefer cleanup()\n\n\t// gets all the necessary data from a setup function\n\timageNamespace, imageRegistry, stackYaml, labels, err := setupStackPackageTests(sandbox.TestDataPath)\n\tif err != nil {\n\t\tt.Fatalf(\"Error during setup: %v\", err)\n\t}\n\n\t// creates templating.txt file where templating variables will appear\n\ttemplatingPath := filepath.Join(sandbox.TestDataPath, \"templating\", \"templating.txt\")\n\terr = os.MkdirAll(filepath.Dir(templatingPath), 0777)\n\tif err != nil {\n\t\tt.Fatalf(\"Error creating templating dir: %v\", err)\n\t}\n\tfile, err := os.Create(templatingPath)\n\tif err != nil {\n\t\tt.Fatalf(\"Error creating templating file: %v\", err)\n\t}\n\n\t// write some text to file\n\t_, err = file.WriteString(\"{{test}}, id: {{.stack.id}}, name: {{.stack.name}}, version: {{.stack.version}}, description: {{.stack.description}}, tag: {{.stack.tag}}, maintainers: {{.stack.maintainers}}, semver.major: {{.stack.semver.major}}, semver.minor: {{.stack.semver.minor}}, semver.patch: {{.stack.semver.patch}}, semver.majorminor: {{.stack.semver.majorminor}}, image.namespace: {{.stack.image.namespace}}, image.registry: {{.stack.image.registry}}, customvariable1: {{.stack.variable1}}, customvariable2: {{.stack.variable2}}\")\n\tif err != nil {\n\t\tt.Fatalf(\"Error writing to file: %v\", err)\n\t}\n\n\t// save file changes\n\terr = file.Sync()\n\tif err != nil {\n\t\tt.Fatalf(\"Error saving file: %v\", err)\n\t}\n\n\t// create the template metadata\n\ttemplateMetadata, err := cmd.CreateTemplateMap(labels, stackYaml, imageNamespace, imageRegistry)\n\tif err != nil {\n\t\tt.Fatalf(\"Error creating template map: %v\", err)\n\t}\n\n\t// apply templating to stack\n\terr = cmd.ApplyTemplating(templatingPath, templateMetadata)\n\tif err != nil {\n\t\tt.Fatalf(\"Error applying template: %v\", err)\n\t}\n\n\t// read the whole file at once\n\tb, err := 
ioutil.ReadFile(templatingPath)\n\tif err != nil {\n\t\tt.Fatalf(\"Error reading templating file: %v\", err)\n\t}\n\ts := string(b)\n\tt.Log(s)\n\tif !strings.Contains(s, \"{{test}}, id: starter, name: Starter Sample, version: 0.1.1, description: Runnable starter stack, copy to create a new stack, tag: appsody/starter:SNAPSHOT, maintainers: Henry Nash <[email protected]>, semver.major: 0, semver.minor: 1, semver.patch: 1, semver.majorminor: 0.1, image.namespace: appsody, image.registry: dev.local, customvariable1: value1, customvariable2: value2\") {\n\t\tt.Fatal(\"Templating text did not match expected values\")\n\t}\n\n}",
"func ExampleTemplateProcessor_generate_files() {\n\tapi := new(struct {\n\t\tKey string `json:\"key\" yaml:\"key\"`\n\t\tValue string `json:\"value\" yaml:\"value\"`\n\t})\n\t// create the template\n\ttemplateFn := framework.TemplateProcessor{\n\t\t// Templates input\n\t\tTemplateData: api,\n\t\t// Templates\n\t\tResourceTemplates: []framework.ResourceTemplate{{\n\t\t\tTemplates: parser.TemplateFiles(\"testdata/example/templatefiles/deployment.template.yaml\"),\n\t\t}},\n\t}\n\tcmd := command.Build(templateFn, command.StandaloneEnabled, false)\n\t// mimic standalone mode: testdata/template/config.yaml will be parsed into `api`\n\tcmd.SetArgs([]string{filepath.Join(\"testdata\", \"example\", \"templatefiles\", \"config.yaml\")})\n\tif err := cmd.Execute(); err != nil {\n\t\t_, _ = fmt.Fprintf(cmd.ErrOrStderr(), \"%v\\n\", err)\n\t}\n\n\t// Output:\n\t// # Copyright 2021 The Kubernetes Authors.\n\t// # SPDX-License-Identifier: Apache-2.0\n\t//\n\t// apiVersion: apps/v1\n\t// kind: Deployment\n\t// metadata:\n\t// name: foo\n\t// namespace: default\n\t// annotations:\n\t// a: b\n}",
"func (tg *Generate) WriteTmplToFile(filePath string, tmpl string, data interface{}) (err error) {\n\tfile, err := os.Create(filePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tt := template.Must(template.New(\"toFile\").Funcs(template.FuncMap{\n\t\t\"ToLower\": func(str string) string {\n\t\t\treturn strings.ToLower(str)\n\t\t},\n\t}).Funcs(\n\t\ttemplate.FuncMap{\n\t\t\t\"Initial\": func(str string) string {\n\t\t\t\tif len(str) > 0 {\n\t\t\t\t\treturn string(strings.ToLower(str)[0])\n\t\t\t\t}\n\t\t\t\treturn \"x\"\n\t\t\t},\n\t\t}).Funcs(template.FuncMap{\n\t\t\"Counter\": func(str string) string {\n\t\t\tif s, err := strconv.Atoi(str); err == nil {\n\t\t\t\tcount := s + 1\n\t\t\t\treturn strconv.Itoa(count)\n\t\t\t}\n\t\t\treturn \"0\"\n\t\t}}).Funcs(template.FuncMap{\n\t\t\"GoType\": func(tpe string) string {\n\t\t\tif scler := tg.Scalars.GetScalar(tpe); scler != nil {\n\t\t\t\treturn scler.GoType\n\t\t\t}\n\t\t\treturn \"\"\n\t\t}}).Funcs(template.FuncMap{\n\t\t\"GrpcType\": func(tpe string) string {\n\t\t\tif scler := tg.Scalars.GetScalar(tpe); scler != nil {\n\t\t\t\treturn scler.GrpcType\n\t\t\t}\n\t\t\treturn \"\"\n\t\t}}).Funcs(template.FuncMap{\n\t\t\"GrpcArrayModel\": func(tpe string) string {\n\t\t\tss := strings.Split(tpe, \"[]\")\n\t\t\tif len(ss) > 1 {\n\t\t\t\treturn ss[1]\n\t\t\t}\n\t\t\treturn \"\"\n\t\t}}).Funcs(template.FuncMap{\n\t\t\"GoRegExFormat\": func(str string) string {\n\t\t\tif str == \"\" {\n\t\t\t\treturn \"\"\n\t\t\t}\n\t\t\tstr = strings.Trim(str, \" \")\n\t\t\t//strbuff := []byte(str)\n\t\t\tif len(str) > 2 {\n\t\t\t\t//\tstrbuff[0] = 96\n\t\t\t\t//\tstrbuff[len(strbuff)-1] = 96\n\t\t\t\tstroriginal := str\n\t\t\t\tstr = strings.Replace(str[1:len(str)-1], \"`\", `\"`+\"`\"+`\"`, -2)\n\t\t\t\treturn string(stroriginal[0]) + str + string(stroriginal[len(stroriginal)-1])\n\t\t\t}\n\t\t\treturn string(str)\n\t\t}}).Parse(tmpl))\n\terr = t.Execute(file, data)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func Template(output string, tmplPath string, data any) error {\n\tb, err := os.ReadFile(tmplPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttmpl := template.Must(template.New(\"test\").Parse(string(b)))\n\tvar sb strings.Builder\n\terr = tmpl.Execute(&sb, data)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.WriteFile(output, []byte(sb.String()), 0644) // nolint:gosec // Non-crypto use\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func createFileFromTemplate(input string, output string) {\n\tt := loadTemplateFromFile(input)\n\n\tf, err := os.Create(output)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\terr = t.Execute(f, packageName)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}",
"func (t *templater) templateFile(workDir string, outDir string, file os.FileInfo, d map[string]interface{}) {\n\tif strings.Contains(file.Name(), \"yaml\") {\n\n\t\tfilePath := workDir + \"/\" + file.Name()\n\t\ttEx := templ.New(file.Name())\n\t\ttEx.Funcs(templateFuncs(workDir))\n\t\ttEx.ParseFiles(filePath)\n\t\tb := bytes.NewBuffer([]byte{})\n\t\terr := tEx.Execute(b, d)\n\t\tif err != nil {\n\t\t\tlog.Error(err, \"Failed to execute template\")\n\t\t}\n\t\tnewF, err := os.Create(outDir + \"/\" + file.Name())\n\t\tif err != nil {\n\t\t\tlog.Error(err, \"Failed to create file\", \"file\", file.Name())\n\t\t\treturn\n\t\t}\n\t\tnewF.Write(b.Bytes())\n\t\tnewF.Close()\n\t}\n}",
"func createTestFile(content string) string {\n\tdoc, err := ioutil.TempFile(\"\", \"testFile\")\n\tif err != nil {\n\t\tpanic(\"cannot create the temporary test file\")\n\t}\n\tif _, err := doc.Write([]byte(content)); err != nil {\n\t\tpanic(\"cannot write to the temporary test file\")\n\t}\n\treturn doc.Name()\n}",
"func CreateFile(filePath string, fileTemplate string, values interface{}) {\n\tfile, err := os.Create(filePath)\n\tif err != nil {\n\t\tFatalF(\"An error occurred:\\n %s \\n\", err.Error())\n\t}\n\n\ttmpl, err := template.New(\"test\").Parse(fileTemplate)\n\tif err != nil {\n\t\tFatalF(\"An error occurred:\\n %s \\n\", err.Error())\n\t}\n\n\terr = tmpl.Execute(file, values)\n\tif err != nil {\n\t\tFatalF(\"An error occurred:\\n %s \\n\", err.Error())\n\t}\n}",
"func (cfg *Config) GenerateSampleConfig(dstDir string) error {\n\n\t// get a *Template\n\tat, err := prepareTemplate(\"/templates/config.json.gotmpl\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// check the destination file-path and create if required\n\t_, err = os.Stat(dstDir)\n\tif err != nil {\n\t\tos.Mkdir(dstDir, 0755)\n\t}\n\n\tvar tfDir string\n\n\tfor i := 0; i < 2; i++ {\n\n\t\tswitch i {\n\t\tcase 0:\n\t\t\ttfDir = dstDir + \"/.dev.config.json\"\n\t\t\tcfg.Env = \"dev\"\n\t\tcase 1:\n\t\t\ttfDir = dstDir + \"/.prd.config.json\"\n\t\t\tcfg.Env = \"prod\"\n\t\tdefault:\n\n\t\t}\n\n\t\t// create the .xxx.config.json file\n\t\tf, err := os.Create(tfDir)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"GenerateSampleConfig: %v\\n\", err)\n\t\t\treturn err\n\t\t}\n\t\tdefer f.Close()\n\n\t\t// set permissions\n\t\terr = f.Chmod(0755)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"GenerateSampleConfig: %v\\n\", err)\n\t\t\treturn err\n\t\t}\n\n\t\t// execute the config.json.gotmpl template using new file .xxx.config.json as a target\n\t\terr = at.Execute(f, cfg)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"GenerateSampleConfig: %v\\n\", err)\n\t\t\treturn err\n\t\t}\n\t\tlog.Println(\"generated:\", tfDir)\n\t}\n\treturn nil\n}",
"func generateFileFromTemplate(t template.Template, data interface{}) (string, error) {\n\t// generate temporary file\n\ttmpfile, err := ioutil.TempFile(\"\", \"lift-*\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer tmpfile.Close()\n\n\t// execute the template, saving the result in the tempfile\n\tif err := t.Execute(tmpfile, data); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"template\": t.Name(),\n\t\t\"file\": tmpfile.Name(),\n\t}).Debug(\"parsed template to file\")\n\n\t// return handle to the temp file\n\treturn tmpfile.Name(), nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ParseFunctions parses a file and returns information about its HTTP handlers | func ParseFunctions(filePath string) *TemplateValues {
fset := token.NewFileSet()
f, err := parser.ParseFile(fset, filePath, nil, parser.ParseComments)
if err != nil {
log.Fatal(err)
}
var funcInfos []FunctionInfo
packageName := fmt.Sprint(f.Name)
containsMux := false
for _, decl := range f.Decls {
switch t := decl.(type) {
case *ast.FuncDecl:
responseWriterParamExists := false
requestParamExists := false
for _, param := range t.Type.Params.List {
switch t2 := param.Type.(type) {
case *ast.SelectorExpr:
paramName := fmt.Sprint(t2.Sel.Name)
if paramName == "ResponseWriter" {
responseWriterParamExists = true
}
case *ast.StarExpr:
paramName := fmt.Sprint(t2.X)
if paramName == "&{http Request}" {
requestParamExists = true
}
}
}
if responseWriterParamExists && requestParamExists {
muxVars := getMuxVars(t)
if len(muxVars) > 0 {
containsMux = true
}
funcInfo := FunctionInfo{
Name: fmt.Sprint(t.Name),
MuxVars: muxVars,
}
funcInfos = append(funcInfos, funcInfo)
}
}
}
templateValues := TemplateValues{
FuncInfo: funcInfos,
PackageName: packageName,
ContainsMux: containsMux,
}
return &templateValues
} | [
"func Parse(urlStr string, key ...interface{}) (func(http.Handler) http.Handler,\n\terror) {\n\n\tu, err := mgourl.Parse(urlStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts, err := mgo.Dial(u.ShortString())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tk := getkey(key...)\n\tif k == nil {\n\t\tk = u.Database()\n\t}\n\n\treturn Handler(s, u.Database(), k), nil\n}",
"func (i Interface) parseFunctions(contentLines []string) []Function {\n\tvar functions []Function\n\tfor _, line := range contentLines {\n\t\tif isPureVirtualDefinition(line) {\n\t\t\tnewFunction := NewFunction(line)\n\t\t\tfunctions = append(functions, *newFunction)\n\t\t}\n\t}\n\treturn functions\n}",
"func (c *Config) ParseFunction(path string) string {\n\tpath = strings.TrimPrefix(path, c.Path)\n\tpos := strings.Index(path, \"?\")\n\tif pos > -1 {\n\t\tpath = path[:pos]\n\t}\n\tpos = strings.Index(path, \"#\")\n\tif pos > -1 {\n\t\tpath = path[:pos]\n\t}\n\n\treturn strings.Split(path, \"/\")[0]\n}",
"func ParseFile(filename string) (syntax.Lambda, error) {\n\tb, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn syntax.LVar{Name: \"error\"}, err\n\t}\n\tl, err := parseBytes(b)\n\treturn l, err\n}",
"func Parser() {\n\tif err = conf.Parse(configLocation); err != nil {\n\t\tlogger.Logger.Panicln(err)\n\t}\n\n\tparseMysql()\n\tparseGrpc()\n\tparseHTTP()\n\tparseVolumeHandle()\n}",
"func (a *App) Parse() func(http.ResponseWriter, *http.Request, httprouter.Params) {\n\treturn func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\t\tfile, _, err := r.FormFile(\"content\")\n\t\tif err != nil {\n\t\t\terr = errors.Wrap(err, \"read multi part file\")\n\t\t\tweb.RespondError(w, r, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\t\tdefer file.Close()\n\t\tmimeType, err := a.Tika.Detect(context.Background(), file)\n\t\tif err != nil {\n\t\t\terr = errors.Wrap(err, \"tika detect mime\")\n\t\t\tweb.RespondError(w, r, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\t\tfile.Seek(0, 0)\n\n\t\tswitch mimeType {\n\t\tcase \"application/octet-stream\":\n\t\t\tweb.RespondError(w, r, http.StatusUnsupportedMediaType, errors.New(\"unknown mime type\"))\n\t\t\treturn\n\t\tcase \"text/plain\":\n\t\t\t// markdown will register as text/plain mimeType\n\t\t\t// plain text or markdown does not need to be parsed by apache tika\n\t\t\tbuf := new(bytes.Buffer)\n\t\t\tbuf.ReadFrom(file)\n\n\t\t\ts := buf.String()\n\t\t\tbody := stripmd.Strip(s)\n\n\t\t\tresp := shared.TikaResponse{\n\t\t\t\tBody: body,\n\t\t\t\tDocumentType: mimeType,\n\t\t\t}\n\n\t\t\tweb.Respond(w, r, http.StatusOK, resp)\n\t\t\treturn\n\t\t}\n\n\t\tbody, err := a.Tika.Parse(context.Background(), file)\n\t\tif err != nil {\n\t\t\terr = errors.Wrap(err, \"tika parse\")\n\t\t\tweb.RespondError(w, r, http.StatusInternalServerError, err)\n\t\t\treturn\n\t\t}\n\n\t\tresp := shared.TikaResponse{\n\t\t\tBody: body,\n\t\t\tDocumentType: mimeType,\n\t\t}\n\n\t\tweb.Respond(w, r, http.StatusOK, resp)\n\t}\n}",
"func (r *Router) ParseHandler(w http.ResponseWriter, req *http.Request) {\n\tparams := req.URL.Query()\n\tif len(params) > 0 {\n\t\tw.WriteHeader(200)\n\t\ts := fmt.Sprintf(\"%v\", params)\n\t\tw.Write([]byte(s))\n\t\treturn\n\t}\n}",
"func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {\n\n\tvar m Middleware\n\terr := m.UnmarshalCaddyfile(h.Dispenser)\n\treturn m, err\n}",
"func MiddlewareHandleParseDidStart(s *Service, p *graphql.Params) parseFinishFuncHandler {\n\tfs := map[string]graphql.ParseFinishFunc{}\n\tfor _, m := range s.mware {\n\t\tctx, finishFn := m.ParseDidStart(p.Context)\n\t\tp.Context = ctx\n\t\tfs[m.Name()] = finishFn\n\t}\n\treturn func(err error) {\n\t\tfor _, fn := range fs {\n\t\t\tfn(err)\n\t\t}\n\t}\n}",
"func ParseEventHandler(conf *config.Config) handlers.Handler {\n\n\tvar eventHandler handlers.Handler\n\n\teventHandler = new(handlers.HandlerConfig)\n\tif err := eventHandler.Init(conf); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn eventHandler\n}",
"func getHandlers(path string) (handlers Handlers, err error) {\n\tdata, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\tdanger(\"Cannot read handlers.yaml file\", err)\n\t\treturn\n\t}\n\terr = yaml.Unmarshal(data, &handlers)\n\tif err != nil {\n\t\tdanger(\"Cannot unmarshal handlers.yaml file\", err)\n\t\treturn\n\t}\n\treturn\n}",
"func ParseActionBody(regex string, actionContents BodyContents) ([]Endpoint, error) {\r\n\t// `\"\\w+\":$`\r\n\t// Defines the regex representing function calls\r\n\tfunctionCallRegexp := regexp.MustCompile(regex)\r\n\tfunctionCalMap := make([]Endpoint, 0)\r\n\r\n\t// Gets the list of function available on the function map,\r\n\t// that we can call\r\n\tableToCall := reflect.ValueOf(FuncsStorage).MapKeys()\r\n\tcall := \"\"\r\n\tfor _, v := range ableToCall {\r\n\t\tcall += v.String() + \" \"\r\n\t}\r\n\r\n\t// Iterates through the funcs: part of the action, and extracts it's functions allong with it's parameters\r\n\tfor k, v := range actionContents.FuncsContent {\r\n\r\n\t\tcheck, err := CheckTypeAndConvert(v)\r\n\t\tif err != nil {\r\n\t\t\tDQGLogger.Println(\"Unable to convert to golang data type\")\r\n\t\t\treturn nil, err\r\n\t\t}\r\n\t\t// Skips not existing function calls\r\n\t\tif !strings.Contains(call, reflect.ValueOf(check).String()) {\r\n\t\t\tcontinue\r\n\t\t}\r\n\t\t// Checks if the current line is not a function call\r\n\t\t// if its not a function call it will search and get the following lines\r\n\t\t// as the previous function call parameters.\r\n\t\tif len(functionCallRegexp.FindAllString(v, -1)) != 0 {\r\n\t\t\t// Sets up the parameters list\r\n\t\t\tparams := make([]interface{}, 0)\r\n\t\t\tfor _, j := range actionContents.FuncsContent[k+1:] {\r\n\r\n\t\t\t\t// Skips iteration if curret line is a function call\r\n\t\t\t\t// after skiping, all the paremeters found\r\n\t\t\t\t// are attributted to the previous function call\r\n\t\t\t\tif string(j[len(j)-1]) == \":\" {\r\n\t\t\t\t\tbreak\r\n\t\t\t\t}\r\n\t\t\t\t// Converts the current line content, into its appropriate go data type\r\n\t\t\t\tres, err := CheckTypeAndConvert(j)\r\n\t\t\t\tif err != nil {\r\n\t\t\t\t\treturn nil, err\r\n\t\t\t\t}\r\n\t\t\t\t// Appends the converted value to the parameter array\r\n\t\t\t\tparams = append(params, res)\r\n\t\t\t}\r\n\t\t\t// Checks if the number of 
params insside params is equal to the requeired number to call the function\r\n\t\t\tpnum, err := GetFunctionParamsNum(reflect.ValueOf(FuncsStorage[v[1:len(v)-2]]))\r\n\t\t\tif err != nil || len(params) != pnum {\r\n\t\t\t\treturn nil, errors.New(\"bad parameters\")\r\n\t\t\t}\r\n\t\t\t// If all went well, a function call and its params will be appended in the functionCalMap, and returned\r\n\t\t\tfunctionCalMap = append(functionCalMap, Endpoint{\r\n\t\t\t\tFuncName: v[1 : len(v)-2],\r\n\t\t\t\tParams: params,\r\n\t\t\t})\r\n\t\t}\r\n\t}\r\n\treturn functionCalMap, nil\r\n}",
"func ParseFile(filename string) (interface{}, error) {\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\treturn Parse(filename, f)\n}",
"func Parse(filename, data string, m ParseMap) (*ast.API, parse.ErrorList) {\n\tvar api *ast.API\n\tparser := func(p *parse.Parser, cst *parse.Branch) {\n\t\tapi = requireAPI(p, cst)\n\t}\n\terrors := parse.Parse(parser, filename, data, parse.NewSkip(\"//\", \"/*\", \"*/\"), m)\n\treturn api, errors\n}",
"func Parse(f *os.File) (*Code, error) {\n\tlanguage, err := detectLanguage(f)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error detecting language: %v\", err)\n\t}\n\tparserMap := map[string]Parser{\n\t\tgolang: &GoParser{},\n\t}\n\treturn parserMap[language].Parse(f)\n}",
"func ParseGo(code string) (functions map[uint64]*function.Function) {\n\n\tcodeLines := strings.Split(code, \"\\n\")\n\n\tfunctions = make(map[uint64]*function.Function)\n\n\tvar (\n\t\tstartLine uint64\n\t\tendLine uint64\n\t\tcomment string\n\t\tfunctionContent string\n\t\tstate = commentSearch\n\t)\n\n\tfor idx, line := range codeLines {\n\t\tlineIdx := uint64(idx + 1)\n\t\t// Searching for comment or \"func\"/\"type\" keywords\n\t\tstrings.ReplaceAll(line, \"\\r\", \"\")\n\n\t\t// We found a comment. Transition state to commentStart\n\t\tif strings.HasPrefix(line, \"//\") && state != commentStart {\n\t\t\tstate = commentStart\n\t\t\tstartLine = lineIdx\n\n\t\t} else if strings.Contains(line, \"func\") || strings.Contains(line, \"type\") {\n\n\t\t\t// we found the function keyword so we transition to funcStart state\n\t\t\tif state == commentSearch {\n\t\t\t\t// If we're coming from commentSearch, that means that we didn't have a comment so we set startLine to idx\n\t\t\t\tstartLine = lineIdx\n\n\t\t\t}\n\t\t\t// otherwise, we're coming from commentStart, that means that we had a comment so we leave startLine as it is\n\t\t\tstate = funcStart\n\t\t} else if strings.HasPrefix(line, \"}\") {\n\t\t\tstate = funcEnd\n\t\t\tendLine = lineIdx\n\n\t\t} else if !(strings.HasPrefix(line, \"//\")) && state != funcStart {\n\t\t\tstate = commentSearch\n\t\t\tcomment = \"\"\n\t\t\tstartLine = 0\n\t\t\tendLine = 0\n\n\t\t}\n\n\t\tswitch state {\n\t\tcase commentSearch:\n\t\t\tcontinue\n\t\tcase commentStart:\n\t\t\tcomment += fmt.Sprintf(\"%v\\n\", line)\n\t\tcase funcStart:\n\t\t\tfunctionContent += fmt.Sprintf(\"%v\\n\", line)\n\n\t\tcase funcEnd:\n\t\t\t// add the closing brace\n\t\t\tfunctionContent += fmt.Sprintf(\"%v\\n\", line)\n\t\t\tendLine = uint64(idx)\n\n\t\t\t// create a new function object with the information we got\n\t\t\tf := function.NewFunction(comment, functionContent, \"noNameYet\", 0, startLine, endLine)\n\n\t\t\t// add that to our 
map\n\t\t\tfunctions[uint64(f.FuncID)] = f\n\n\t\t\t// reset our state machine\n\t\t\tstartLine = 0\n\t\t\tcomment = \"\"\n\t\t\tfunctionContent = \"\"\n\t\t\tstate = commentSearch\n\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\n\t}\n\n\treturn\n}",
"func (app *APP) Parse() error {\n\tdataHandlers := map[string]dataHandler{\n\t\tOrganisationsKey: app.LoadOrganisationsFromJSON,\n\t\tUsersKey: app.LoadUsersFromJSON,\n\t\tTicketsKey: app.LoadTicketsFromJSON,\n\t}\n\n\tfor dataType, dataHandler := range dataHandlers {\n\t\terr := dataHandler(app.jsonContents[dataType])\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to read %s data file with error: %s\", dataType, err.Error())\n\t\t}\n\t}\n\treturn nil\n}",
"func Parse(file string) ([]Command, error) {\n\t// Create the AST by parsing src.\n\tfset := token.NewFileSet() // positions are relative to fset\n\tf, err := parser.ParseFile(fset, file, nil, 4)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfuncs := []Command{}\n\tif len(f.Comments) == 0 ||\n\t\tf.Comments[0].Pos() != 1 ||\n\t\tlen(f.Comments[0].List) == 0 ||\n\t\tf.Comments[0].List[0].Text != \"//go:build matr\" {\n\t\treturn funcs, errors.New(\"invalid Matrfile: matr build tag missing or incorrect\")\n\t}\n\n\tfor _, d := range f.Decls {\n\t\tt, ok := d.(*ast.FuncDecl)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tfuncs = append(funcs, parseCmd(t))\n\t}\n\n\treturn funcs, nil\n}",
"func handleParse(h Handler, content string) ([]string, error) {\n\tvar (\n\t\tp CmdParser\n\t\tok bool\n\t)\n\tif p, ok = h.(CmdParser); !ok {\n\t\treturn cmdParserDefault(content), nil\n\t}\n\n\treturn p.Parse(content)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deprecated: Use ShareType.Descriptor instead. | func (ShareType) EnumDescriptor() ([]byte, []int) {
return file_share_distro_proto_rawDescGZIP(), []int{0}
} | [
"func (*FileShare) Descriptor() ([]byte, []int) {\n\treturn file_share_share_proto_rawDescGZIP(), []int{0}\n}",
"func (*ShareRequest) Descriptor() ([]byte, []int) {\n\treturn file_drand_control_proto_rawDescGZIP(), []int{6}\n}",
"func (*ShareDistribution) Descriptor() ([]byte, []int) {\n\treturn file_share_distro_proto_rawDescGZIP(), []int{0}\n}",
"func (*ShareSaveRequest) Descriptor() ([]byte, []int) {\n\treturn file_share_share_proto_rawDescGZIP(), []int{1}\n}",
"func (*GenerateFileShareLinkRequest) Descriptor() ([]byte, []int) {\n\treturn file_space_proto_rawDescGZIP(), []int{38}\n}",
"func (*TargetImpressionShare) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v14_common_bidding_proto_rawDescGZIP(), []int{11}\n}",
"func (*SharedURL) Descriptor() ([]byte, []int) {\n\treturn file_board_board_proto_rawDescGZIP(), []int{7}\n}",
"func (*GenerateFileShareLinkResponse) Descriptor() ([]byte, []int) {\n\treturn file_space_proto_rawDescGZIP(), []int{39}\n}",
"func (*ShareBucketViaIdentityRequest) Descriptor() ([]byte, []int) {\n\treturn file_space_proto_rawDescGZIP(), []int{36}\n}",
"func (*CrOSSelectShareTargetRequest) Descriptor() ([]byte, []int) {\n\treturn file_nearby_share_service_proto_rawDescGZIP(), []int{6}\n}",
"func (*ShareResponse) Descriptor() ([]byte, []int) {\n\treturn file_drand_control_proto_rawDescGZIP(), []int{7}\n}",
"func (*LiquidityProviderFeeShare) Descriptor() ([]byte, []int) {\n\treturn file_vega_proto_rawDescGZIP(), []int{33}\n}",
"func (*VSSShareWithAuthSigMessage) Descriptor() ([]byte, []int) {\n\treturn file_protob_shared_proto_rawDescGZIP(), []int{2}\n}",
"func (*ShareBucketViaIdentityResponse) Descriptor() ([]byte, []int) {\n\treturn file_space_proto_rawDescGZIP(), []int{37}\n}",
"func (sm *Manager) Share(ctx context.Context, md *provider.ResourceId, g *ocm.ShareGrant, name string,\n\tpi *ocmprovider.ProviderInfo, pm string, owner *userpb.UserId, token string, st ocm.Share_ShareType) (*ocm.Share, error) {\n\n\t// Since both OCMCore and OCMShareProvider use the same package, we distinguish\n\t// between calls received from them on the basis of whether they provide info\n\t// about the remote provider on which the share is to be created.\n\t// If this info is provided, this call is on the owner's mesh provider and so\n\t// we call the CreateOCMCoreShare method on the remote provider as well,\n\t// else this is received from another provider and we only create a local share.\n\tvar isOwnersMeshProvider bool\n\tvar apiMethod string\n\tvar username string\n\tif pi != nil {\n\t\tisOwnersMeshProvider = true\n\t\tapiMethod = \"addSentShare\"\n\t\tusername = getUsername(ctx)\n\t\ttoken = randSeq(10)\n\t} else {\n\t\tapiMethod = \"addReceivedShare\"\n\t\tusername = g.Grantee.GetUserId().OpaqueId\n\t}\n\n\tvar userID *userpb.UserId\n\tif !isOwnersMeshProvider {\n\t\t// Since this call is on the remote provider, the owner of the resource is expected to be specified.\n\t\tif owner == nil {\n\t\t\treturn nil, errors.New(\"nextcloud: owner of resource not provided\")\n\t\t}\n\t\tuserID = owner\n\t} else {\n\t\tuserID = ctxpkg.ContextMustGetUser(ctx).GetId()\n\t}\n\n\t// do not allow share to myself if share is for a user\n\tif g.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_USER && utils.UserEqual(g.Grantee.GetUserId(), userID) {\n\t\treturn nil, errors.New(\"nextcloud: user and grantee are the same\")\n\t}\n\n\ts := &ocm.Share{\n\t\tName: name,\n\t\tResourceId: md,\n\t\tPermissions: g.Permissions,\n\t\tGrantee: g.Grantee,\n\t\tOwner: userID,\n\t\tCreator: userID,\n\t\tShareType: st,\n\t}\n\n\tvar encShare []byte\n\tvar err error\n\n\tif isOwnersMeshProvider {\n\t\t// adding the webdav sharedSecret in the Grantee because Share itself doesn't have an 
Opaque field,\n\t\t// see https://cs3org.github.io/cs3apis/#cs3.storage.provider.v1beta1.Grantee\n\t\t// and https://cs3org.github.io/cs3apis/#cs3.sharing.ocm.v1beta1.Share\n\t\ts.Grantee.Opaque = &typespb.Opaque{\n\t\t\tMap: map[string]*typespb.OpaqueEntry{\n\t\t\t\t\"sharedSecret\": {\n\t\t\t\t\tDecoder: \"plain\",\n\t\t\t\t\tValue: []byte(token),\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tencShare, err = utils.MarshalProtoV1ToJSON(s)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\t// adding the webdav sharedSecret and remote share id (called the \"ProviderID\" in OCM) in the Grantee because Share itself doesn't have an Opaque field,\n\t\t// see https://cs3org.github.io/cs3apis/#cs3.storage.provider.v1beta1.Grantee\n\t\t// and https://cs3org.github.io/cs3apis/#cs3.sharing.ocm.v1beta1.Share\n\t\ts.Grantee.Opaque = &typespb.Opaque{\n\t\t\tMap: map[string]*typespb.OpaqueEntry{\n\t\t\t\t\"sharedSecret\": {\n\t\t\t\t\tDecoder: \"plain\",\n\t\t\t\t\tValue: []byte(token),\n\t\t\t\t},\n\t\t\t\t\"remoteShareId\": {\n\t\t\t\t\tDecoder: \"plain\",\n\t\t\t\t\tValue: g.Grantee.Opaque.Map[\"remoteShareId\"].Value,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tencShare, err = utils.MarshalProtoV1ToJSON(&ocm.ReceivedShare{\n\t\t\tShare: s,\n\t\t\tState: ocm.ShareState_SHARE_STATE_PENDING,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t_, body, err := sm.do(ctx, Action{apiMethod, string(encShare)}, username)\n\n\ts.Id = &ocm.ShareId{\n\t\tOpaqueId: string(body),\n\t}\n\tnow := time.Now().UnixNano()\n\ts.Ctime = &typespb.Timestamp{\n\t\tSeconds: uint64(now / 1000000000),\n\t\tNanos: uint32(now % 1000000000),\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif isOwnersMeshProvider {\n\t\t// token, ok := ctxpkg.ContextGetToken(ctx)\n\t\t// if !ok {\n\t\t// \treturn nil, errors.New(\"Could not get token from context\")\n\t\t// }\n\t\tvar protocol map[string]interface{}\n\t\tif st == ocm.Share_SHARE_TYPE_TRANSFER {\n\t\t\tprotocol = 
map[string]interface{}{\n\t\t\t\t\"name\": \"datatx\",\n\t\t\t\t\"options\": map[string]string{\n\t\t\t\t\t\"permissions\": pm,\n\t\t\t\t\t\"token\": token, // FIXME: Where is the token for datatx generated?\n\t\t\t\t},\n\t\t\t}\n\t\t} else {\n\t\t\tprotocol = map[string]interface{}{\n\t\t\t\t\"name\": \"webdav\",\n\t\t\t\t\"options\": map[string]string{\n\t\t\t\t\t\"permissions\": pm,\n\t\t\t\t\t\"sharedSecret\": token,\n\t\t\t\t},\n\t\t\t}\n\t\t}\n\t\trequestBodyMap := map[string]interface{}{\n\t\t\t\"shareWith\": g.Grantee.GetUserId().OpaqueId,\n\t\t\t\"name\": name,\n\t\t\t\"providerId\": s.Id.OpaqueId,\n\t\t\t\"owner\": userID.OpaqueId,\n\t\t\t\"protocol\": protocol,\n\t\t\t\"meshProvider\": userID.Idp, // FIXME: move this into the 'owner' string?\n\t\t}\n\t\terr = sender.Send(requestBodyMap, pi)\n\t\tif err != nil {\n\t\t\terr = errors.Wrap(err, \"error sending OCM POST\")\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn s, nil\n}",
"func (*RegenerateShareableLinkIdRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_ads_googleads_v8_services_third_party_app_analytics_link_service_proto_rawDescGZIP(), []int{1}\n}",
"func (sm *Manager) Share(ctx context.Context, md *provider.ResourceInfo, g *collaboration.ShareGrant) (*collaboration.Share, error) {\n\ttype paramsObj struct {\n\t\tMd *provider.ResourceInfo `json:\"md\"`\n\t\tG *collaboration.ShareGrant `json:\"g\"`\n\t}\n\tbodyObj := ¶msObj{\n\t\tMd: md,\n\t\tG: g,\n\t}\n\tbodyStr, err := json.Marshal(bodyObj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, body, err := sm.do(ctx, Action{\"Share\", string(bodyStr)})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\taltResult := &ShareAltMap{}\n\terr = json.Unmarshal(body, &altResult)\n\tif altResult == nil {\n\t\treturn nil, err\n\t}\n\treturn &collaboration.Share{\n\t\tId: altResult.ID,\n\t\tResourceId: altResult.ResourceID,\n\t\tPermissions: altResult.Permissions,\n\t\tGrantee: &provider.Grantee{\n\t\t\tId: altResult.Grantee.ID,\n\t\t},\n\t\tOwner: altResult.Owner,\n\t\tCreator: altResult.Creator,\n\t\tCtime: altResult.Ctime,\n\t\tMtime: altResult.Mtime,\n\t}, err\n}",
"func (*CreateOCMCoreShareRequest) Descriptor() ([]byte, []int) {\n\treturn file_cs3_ocm_core_v1beta1_ocm_core_api_proto_rawDescGZIP(), []int{0}\n}",
"func (*ShareProfileRequest) Descriptor() ([]byte, []int) {\n\treturn file_parca_query_v1alpha1_query_proto_rawDescGZIP(), []int{31}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deprecated: Use ShareDistribution.ProtoReflect.Descriptor instead. | func (*ShareDistribution) Descriptor() ([]byte, []int) {
return file_share_distro_proto_rawDescGZIP(), []int{0}
} | [
"func (*BodyOldPeer) Descriptor() ([]byte, []int) {\n\treturn file_github_com_getamis_alice_crypto_tss_addshare_message_proto_rawDescGZIP(), []int{1}\n}",
"func (*ResourceManifest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_gkehub_v1_membership_proto_rawDescGZIP(), []int{4}\n}",
"func (*Public2Privacy) Descriptor() ([]byte, []int) {\n\treturn file_privacy_proto_rawDescGZIP(), []int{1}\n}",
"func (*ShareRequest) Descriptor() ([]byte, []int) {\n\treturn file_drand_control_proto_rawDescGZIP(), []int{6}\n}",
"func (*PatchCollectorsRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_clarifai_api_service_proto_rawDescGZIP(), []int{161}\n}",
"func (*DistributionChange) Descriptor() ([]byte, []int) {\n\treturn edgelq_applications_proto_v1alpha2_distribution_change_proto_rawDescGZIP(), []int{0}\n}",
"func (*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}",
"func (*GetDelegationTokenResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{4}\n}",
"func (*Privacy2Privacy) Descriptor() ([]byte, []int) {\n\treturn file_privacy_proto_rawDescGZIP(), []int{2}\n}",
"func (*GetDelegationTokenRequestProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{3}\n}",
"func (*AddPeerRequest) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{8}\n}",
"func (*DistributionChange_Removed) Descriptor() ([]byte, []int) {\n\treturn edgelq_applications_proto_v1alpha2_distribution_change_proto_rawDescGZIP(), []int{0, 3}\n}",
"func (*NetProtoTalker) Descriptor() ([]byte, []int) {\n\treturn file_pkg_smgrpc_smgrpc_proto_rawDescGZIP(), []int{1}\n}",
"func (*CMsgSocialFeedRequest) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_proto_rawDescGZIP(), []int{304}\n}",
"func (*CredentialsProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{2}\n}",
"func (*GenerateFileShareLinkRequest) Descriptor() ([]byte, []int) {\n\treturn file_space_proto_rawDescGZIP(), []int{38}\n}",
"func (*NamedSecuritySchemeOrReference) Descriptor() ([]byte, []int) {\n\treturn file_openapiv3_OpenAPIv3_proto_rawDescGZIP(), []int{41}\n}",
"func (*CancelDelegationTokenResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{8}\n}",
"func (*DistributionChange_Added) Descriptor() ([]byte, []int) {\n\treturn edgelq_applications_proto_v1alpha2_distribution_change_proto_rawDescGZIP(), []int{0, 0}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
NewCredentialRepository instance of CredentialRepository | func NewCredentialRepository(db *pgxpool.Pool) CredentialRepository {
return &repo{
DB: db,
}
} | [
"func NewRepo(c backend.Credentials) (*Repo, error) {\n var err error\n r := &Repo{}\n r.Session, err = NewSession(c[\"dbhost\"])\n if err != nil {\n return nil, err\n }\n\n r.Db = r.Session.DB(c[\"dbname\"])\n return r, nil\n}",
"func newCredential(mc *metric, cs *clientset.ClientSet) *Credential {\n\tstg := new(Credential)\n\tstg.cs = cs\n\topt := cc.FeedServer().FSLocalCache\n\n\tstg.client = gcache.New(int(opt.CredentialCacheSize)).\n\t\tLRU().\n\t\tEvictedFunc(stg.evictRecorder).\n\t\tExpiration(time.Duration(opt.CredentialCacheTTLSec) * time.Second).\n\t\tBuild()\n\tstg.mc = mc\n\tstg.collectHitRate()\n\n\treturn stg\n}",
"func New(sourceFactory *factory.Factory, username string, password string) (*Credential, error) {\n\treturn NewProfile(global.DEFAULT_PROFILE_NAME, sourceFactory, username, password)\n}",
"func NewCredentialCommand(io ui.IO, clientFactory ClientFactory, credentialStore CredentialConfig) *CredentialCommand {\n\treturn &CredentialCommand{\n\t\tio: io,\n\t\tclientFactory: clientFactory,\n\t\tcredentialStore: credentialStore,\n\t}\n}",
"func NewAuthRepo() IAuthRepository {\n\tdb := Connect()\n\treturn AuthRepository{db}\n}",
"func NewCredentialUserRegistrationDetails()(*CredentialUserRegistrationDetails) {\n m := &CredentialUserRegistrationDetails{\n Entity: *NewEntity(),\n }\n return m\n}",
"func NewCredential(config *Config) (credential Credential, err error) {\n\tif config == nil {\n\t\tconfig, err = defaultChain.resolve()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\treturn NewCredential(config)\n\t}\n\tswitch tea.StringValue(config.Type) {\n\tcase \"access_key\":\n\t\terr = checkAccessKey(config)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tcredential = newAccessKeyCredential(tea.StringValue(config.AccessKeyId), tea.StringValue(config.AccessKeySecret))\n\tcase \"sts\":\n\t\terr = checkSTS(config)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tcredential = newStsTokenCredential(tea.StringValue(config.AccessKeyId), tea.StringValue(config.AccessKeySecret), tea.StringValue(config.SecurityToken))\n\tcase \"ecs_ram_role\":\n\t\tcheckEcsRAMRole(config)\n\t\truntime := &utils.Runtime{\n\t\t\tHost: tea.StringValue(config.Host),\n\t\t\tProxy: tea.StringValue(config.Proxy),\n\t\t\tReadTimeout: tea.IntValue(config.Timeout),\n\t\t\tConnectTimeout: tea.IntValue(config.ConnectTimeout),\n\t\t}\n\t\tcredential = newEcsRAMRoleCredential(tea.StringValue(config.RoleName), runtime)\n\tcase \"ram_role_arn\":\n\t\terr = checkRAMRoleArn(config)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\truntime := &utils.Runtime{\n\t\t\tHost: tea.StringValue(config.Host),\n\t\t\tProxy: tea.StringValue(config.Proxy),\n\t\t\tReadTimeout: tea.IntValue(config.Timeout),\n\t\t\tConnectTimeout: tea.IntValue(config.ConnectTimeout),\n\t\t}\n\t\tcredential = newRAMRoleArnCredential(tea.StringValue(config.AccessKeyId), tea.StringValue(config.AccessKeySecret), tea.StringValue(config.RoleArn), tea.StringValue(config.RoleSessionName), tea.StringValue(config.Policy), tea.IntValue(config.RoleSessionExpiration), runtime)\n\tcase \"rsa_key_pair\":\n\t\terr = checkRSAKeyPair(config)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tfile, err1 := os.Open(tea.StringValue(config.PrivateKeyFile))\n\t\tif err1 != nil {\n\t\t\terr = fmt.Errorf(\"InvalidPath: Can not open PrivateKeyFile, err is %s\", 
err1.Error())\n\t\t\treturn\n\t\t}\n\t\tdefer file.Close()\n\t\tvar privateKey string\n\t\tscan := bufio.NewScanner(file)\n\t\tfor scan.Scan() {\n\t\t\tif strings.HasPrefix(scan.Text(), \"----\") {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tprivateKey += scan.Text() + \"\\n\"\n\t\t}\n\t\truntime := &utils.Runtime{\n\t\t\tHost: tea.StringValue(config.Host),\n\t\t\tProxy: tea.StringValue(config.Proxy),\n\t\t\tReadTimeout: tea.IntValue(config.Timeout),\n\t\t\tConnectTimeout: tea.IntValue(config.ConnectTimeout),\n\t\t}\n\t\tcredential = newRsaKeyPairCredential(privateKey, tea.StringValue(config.PublicKeyId), tea.IntValue(config.SessionExpiration), runtime)\n\tcase \"bearer\":\n\t\tif tea.StringValue(config.BearerToken) == \"\" {\n\t\t\terr = errors.New(\"BearerToken cannot be empty\")\n\t\t\treturn\n\t\t}\n\t\tcredential = newBearerTokenCredential(tea.StringValue(config.BearerToken))\n\tdefault:\n\t\terr = errors.New(\"Invalid type option, support: access_key, sts, ecs_ram_role, ram_role_arn, rsa_key_pair\")\n\t\treturn\n\t}\n\treturn credential, nil\n}",
"func NewCredentialReconciler(helm helm.Client) *CredentialReconciler {\n\treturn &CredentialReconciler{\n\t\thelm: helm,\n\t\tresourceCache: make(map[kindWithGroup]metav1.APIResource),\n\t}\n}",
"func (s *server) newRepoClient(ctx context.Context, storageName string) (gitalypb.RepositoryServiceClient, error) {\n\tgitalyServerInfo, err := storage.ExtractGitalyServer(ctx, storageName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconn, err := s.conns.Dial(ctx, gitalyServerInfo.Address, gitalyServerInfo.Token)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn gitalypb.NewRepositoryServiceClient(conn), nil\n}",
"func getCredential(credentialID string, nodeURL string, repositoryContract string) (*models.Credential, error) {\n\tclient := new(bl.Client)\n\tclient.Connect(nodeURL)\n\tidHash := sha256.Sum256([]byte(credentialID))\n\n\t_, err := client.GetCredential(common.HexToAddress(repositoryContract), idHash)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient.Close()\n\n\treturn nil, nil\n}",
"func New(cfg config.DataConfig, repository model.Repository) (*GitRepo, error) {\n\tr, err := g2g.OpenRepository(repository.ClonePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &GitRepo{Repository: repository, cfg: cfg, r: r}, nil\n}",
"func newRepository() Repository {\n\tif cfg == nil {\n\t\tpanic(fmt.Errorf(\"missing configuration\"))\n\t}\n\tif log == nil {\n\t\tpanic(fmt.Errorf(\"missing logger\"))\n\t}\n\n\tp2p.SetConfig(cfg)\n\tp2p.SetLogger(log)\n\n\t// create connections\n\tcaBridge, dbBridge, rpcBridge, geoBridge, err := connect(cfg, log)\n\tif err != nil {\n\t\tlog.Fatal(\"repository init failed\")\n\t\treturn nil\n\t}\n\n\t// construct the proxy instance\n\tp := proxy{\n\t\tcache: caBridge,\n\t\tdb: dbBridge,\n\t\trpc: rpcBridge,\n\t\tgeoip: geoBridge,\n\t\tlog: log,\n\t\tcfg: cfg,\n\n\t\t// get the map of governance contracts\n\t\tgovContracts: governanceContractsMap(cfg.Governance),\n\n\t\t// keep reference to the SOL compiler\n\t\tsolCompiler: cfg.Compiler.DefaultSolCompilerPath,\n\t}\n\n\t// return the proxy\n\treturn &p\n}",
"func NewRepository(repoName string) *Repository {\n\n\tclientIndex := model.ByEquality(\"ClientId\")\n\tclientIndex.Unique = true\n\t//\tuserIndex := model.ByEquality(\"UserId\")\n\t//\tuserIndex.Unique = true\n\n\treturn &Repository{\n\t\tName: repoName,\n\t\tmesssages: model.NewTable(store.DefaultStore, repoName, model.Indexes(clientIndex), nil),\n\t}\n}",
"func newRepository(\n\tid borges.RepositoryID,\n\tsto storage.Storer,\n\tfs billy.Filesystem,\n\tm borges.Mode,\n\ttransactional bool,\n\tl *Location,\n) (*Repository, error) {\n\trepo, err := git.Open(sto, nil)\n\tif err != nil {\n\t\tif err == git.ErrRepositoryNotExists {\n\t\t\trepo, err = git.Init(sto, nil)\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn nil, borges.ErrLocationNotExists.Wrap(err, id)\n\t\t}\n\t}\n\n\treturn &Repository{\n\t\tid: id,\n\t\trepo: repo,\n\t\ts: sto,\n\t\tfs: fs,\n\t\tmode: m,\n\t\ttransactional: transactional,\n\t\tlocation: l,\n\t\tcreateVersion: -1,\n\t}, nil\n}",
"func (m *repoManager) newRepo(alias, description string, assign *dvid.UUID, passcode string) (*repoT, error) {\n\tif assign != nil {\n\t\tm.repoMutex.RLock()\n\t\t// Make sure there's not already a repo with this UUID.\n\t\tif _, found := m.repos[*assign]; found {\n\t\t\tm.repoMutex.RUnlock()\n\t\t\treturn nil, ErrExistingUUID\n\t\t}\n\t\tm.repoMutex.RUnlock()\n\t}\n\tuuid, v, err := m.newUUID(assign)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tid, err := m.newRepoID()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr := newRepo(uuid, v, id, passcode)\n\n\tm.idMutex.Lock()\n\tm.repoToUUID[id] = uuid\n\tm.idMutex.Unlock()\n\tif err := m.putCaches(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tm.repoMutex.Lock()\n\tm.repos[uuid] = r\n\tm.repoMutex.Unlock()\n\n\tr.alias = alias\n\tr.description = description\n\n\tif err := r.save(); err != nil {\n\t\treturn r, err\n\t}\n\tif err := r.initMutationID(m.store, m.mutationIDStart, m.readOnly); err != nil {\n\t\treturn r, err\n\t}\n\tdvid.Infof(\"Created and saved new repo %q, id %d\\n\", uuid, id)\n\treturn r, nil\n}",
"func (g *GitCredential) Clone() GitCredential {\n\tclone := GitCredential{}\n\n\tvalue := reflect.ValueOf(g).Elem()\n\ttypeOfT := value.Type()\n\tfor i := 0; i < value.NumField(); i++ {\n\t\tfield := value.Field(i)\n\t\tvalue := field.String()\n\t\tv := reflect.ValueOf(&clone).Elem().FieldByName(typeOfT.Field(i).Name)\n\t\tv.SetString(value)\n\t}\n\n\treturn clone\n}",
"func ProvideAuthRepository(DB *gorm.DB) AuthRepository {\n\treturn AuthRepository{\n\t\tDB: DB,\n\t}\n}",
"func NewRepository(db middleware.Pool) *Repository {\n\treturn &Repository{Database: db}\n}",
"func NewCredentialParams(values map[string]string) *CredentialParams {\n\treturn &CredentialParams{\n\t\tConfigParams: *config.NewConfigParams(values),\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetCPUInfo reads the cpu info from the system | func (info *RpiInfo) GetCPUInfo() error {
file, err := os.Open("/proc/cpuinfo")
if err != nil {
return err
}
defer file.Close()
info.readCPUInfo(file)
return nil
} | [
"func (s *Simple) CPUInfo(req *acomm.Request) (interface{}, *url.URL, error) {\n\tvar args CPUInfoArgs\n\tif err := req.UnmarshalArgs(&args); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tif args.GuestID == \"\" {\n\t\treturn nil, nil, errors.New(\"missing guest_id\")\n\t}\n\n\tresult := &CPUInfoResult{\n\t\t&CPUInfo{\n\t\t\tProcessor: 0,\n\t\t\tMHz: 2600,\n\t\t},\n\t\t&CPUInfo{\n\t\t\tProcessor: 1,\n\t\t\tMHz: 2600,\n\t\t},\n\t}\n\treturn result, nil, nil\n}",
"func (s *Stats) GetCPUInfo() {\n\n if s.CPUInfo == nil {\n s.CPUInfo = new(CPUInfo)\n }\n\n s.CPUInfo.CPU, _ = cpu.Info()\n}",
"func CPUInfo(args ...int) (string, error) {\n\tinfo, err := cpu.Info()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(info) == 0 {\n\t\treturn \"\", errors.New(\"no CPU detected\")\n\t}\n\n\tif len(args) > 0 {\n\t\treturn info[args[0]].ModelName, nil\n\t}\n\n\treturn info[0].ModelName, nil\n}",
"func getCPUInfo(cpuInfoFile string) (string, error) {\n\ttext, err := katautils.GetFileContents(cpuInfoFile)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tcpus := strings.SplitAfter(text, \"\\n\\n\")\n\n\ttrimmed := strings.TrimSpace(cpus[0])\n\tif trimmed == \"\" {\n\t\treturn \"\", fmt.Errorf(\"Cannot determine CPU details\")\n\t}\n\n\treturn trimmed, nil\n}",
"func CPUInfo() ([]v1.NodeCPUInfo, error) {\n\tcpus, err := cpu.CPUInfo()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tret := make([]v1.NodeCPUInfo, len(cpus))\n\n\tfor i, cpu := range cpus {\n\t\tret[i] = v1.NodeCPUInfo{\n\t\t\tCPU: cpu.CPU,\n\t\t\tVendorID: cpu.VendorID,\n\t\t\tFamily: cpu.Family,\n\t\t\tModel: cpu.Model,\n\t\t\tStepping: cpu.Stepping,\n\t\t\tPhysicalID: cpu.PhysicalID,\n\t\t\tCoreID: cpu.CoreID,\n\t\t\tCores: cpu.Cores,\n\t\t\tModelName: cpu.ModelName,\n\t\t\tMhz: cpu.Mhz,\n\t\t\tCacheSize: cpu.CacheSize,\n\t\t\tFlags: cpu.Flags,\n\t\t}\n\t}\n\n\treturn ret, nil\n}",
"func ComputeCPUInfo(ctx context.Context, cluster *object.ClusterComputeResource) (uint64, error) {\n\tvar cr mo.ComputeResource\n\tvar hosts []mo.HostSystem\n\tvar minFreq uint64\n\n\tif cluster == nil {\n\t\treturn 0, errors.New(\"Must have a valid cluster reference to compute the cpu info\")\n\t}\n\n\terr := cluster.Properties(ctx, cluster.Reference(), nil, &cr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tif len(cr.Host) == 0 {\n\t\treturn 0, errors.New(\"No hosts found in the cluster\")\n\t}\n\n\tpc := property.DefaultCollector(cluster.Client())\n\terr = pc.Retrieve(ctx, cr.Host, []string{\"summary\"}, &hosts)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tfor _, h := range hosts {\n\t\tif h.Summary.Hardware == nil {\n\t\t\tcontinue\n\t\t}\n\t\thostCpuMHz := uint64(h.Summary.Hardware.CpuMhz)\n\t\tif hostCpuMHz < minFreq || minFreq == 0 {\n\t\t\tminFreq = hostCpuMHz\n\t\t}\n\t}\n\n\treturn minFreq, nil\n}",
"func Get() (CPUInfo, error) {\n\tbytes, err := ioutil.ReadFile(\"/proc/cpuinfo\")\n\tif err != nil {\n\t\treturn CPUInfo{}, err\n\t}\n\n\tcpuInfo, err := parse(bytes)\n\n\treturn cpuInfo, err\n}",
"func GetCPUInfos() (map[string][]string, error) {\n\treturninfos := make(map[string][]string)\n\n\tcpuinfo, err := os.OpenFile(\"/proc/cpuinfo\", os.O_RDONLY|os.O_SYNC, 0666)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not open /proc/cpuinfo\")\n\t}\n\tdefer cpuinfo.Close()\n\tcpuinfo.Seek(0, 0)\n\tinforeader := bufio.NewReader(cpuinfo)\n\tvar line string\n\tfor line, err = inforeader.ReadString('\\n'); err == nil; line, err = inforeader.ReadString('\\n') {\n\t\tfields := strings.SplitN(line, \":\", 2)\n\t\tif len(fields) == 2 {\n\t\t\tkey := strings.TrimSpace(fields[0])\n\t\t\tvalue := strings.TrimSpace(fields[1])\n\t\t\tif pv, inmap := returninfos[key]; inmap {\n\t\t\t\treturninfos[key] = append(pv, value)\n\t\t\t} else {\n\t\t\t\treturninfos[key] = []string{value}\n\t\t\t}\n\t\t}\n\t}\n\tif err == io.EOF {\n\t\terr = nil\n\t}\n\treturn returninfos, err\n}",
"func (s *Stats) GetAllCPUInfo() {\n s.GetCPUInfo()\n s.GetCPUTimes()\n}",
"func initCPUInfo() {\n\tif runtime.GOOS != \"linux\" {\n\t\t// Don't try to read Linux-specific /proc files or\n\t\t// warn about them not existing.\n\t\treturn\n\t}\n\tcpuinfob, err := ioutil.ReadFile(\"/proc/cpuinfo\")\n\tif err != nil {\n\t\t// Leave everything at 0, nothing can be done.\n\t\tlog.Warningf(\"Could not read /proc/cpuinfo: %v\", err)\n\t\treturn\n\t}\n\tcpuinfo := string(cpuinfob)\n\n\t// We get the value straight from host /proc/cpuinfo.\n\tfor _, line := range strings.Split(cpuinfo, \"\\n\") {\n\t\tswitch {\n\t\tcase strings.Contains(line, \"BogoMIPS\"):\n\t\t\tsplitMHz := strings.Split(line, \":\")\n\t\t\tif len(splitMHz) < 2 {\n\t\t\t\tlog.Warningf(\"Could not read /proc/cpuinfo: malformed BogoMIPS\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// If there was a problem, leave cpuFreqMHz as 0.\n\t\t\tvar err error\n\t\t\thostFeatureSet.cpuFreqMHz, err = strconv.ParseFloat(strings.TrimSpace(splitMHz[1]), 64)\n\t\t\tif err != nil {\n\t\t\t\thostFeatureSet.cpuFreqMHz = 0.0\n\t\t\t\tlog.Warningf(\"Could not parse BogoMIPS value %v: %v\", splitMHz[1], err)\n\t\t\t}\n\t\tcase strings.Contains(line, \"CPU implementer\"):\n\t\t\tsplitImpl := strings.Split(line, \":\")\n\t\t\tif len(splitImpl) < 2 {\n\t\t\t\tlog.Warningf(\"Could not read /proc/cpuinfo: malformed CPU implementer\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// If there was a problem, leave cpuImplHex as 0.\n\t\t\tvar err error\n\t\t\thostFeatureSet.cpuImplHex, err = strconv.ParseUint(strings.TrimSpace(splitImpl[1]), 0, 64)\n\t\t\tif err != nil {\n\t\t\t\thostFeatureSet.cpuImplHex = 0\n\t\t\t\tlog.Warningf(\"Could not parse CPU implementer value %v: %v\", splitImpl[1], err)\n\t\t\t}\n\t\tcase strings.Contains(line, \"CPU architecture\"):\n\t\t\tsplitArch := strings.Split(line, \":\")\n\t\t\tif len(splitArch) < 2 {\n\t\t\t\tlog.Warningf(\"Could not read /proc/cpuinfo: malformed CPU architecture\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// If there was a problem, leave cpuArchDec as 0.\n\t\t\tvar err 
error\n\t\t\thostFeatureSet.cpuArchDec, err = strconv.ParseUint(strings.TrimSpace(splitArch[1]), 0, 64)\n\t\t\tif err != nil {\n\t\t\t\thostFeatureSet.cpuArchDec = 0\n\t\t\t\tlog.Warningf(\"Could not parse CPU architecture value %v: %v\", splitArch[1], err)\n\t\t\t}\n\t\tcase strings.Contains(line, \"CPU variant\"):\n\t\t\tsplitVar := strings.Split(line, \":\")\n\t\t\tif len(splitVar) < 2 {\n\t\t\t\tlog.Warningf(\"Could not read /proc/cpuinfo: malformed CPU variant\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// If there was a problem, leave cpuVarHex as 0.\n\t\t\tvar err error\n\t\t\thostFeatureSet.cpuVarHex, err = strconv.ParseUint(strings.TrimSpace(splitVar[1]), 0, 64)\n\t\t\tif err != nil {\n\t\t\t\thostFeatureSet.cpuVarHex = 0\n\t\t\t\tlog.Warningf(\"Could not parse CPU variant value %v: %v\", splitVar[1], err)\n\t\t\t}\n\t\tcase strings.Contains(line, \"CPU part\"):\n\t\t\tsplitPart := strings.Split(line, \":\")\n\t\t\tif len(splitPart) < 2 {\n\t\t\t\tlog.Warningf(\"Could not read /proc/cpuinfo: malformed CPU part\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// If there was a problem, leave cpuPartHex as 0.\n\t\t\tvar err error\n\t\t\thostFeatureSet.cpuPartHex, err = strconv.ParseUint(strings.TrimSpace(splitPart[1]), 0, 64)\n\t\t\tif err != nil {\n\t\t\t\thostFeatureSet.cpuPartHex = 0\n\t\t\t\tlog.Warningf(\"Could not parse CPU part value %v: %v\", splitPart[1], err)\n\t\t\t}\n\t\tcase strings.Contains(line, \"CPU revision\"):\n\t\t\tsplitRev := strings.Split(line, \":\")\n\t\t\tif len(splitRev) < 2 {\n\t\t\t\tlog.Warningf(\"Could not read /proc/cpuinfo: malformed CPU revision\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// If there was a problem, leave cpuRevDec as 0.\n\t\t\tvar err error\n\t\t\thostFeatureSet.cpuRevDec, err = strconv.ParseUint(strings.TrimSpace(splitRev[1]), 0, 64)\n\t\t\tif err != nil {\n\t\t\t\thostFeatureSet.cpuRevDec = 0\n\t\t\t\tlog.Warningf(\"Could not parse CPU revision value %v: %v\", splitRev[1], err)\n\t\t\t}\n\t\t}\n\t}\n}",
"func ProcessorInfo() (hardware string, processors []Processor, err error) {\n\tdata, err := ioutil.ReadFile(\"/proc/cpuinfo\")\n\tif err != nil {\n\t\treturn\n\t}\n\treturn parseCpuInfo(data)\n}",
"func readCPUInfo() ([]cpuInfo, error) {\n\tcpuinfo, err := ioutil.ReadFile(\"/proc/cpuinfo\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcpus := strings.Split(string(cpuinfo), \"\\n\\n\")\n\tr := make([]cpuInfo, len(cpus))\n\tfor i, cpu := range cpus {\n\t\tif strings.TrimSpace(cpu) == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tm := map[string]string{}\n\t\tfor _, line := range strings.Split(cpu, \"\\n\") {\n\t\t\tc := strings.Split(line, \":\")\n\t\t\tif len(c) == 2 {\n\t\t\t\tkey := strings.TrimSpace(c[0])\n\t\t\t\tvalue := strings.TrimSpace(c[1])\n\t\t\t\tm[key] = value\n\t\t\t}\n\t\t}\n\n\t\tst := reflect.TypeOf(r[i])\n\t\tobj := reflect.ValueOf(&r[i]).Elem()\n\t\tfor i := 0; i < st.NumField(); i++ {\n\t\t\ttag := st.Field(i).Tag.Get(\"cpuinfo\")\n\t\t\tvalue, ok := m[tag]\n\t\t\tif !ok {\n\t\t\t\treturn nil, errors.Errorf(\"Missing value for cpuinfo key %s\", tag)\n\t\t\t}\n\n\t\t\tfield := obj.Field(i)\n\t\t\tkind := field.Kind()\n\t\t\tswitch kind {\n\t\t\tcase reflect.String:\n\t\t\t\tfield.SetString(value)\n\t\t\tcase reflect.Uint:\n\t\t\t\tn, err := strconv.ParseUint(value, 0, 64)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tfield.SetUint(n)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn r, nil\n}",
"func (client *XenClient) HostGetCpuInfo(self string) (result map[string]string, err error) {\n\tobj, err := client.APICall(\"host.get_cpu_info\", self)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tinterim := reflect.ValueOf(obj)\n\tresult = map[string]string{}\n\tfor _, key := range interim.MapKeys() {\n\t\tobj := interim.MapIndex(key)\n\t\tresult[key.String()] = obj.String()\n\t}\n\n\treturn\n}",
"func NewCPUInfo() CPUInfo {\n\treturn CPUInfo{\n\t\tCores: NumCPU(),\n\t\t//RamMB: NumRam(),\n\t\tL1CacheLine: int(C.SDL_GetCPUCacheLineSize()),\n\t\tRDTSC: toBool(C.SDL_HasRDTSC()),\n\t\tAltiVec: toBool(C.SDL_HasAltiVec()),\n\t\tMMX: toBool(C.SDL_HasMMX()),\n\t\tHas3DNow: toBool(C.SDL_Has3DNow()),\n\t\tSSE: toBool(C.SDL_HasSSE()),\n\t\tSSE2: toBool(C.SDL_HasSSE2()),\n\t\tSSE3: toBool(C.SDL_HasSSE3()),\n\t\tSSE41: toBool(C.SDL_HasSSE41()),\n\t\tSSE42: toBool(C.SDL_HasSSE42()),\n\t\t//AVX: toBool(C.SDL_HasAVX()),\n\t}\n}",
"func (s *System) CPUUsage() (map[string]float32, error) {\n\tvar dsp, stream, geometry, update, total C.float\n\tres := C.FMOD_System_GetCPUUsage(s.cptr, &dsp, &stream, &geometry, &update, &total)\n\tcpu := map[string]float32{\n\t\t\"dsp\": float32(dsp),\n\t\t\"stream\": float32(stream),\n\t\t\"geometry\": float32(geometry),\n\t\t\"update\": float32(update),\n\t\t\"total\": float32(total),\n\t}\n\treturn cpu, errs[res]\n}",
"func getCPUDetails(log logging.Logger, numaSSDs numaSSDsMap, coresPerNuma int) (numaCoreCountsMap, error) {\n\tif coresPerNuma < 1 {\n\t\treturn nil, errors.Errorf(errInvalNrCores, coresPerNuma)\n\t}\n\n\tnumaCoreCounts := make(numaCoreCountsMap)\n\tfor numaID, ssds := range numaSSDs {\n\t\tcoreCounts, err := checkCPUs(log, len(ssds), coresPerNuma)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tnumaCoreCounts[numaID] = coreCounts\n\t}\n\n\treturn numaCoreCounts, nil\n}",
"func TestGetCPUStat(t *testing.T) {\n\t// setup the faking of `cpu.Info()`\n\toldcpuInfo := cpuInfo\n\tcpuInfo = func() ([]cpu.InfoStat, error) {\n\t\tret := []cpu.InfoStat{\n\t\t\t{\n\t\t\t\tVendorID: string(\"vendor\"),\n\t\t\t\tModelName: string(\"model\"),\n\t\t\t\tMhz: float64(100),\n\t\t\t},\n\t\t\t{\n\t\t\t\tVendorID: string(\"vendor\"), // two CPUs --> cpuinfo.count = \"2\"\n\t\t\t\tModelName: string(\"model\"),\n\t\t\t\tMhz: float64(100),\n\t\t\t},\n\t\t}\n\t\treturn ret, nil\n\t}\n\n\t// test\n\texpected := cpuStat{\n\t\tcount: strconv.FormatInt(2, 10),\n\t\tvendorID: \"vendor\",\n\t\tmodelName: \"model\",\n\t\tmhz: strconv.FormatInt(100, 10),\n\t}\n\tactual, err := getCPUStat()\n\n\tassert.NoError(t, err, \"`getCPUStat()` should not have returned an error\")\n\tassert.Equal(t, expected, actual, \"`getCPUStat()` should be equal to main.cpuStat{count:\\\"2\\\", vendorID:\\\"vendor\\\", modelName:\\\"model\\\", mhz:\\\"100\\\"}\")\n\n\t// teardown\n\tcpuInfo = oldcpuInfo\n}",
"func GetCPUModel() ([]cpu.InfoStat, error) {\n\treturn cpu.Info()\n}",
"func GetCPUStats() (*map[string]CPUStats, error) {\n\tprocStats, err := ioutil.ReadFile(\"/proc/stat\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tscanner := bufio.NewScanner(bytes.NewReader(procStats))\n\tcStats := make(map[string]CPUStats)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tif !strings.HasPrefix(line, \"cpu\") {\n\t\t\tcontinue\n\t\t}\n\t\tparts := strings.Fields(line)\n\t\tif len(parts) < 8 {\n\t\t\treturn nil, errors.New(\"bad data found in /proc/stat - \" + line)\n\t\t}\n\t\tcStats[parts[0]] = CPUStats{\n\t\t\tUser: parseUint64(parts[1]),\n\t\t\tNice: parseUint64(parts[2]),\n\t\t\tSys: parseUint64(parts[3]),\n\t\t\tIdle: parseUint64(parts[4]),\n\t\t\tWait: parseUint64(parts[5]),\n\t\t\tIrq: parseUint64(parts[6]),\n\t\t\tSoftIrq: parseUint64(parts[7]),\n\t\t\tStolen: parseUint64(parts[8]),\n\t\t}\n\t}\n\treturn &cStats, nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deprecated: Use PbStatsSampleFeed.ProtoReflect.Descriptor instead. | func (*PbStatsSampleFeed) Descriptor() ([]byte, []int) {
return file_stats_proto_rawDescGZIP(), []int{0}
} | [
"func (*RateLimitingSampler) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gloo_api_external_envoy_config_trace_v3_opencensus_proto_rawDescGZIP(), []int{4}\n}",
"func (*PbStatsSampleEntry) Descriptor() ([]byte, []int) {\n\treturn file_stats_proto_rawDescGZIP(), []int{1}\n}",
"func (*PbStatsSampleValue) Descriptor() ([]byte, []int) {\n\treturn file_stats_proto_rawDescGZIP(), []int{2}\n}",
"func (*RateLimitingSampler) Descriptor() ([]byte, []int) {\n\treturn file_opencensus_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{3}\n}",
"func (*StatsPayload) Descriptor() ([]byte, []int) {\n\treturn file_datadog_trace_stats_proto_rawDescGZIP(), []int{0}\n}",
"func (*FeedbackMetrics) Descriptor() ([]byte, []int) {\n\treturn file_ssn_dataservice_v1_dataservice_proto_rawDescGZIP(), []int{12}\n}",
"func (*ProbabilitySampler) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gloo_api_external_envoy_config_trace_v3_opencensus_proto_rawDescGZIP(), []int{2}\n}",
"func (*ConstantSampler) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gloo_api_external_envoy_config_trace_v3_opencensus_proto_rawDescGZIP(), []int{3}\n}",
"func (*GraphiteStatsdSink) Descriptor() ([]byte, []int) {\n\treturn file_envoy_extensions_stat_sinks_graphite_statsd_v3_graphite_statsd_proto_rawDescGZIP(), []int{0}\n}",
"func (*PbStatsIndexFeed) Descriptor() ([]byte, []int) {\n\treturn file_stats_proto_rawDescGZIP(), []int{4}\n}",
"func (*Filter_DeprecatedV1) Descriptor() ([]byte, []int) {\n\treturn file_xds_envoy_base_proto_rawDescGZIP(), []int{8, 0}\n}",
"func (*Telemetry) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_telemetry_proto_rawDescGZIP(), []int{1}\n}",
"func (*SampledData) Descriptor() ([]byte, []int) {\n\treturn file_proto_google_fhir_proto_stu3_datatypes_proto_rawDescGZIP(), []int{37}\n}",
"func (*ConstantSampler) Descriptor() ([]byte, []int) {\n\treturn file_opencensus_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{2}\n}",
"func (*TelemetryMessageBroadcast) Descriptor() ([]byte, []int) {\n\treturn file_cl_offchainreporting_telemetry_proto_rawDescGZIP(), []int{2}\n}",
"func (*ClientStatsPayload) Descriptor() ([]byte, []int) {\n\treturn file_datadog_trace_stats_proto_rawDescGZIP(), []int{1}\n}",
"func (*KafkaMeter) Descriptor() ([]byte, []int) {\n\treturn file_pkg_sinks_plugin_proto_metrics_proto_rawDescGZIP(), []int{0}\n}",
"func (*SummaryDataPoint) Descriptor() ([]byte, []int) {\n\treturn file_metric_data_proto_rawDescGZIP(), []int{9}\n}",
"func (*StreamingGrpcSink) Descriptor() ([]byte, []int) {\n\treturn file_envoy_service_tap_v2alpha_common_proto_rawDescGZIP(), []int{7}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deprecated: Use PbStatsSampleEntry.ProtoReflect.Descriptor instead. | func (*PbStatsSampleEntry) Descriptor() ([]byte, []int) {
return file_stats_proto_rawDescGZIP(), []int{1}
} | [
"func (*PbStatsSampleValue) Descriptor() ([]byte, []int) {\n\treturn file_stats_proto_rawDescGZIP(), []int{2}\n}",
"func (*PbStatsSampleFeed) Descriptor() ([]byte, []int) {\n\treturn file_stats_proto_rawDescGZIP(), []int{0}\n}",
"func (*RateLimitingSampler) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gloo_api_external_envoy_config_trace_v3_opencensus_proto_rawDescGZIP(), []int{4}\n}",
"func (*RateLimitingSampler) Descriptor() ([]byte, []int) {\n\treturn file_opencensus_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{3}\n}",
"func (*StatsPayload) Descriptor() ([]byte, []int) {\n\treturn file_datadog_trace_stats_proto_rawDescGZIP(), []int{0}\n}",
"func (*ProbabilitySampler) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gloo_api_external_envoy_config_trace_v3_opencensus_proto_rawDescGZIP(), []int{2}\n}",
"func (*ConstantSampler) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gloo_api_external_envoy_config_trace_v3_opencensus_proto_rawDescGZIP(), []int{3}\n}",
"func (*ConstantSampler) Descriptor() ([]byte, []int) {\n\treturn file_opencensus_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{2}\n}",
"func (*Trace_EntryStats) Descriptor() ([]byte, []int) {\n\treturn file_model_apitrace_apitrace_proto_rawDescGZIP(), []int{0, 1}\n}",
"func (*StatsEntry) Descriptor() ([]byte, []int) {\n\treturn file_stats_stats_proto_rawDescGZIP(), []int{2}\n}",
"func (*SampledData) Descriptor() ([]byte, []int) {\n\treturn file_proto_google_fhir_proto_stu3_datatypes_proto_rawDescGZIP(), []int{37}\n}",
"func (*ClientStatsPayload) Descriptor() ([]byte, []int) {\n\treturn file_datadog_trace_stats_proto_rawDescGZIP(), []int{1}\n}",
"func (*LoadBalancerStatsRequest) Descriptor() ([]byte, []int) {\n\treturn file_grpc_test_proto_rawDescGZIP(), []int{9}\n}",
"func (*GetStatsResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{45}\n}",
"func (*DumpStatsEntry) Descriptor() ([]byte, []int) {\n\treturn file_stats_stats_proto_rawDescGZIP(), []int{0}\n}",
"func (*CalculateStatsRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_contactcenterinsights_v1_contact_center_insights_proto_rawDescGZIP(), []int{0}\n}",
"func (*Pic_PortStatistics_Statistic) Descriptor() ([]byte, []int) {\n\treturn file_huaweiV8R12_pic_proto_rawDescGZIP(), []int{0, 0, 0}\n}",
"func (*Span) Descriptor() ([]byte, []int) {\n\treturn file_google_devtools_cloudtrace_v2_trace_proto_rawDescGZIP(), []int{0}\n}",
"func (*Telemetry) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_telemetry_proto_rawDescGZIP(), []int{1}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deprecated: Use PbStatsSampleValue.ProtoReflect.Descriptor instead. | func (*PbStatsSampleValue) Descriptor() ([]byte, []int) {
return file_stats_proto_rawDescGZIP(), []int{2}
} | [
"func (*PbStatsSampleEntry) Descriptor() ([]byte, []int) {\n\treturn file_stats_proto_rawDescGZIP(), []int{1}\n}",
"func (*RateLimitingSampler) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gloo_api_external_envoy_config_trace_v3_opencensus_proto_rawDescGZIP(), []int{4}\n}",
"func (*RateLimitingSampler) Descriptor() ([]byte, []int) {\n\treturn file_opencensus_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{3}\n}",
"func (*StatsPayload) Descriptor() ([]byte, []int) {\n\treturn file_datadog_trace_stats_proto_rawDescGZIP(), []int{0}\n}",
"func (*PbStatsSampleFeed) Descriptor() ([]byte, []int) {\n\treturn file_stats_proto_rawDescGZIP(), []int{0}\n}",
"func (*ProbabilitySampler) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gloo_api_external_envoy_config_trace_v3_opencensus_proto_rawDescGZIP(), []int{2}\n}",
"func (*ConstantSampler) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gloo_api_external_envoy_config_trace_v3_opencensus_proto_rawDescGZIP(), []int{3}\n}",
"func (*AggregatorValueStatsProto) Descriptor() ([]byte, []int) {\n\treturn file_aggregator_proto_rawDescGZIP(), []int{3}\n}",
"func (*TypedValue) Descriptor() ([]byte, []int) {\n\treturn file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{0}\n}",
"func (*ConstantSampler) Descriptor() ([]byte, []int) {\n\treturn file_opencensus_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{2}\n}",
"func (*SampledData) Descriptor() ([]byte, []int) {\n\treturn file_proto_google_fhir_proto_stu3_datatypes_proto_rawDescGZIP(), []int{37}\n}",
"func (*ClientStatsPayload) Descriptor() ([]byte, []int) {\n\treturn file_datadog_trace_stats_proto_rawDescGZIP(), []int{1}\n}",
"func (*NumericValue) Descriptor() ([]byte, []int) {\n\treturn file_google_analytics_admin_v1alpha_access_report_proto_rawDescGZIP(), []int{10}\n}",
"func (*Pic_PortStatistics_Statistic) Descriptor() ([]byte, []int) {\n\treturn file_huaweiV8R12_pic_proto_rawDescGZIP(), []int{0, 0, 0}\n}",
"func (*ValueSetWarning) Descriptor() ([]byte, []int) {\n\treturn file_proto_google_fhir_proto_stu3_extensions_proto_rawDescGZIP(), []int{374}\n}",
"func (*GetStatsResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{45}\n}",
"func (*KafkaMeter) Descriptor() ([]byte, []int) {\n\treturn file_pkg_sinks_plugin_proto_metrics_proto_rawDescGZIP(), []int{0}\n}",
"func (*TargetValue) Descriptor() ([]byte, []int) {\n\treturn file_packetbroker_api_iam_v1_service_proto_rawDescGZIP(), []int{7}\n}",
"func (*Metrics) Descriptor() ([]byte, []int) {\n\treturn file_grpc_proto_rawDescGZIP(), []int{0}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deprecated: Use PbStatsIndexList.ProtoReflect.Descriptor instead. | func (*PbStatsIndexList) Descriptor() ([]byte, []int) {
return file_stats_proto_rawDescGZIP(), []int{3}
} | [
"func (*PbStatsIndexFeed) Descriptor() ([]byte, []int) {\n\treturn file_stats_proto_rawDescGZIP(), []int{4}\n}",
"func (*BulkIndexRequest) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_index_proto_rawDescGZIP(), []int{14}\n}",
"func (*BulkIndexResponse) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_index_proto_rawDescGZIP(), []int{15}\n}",
"func (*Index) Descriptor() ([]byte, []int) {\n\treturn file_index_faults_rpc_rpc_proto_rawDescGZIP(), []int{0}\n}",
"func (*List) Descriptor() ([]byte, []int) {\n\treturn file_k8s_io_api_core_v1_generated_proto_rawDescGZIP(), []int{77}\n}",
"func (*List) Descriptor() ([]byte, []int) {\n\treturn file_proto_ssql_proto_rawDescGZIP(), []int{11}\n}",
"func (*ClientStatsBucket) Descriptor() ([]byte, []int) {\n\treturn file_datadog_trace_stats_proto_rawDescGZIP(), []int{2}\n}",
"func (*MemberStatisticsInfoListReq) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{101}\n}",
"func (*RateLimitingSampler) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gloo_api_external_envoy_config_trace_v3_opencensus_proto_rawDescGZIP(), []int{4}\n}",
"func (*MemberStatisticsInfoListResp) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{103}\n}",
"func (*RateLimitingSampler) Descriptor() ([]byte, []int) {\n\treturn file_opencensus_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{3}\n}",
"func (*ServiceAccountList) Descriptor() ([]byte, []int) {\n\treturn file_k8s_io_api_core_v1_generated_proto_rawDescGZIP(), []int{181}\n}",
"func (Index_HashFunc) EnumDescriptor() ([]byte, []int) {\n\treturn file_index_proto_rawDescGZIP(), []int{0, 0}\n}",
"func (*ListMetricsRequest) Descriptor() ([]byte, []int) {\n\treturn file_go_chromium_org_luci_analysis_proto_v1_metrics_proto_rawDescGZIP(), []int{0}\n}",
"func (*CMsgBattleReportInfoList) Descriptor() ([]byte, []int) {\n\treturn file_dota_gcmessages_client_battle_report_proto_rawDescGZIP(), []int{5}\n}",
"func (*Filter_DeprecatedV1) Descriptor() ([]byte, []int) {\n\treturn file_xds_envoy_base_proto_rawDescGZIP(), []int{8, 0}\n}",
"func (*ListMetricsRequest) Descriptor() ([]byte, []int) {\n\treturn file_alameda_api_v1alpha1_datahub_metrics_services_proto_rawDescGZIP(), []int{19}\n}",
"func (*GetStatsResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{45}\n}",
"func (*StatsPayload) Descriptor() ([]byte, []int) {\n\treturn file_datadog_trace_stats_proto_rawDescGZIP(), []int{0}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deprecated: Use PbStatsIndexFeed.ProtoReflect.Descriptor instead. | func (*PbStatsIndexFeed) Descriptor() ([]byte, []int) {
return file_stats_proto_rawDescGZIP(), []int{4}
} | [
"func (*PbStatsIndexList) Descriptor() ([]byte, []int) {\n\treturn file_stats_proto_rawDescGZIP(), []int{3}\n}",
"func (*PbStatsSampleFeed) Descriptor() ([]byte, []int) {\n\treturn file_stats_proto_rawDescGZIP(), []int{0}\n}",
"func (*Filter_DeprecatedV1) Descriptor() ([]byte, []int) {\n\treturn file_xds_envoy_base_proto_rawDescGZIP(), []int{8, 0}\n}",
"func (*StatsPayload) Descriptor() ([]byte, []int) {\n\treturn file_datadog_trace_stats_proto_rawDescGZIP(), []int{0}\n}",
"func (*BulkIndexRequest) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_index_proto_rawDescGZIP(), []int{14}\n}",
"func (*ClusterStatisticsApi) Descriptor() ([]byte, []int) {\n\treturn file_lightbits_api_duros_v1_statisticsapi_proto_rawDescGZIP(), []int{0}\n}",
"func (*RateLimitingSampler) Descriptor() ([]byte, []int) {\n\treturn file_github_com_solo_io_gloo_projects_gloo_api_external_envoy_config_trace_v3_opencensus_proto_rawDescGZIP(), []int{4}\n}",
"func (*Index) Descriptor() ([]byte, []int) {\n\treturn file_index_faults_rpc_rpc_proto_rawDescGZIP(), []int{0}\n}",
"func (*ClientStatsBucket) Descriptor() ([]byte, []int) {\n\treturn file_datadog_trace_stats_proto_rawDescGZIP(), []int{2}\n}",
"func (*RateLimitingSampler) Descriptor() ([]byte, []int) {\n\treturn file_opencensus_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{3}\n}",
"func (*ClientStatsPayload) Descriptor() ([]byte, []int) {\n\treturn file_datadog_trace_stats_proto_rawDescGZIP(), []int{1}\n}",
"func (*BulkIndexResponse) Descriptor() ([]byte, []int) {\n\treturn file_protobuf_index_proto_rawDescGZIP(), []int{15}\n}",
"func (*NodeStatisticsApi) Descriptor() ([]byte, []int) {\n\treturn file_lightbits_api_duros_v1_statisticsapi_proto_rawDescGZIP(), []int{1}\n}",
"func (*FeedbackMetrics) Descriptor() ([]byte, []int) {\n\treturn file_ssn_dataservice_v1_dataservice_proto_rawDescGZIP(), []int{12}\n}",
"func (*GetStatsResponse) Descriptor() ([]byte, []int) {\n\treturn file_github_com_yahuizhan_dappley_metrics_go_api_rpc_pb_rpc_proto_rawDescGZIP(), []int{45}\n}",
"func (*IndexValue) Descriptor() ([]byte, []int) {\n\treturn file_openfeed_proto_rawDescGZIP(), []int{23}\n}",
"func (*MemberStatisticsInfoUpdateResp) Descriptor() ([]byte, []int) {\n\treturn file_ums_proto_rawDescGZIP(), []int{105}\n}",
"func (*Metrics) Descriptor() ([]byte, []int) {\n\treturn file_grpc_proto_rawDescGZIP(), []int{0}\n}",
"func (*CalculateStatsRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_contactcenterinsights_v1_contact_center_insights_proto_rawDescGZIP(), []int{0}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
NewTree yields a tree corresponding to the given list of symbol frequencies | func NewTree(fs []SymbolFreq) Tree {
// Sort frequencies
sort.Sort(byFreq(fs))
wrkList := []node{}
for _, f := range fs {
wrkList = append(wrkList, f)
}
for {
if len(wrkList) < 2 {
break
}
newNode := makeNewNode(wrkList[0], wrkList[1])
wrkList = insertItem(wrkList[2:], newNode)
}
return Tree{wrkList[0]}
} | [
"func NewEncodingTree(freq map[uint8]uint) *Node {\n\tvar head Node // Fictitious head\n\n\tfor i, v := range freq {\n\t\tnode := &Node{\n\t\t\tvalue: i,\n\t\t\tweight: v,\n\t\t}\n\t\thead.insert(node)\n\t}\n\n\tfor head.next != nil && head.next.next != nil {\n\t\tl := head.popFirst()\n\t\tr := head.popFirst()\n\n\t\tnode := join(l, r)\n\t\thead.insert(node)\n\t}\n\n\t// Fictitious head point to tree root\n\tif head.next != nil {\n\t\thead.next.prev = nil\n\t}\n\treturn head.next\n}",
"func newt(terms []string) Tree {\n\tkvs := make([]kv, 0, len(terms))\n\tfor i, k := range terms {\n\t\tkvs = append(kvs, kv{[]byte(k), i})\n\t}\n\tsort.Slice(kvs, func(i, j int) bool {\n\t\ta, b := kvs[i].k, kvs[j].k\n\t\tfor i := 0; i < len(a) && i < len(b); i++ {\n\t\t\tif a[i] == b[i] {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn a[i] < b[i]\n\t\t}\n\t\treturn len(a) < len(b)\n\t})\n\n\tt := Tree{node{next: 1}}\n\n\tt = t.construct(kvs, 0, 0)\n\treturn t\n}",
"func main() {\r\n\ttest := \"abcdefghijklmnopqrstuvwxyz\"\r\n\r\n\tsymFreqs := make(map[rune]int)\r\n\t// read each symbol and record the frequencies\r\n\tfor _, c := range test {\r\n\t\tsymFreqs[c]++\r\n\t}\r\n\r\n\t// example tree\r\n\texampleTree := buildTree(symFreqs)\r\n\r\n\t// print out results\r\n\tfmt.Println(\"SYMBOL\\tWEIGHT\\tHUFFMAN CODE\")\r\n\tprintCodes(exampleTree, []byte{})\r\n}",
"func buildPrefixTree(byteFrequencies *dictionary.Dictionary) *huffmanTreeNode {\n\ttree := new(priorityqueue.PriorityQueue)\n\tkeys := byteFrequencies.Keys()\n\n\tfor i := 0; i < keys.Size(); i++ {\n\t\tbyt := keys.MustGet(i)\n\t\tfrequency, _ := byteFrequencies.Get(byt)\n\n\t\ttree.Enqueue(frequency.(int), &huffmanTreeNode{frequency: frequency.(int), value: byt.(byte)})\n\t}\n\n\tfor tree.Size() > 1 {\n\t\taPrio, a := tree.Dequeue()\n\t\tbPrio, b := tree.Dequeue()\n\n\t\tnewPrio := aPrio + bPrio\n\n\t\tnode := &huffmanTreeNode{frequency: newPrio, left: a.(*huffmanTreeNode), right: b.(*huffmanTreeNode)}\n\n\t\ttree.Enqueue(newPrio, node)\n\t}\n\n\t_, root := tree.Dequeue()\n\n\treturn root.(*huffmanTreeNode)\n}",
"func newTree(segmentSize, depth int, hashfunc func() hash.Hash) *tree {\n\tn := newNode(0, nil, hashfunc())\n\tprevlevel := []*node{n}\n\t// iterate over levels and creates 2^(depth-level) nodes\n\t// the 0 level is on double segment sections so we start at depth - 2 since\n\tcount := 2\n\tfor level := depth - 2; level >= 0; level-- {\n\t\tnodes := make([]*node, count)\n\t\tfor i := 0; i < count; i++ {\n\t\t\tparent := prevlevel[i/2]\n\t\t\tvar hasher hash.Hash\n\t\t\tif level == 0 {\n\t\t\t\thasher = hashfunc()\n\t\t\t}\n\t\t\tnodes[i] = newNode(i, parent, hasher)\n\t\t}\n\t\tprevlevel = nodes\n\t\tcount *= 2\n\t}\n\t// the datanode level is the nodes on the last level\n\treturn &tree{\n\t\tleaves: prevlevel,\n\t\tresult: make(chan []byte),\n\t\tsection: make([]byte, 2*segmentSize),\n\t}\n}",
"func newTree(segmentSize, maxsize, depth int, hashfunc func() hash.Hash) *tree {\n\tn := newNode(0, nil, hashfunc())\n\tprevlevel := []*node{n}\n\t// iterate over levels and creates 2^(depth-level) nodes\n\t// the 0 level is on double segment sections so we start at depth - 2\n\tcount := 2\n\tfor level := depth - 2; level >= 0; level-- {\n\t\tnodes := make([]*node, count)\n\t\tfor i := 0; i < count; i++ {\n\t\t\tparent := prevlevel[i/2]\n\t\t\tnodes[i] = newNode(i, parent, hashfunc())\n\t\t}\n\t\tprevlevel = nodes\n\t\tcount *= 2\n\t}\n\t// the datanode level is the nodes on the last level\n\treturn &tree{\n\t\tleaves: prevlevel,\n\t\tbuffer: make([]byte, maxsize),\n\t}\n}",
"func NewTree(width int, company string) *Tree {\n\theight := width / 2\n\n\tleaves := make([][]string, height)\n\n\tfor i := 0; i < height; i++ {\n\t\tleaves[i] = newLevelLeaves(width, \" \")\n\t\tif i == 0 {\n\t\t\tleaves[i][width/2] = \"★\"\n\t\t\tcontinue\n\t\t}\n\n\t\tleaves[i][height-i] = \"/\"\n\t\tleaves[i][height+i] = \"\\\\\"\n\t\tfor j := (height - i + 1); j < height+i; j++ {\n\t\t\tleaves[i][j] = leafContent()\n\t\t}\n\t}\n\n\tleaves = append(leaves, bottomLeaves(width, \"^\"), bottomLeaves(width, \" \"))\n\n\treturn &Tree{\n\t\tleaves: leaves,\n\t\tcompany: company,\n\t}\n}",
"func NewTree(childs Childs) *Quadtree {\n\tqt, ok := nodeMap[childs]\n\tif ok {\n\t\tcacheHit++\n\t\treturn qt\n\t}\n\tcacheMiss++\n\tqt = &Quadtree{childs.NE.Level + 1, childs, childs.population(), nil}\n\tif qt.Population == 0 || qt.Level <= 16 {\n\t\tnodeMap[childs] = qt\n\t}\n\treturn qt\n}",
"func InitializeTree(speciesNames []string) Tree {\n\tnumLeaves := len(speciesNames)\n\tvar t Tree = make([]*Node, 2*numLeaves-1)\n\n\t//create our 2n-1 nodes, and assign labels (no children yet)\n\tfor i := range t {\n\t\t// create a node (default age: 0)\n\t\tvar vx Node\n\t\tif i < numLeaves { // set the species name of leaves\n\t\t\tvx.label = speciesNames[i]\n\t\t} else { // set internal node names to their integer label\n\t\t\tvx.label = \"Ancestor Species \" + strconv.Itoa(i)\n\t\t}\n\t\t// point t[i] at current Node\n\t\tt[i] = &vx\n\t}\n\treturn t\n}",
"func NewTree(pattern string, handlers []baa.HandlerFunc) *Tree {\n\tif pattern == \"\" {\n\t\tpanic(\"tree.new: pattern can be empty\")\n\t}\n\treturn &Tree{\n\t\tstatic: true,\n\t\talpha: pattern[0],\n\t\tpattern: pattern,\n\t\tformat: []byte(pattern),\n\t\thandlers: handlers,\n\t}\n}",
"func newTree() *tree {\n\treturn &tree{Index: 0}\n}",
"func (d *decoder) createTree() *node {\n\tif val, _ := readBit(d.r); val {\n\t\treturn &node{readByte(d.r), -1, false, nil, nil}\n\t} else if d.numChars != d.numCharsDecoded {\n\t\tleft := d.createTree()\n\t\tright := d.createTree()\n\t\treturn &node{0, -1, true, left, right}\n\t}\n\n\treturn nil\n}",
"func NewWildcardTree(twc []byte, h func(data ...[]byte) []byte,\n\tm map[string]interface{}) *WildcardTree {\n\twt := new(WildcardTree)\n\t// Order key-value pairs in radix order, creating a Merkle tree and saving\n\t// the resulting indices in a new (final) radix tree for easy look-up\n\tr := radix.NewFromMap(m)\n\ttmp, index := make(map[string]interface{}), 0\n\tvar data [][]byte\n\tr.WalkPrefix(\"\", func(k string, v interface{}) bool {\n\t\tp, ok := v.([][]byte)\n\t\tif !ok {\n\t\t\tpanic(\"This should never happen given the function's precondition\")\n\t\t}\n\t\ttmp[k], index = radixValue{payload: p, index: index}, index+1\n\t\tdata = append(data, append([]byte(k), h(p...)...))\n\t\treturn false\n\t})\n\twt.r = radix.NewFromMap(tmp)\n\twt.mt = NewMerkleTree(twc, leafPrefix, interiorPrefix, h, data)\n\treturn wt\n}",
"func New(a []int) *PTree {\n\tn := len(a)\n\tb := make([]int, n)\n\tfor i := 0; i < n; i++ {\n\t\tb[i] = i\n\t}\n\tsort.Sort(&arrayPerm{a, b})\n\tt := &PTree{}\n\tt.a = a\n\tt.n = len(a)\n\tt.root = make([]*node, n+1)\n\tt.root[n] = t.build(b)\n\tfor i := n - 1; i >= 0; i-- {\n\t\tt.root[i] = t.del(t.root[i+1], i)\n\t}\n\treturn t\n}",
"func newEngine(weights []int, eof Symbol) *engine {\n\t//DEBUG\n\t//fmt.Println(\"Constructing new engine with freq : \", weights)\n\n\te := new(engine)\n\te.freq = weights\n\te.actfreq = append([]int(nil), weights...) // deep copy ...\n\te.eof = eof\n\te.len = len(weights)\n\te.nodes = make([]node, 2*e.len-1, 2*e.len-1)\n\tfor i := range e.nodes {\n\t\te.nodes[i].id = i\n\t\tif i < e.len {\n\t\t\te.nodes[i].weight = uint(e.freq[i])\n\t\t}\n\t}\n\te.makeTree()\n\treturn e\n}",
"func CreateBinaryTree() {\n\tfmt.Fprintln(os.Stderr, \"CreateBinaryTree\")\n\tvar min1i, min2i, pos1, pos2 int\n\tvar point []int = make([]int, MAX_CODE_LENGTH)\n\tvar code []byte = make([]byte, MAX_CODE_LENGTH)\n\tvar count []int64 = make([]int64, vocab_size*2+1)\n\tvar binaryt []int = make([]int, vocab_size*2+1)\n\tvar parent_node []int = make([]int, vocab_size*2+1)\n\tfor a := 0; a < vocab_size; a++ {\n\t\tcount[a] = int64(vocab[a].cn)\n\t}\n\tfor a := vocab_size; a < vocab_size*2; a++ {\n\t\tcount[a] = 1e15\n\t}\n\tpos1 = vocab_size - 1\n\tpos2 = vocab_size\n\t// Following algorithm constructs the Huffman tree by adding one node at a time\n\tfor a := 0; a < vocab_size-1; a++ {\n\t\t// First, find two smallest nodes 'min1, min2'\n\t\tif pos1 >= 0 {\n\t\t\tif count[pos1] < count[pos2] {\n\t\t\t\tmin1i = pos1\n\t\t\t\tpos1--\n\t\t\t} else {\n\t\t\t\tmin1i = pos2\n\t\t\t\tpos2++\n\t\t\t}\n\t\t} else {\n\t\t\tmin1i = pos2\n\t\t\tpos2++\n\t\t}\n\t\tif pos1 >= 0 {\n\t\t\tif count[pos1] < count[pos2] {\n\t\t\t\tmin2i = pos1\n\t\t\t\tpos1--\n\t\t\t} else {\n\t\t\t\tmin2i = pos2\n\t\t\t\tpos2++\n\t\t\t}\n\t\t} else {\n\t\t\tmin2i = pos2\n\t\t\tpos2++\n\t\t}\n\t\tcount[vocab_size+a] = count[min1i] + count[min2i]\n\t\tparent_node[min1i] = vocab_size + a\n\t\tparent_node[min2i] = vocab_size + a\n\t\tbinaryt[min2i] = 1\n\t}\n\t// Now assign binary code to each vocabulary character\n\tfor a := 0; a < vocab_size; a++ {\n\t\tb := a\n\t\ti := 0\n\t\tfor {\n\t\t\tcode[i] = byte(binaryt[b])\n\t\t\tpoint[i] = b\n\t\t\ti++\n\t\t\tb = parent_node[b]\n\t\t\tif b == vocab_size*2-2 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tvocab[a].codelen = byte(i)\n\t\tvocab[a].point[0] = vocab_size - 2\n\t\tfor b = 0; b < i; b++ {\n\t\t\tvocab[a].code[i-b-1] = code[b]\n\t\t\tvocab[a].point[i-b] = point[b] - vocab_size\n\t\t}\n\t}\n}",
"func CreateTree(parties []int, B int, lambda int) []Node {\n\tnodes := make([]Node, (2*B)-1) //create length based on B\n\tfor i := 0; i < len(nodes); i++ {\n\t\tpath, nonces := CreatePath(parties, int(math.Pow(math.Log2(float64(lambda)), 2))) //use path for each node\n\t\tnodes[i].Path = path\n\t\tnodes[i].Nonces = nonces\n\t\t//assigns nodes\n\t}\n\tfactor := 0 //this makes the right parent index\n\tpivotNode := CalculatePivotNode(B)\n\tfor i := 0; i < pivotNode; i++ {\n\t\tnodes[i].Parent = &nodes[B+factor] //so the parent is the right node, and last is null\n\t\tif i%2 == 1 {\n\t\t\tfactor += 1\n\t\t}\n\t}\n\treturn nodes\n\n}",
"func CreateTree(values map[string]cty.Value) *Node {\n\trootNode := &Node{\n\t\tChildren: map[string]*Node{},\n\t}\n\n\tfor name, value := range values {\n\t\tnode := rootNode.getNodePath(name)\n\t\tnode.Value = value\n\t}\n\n\treturn rootNode\n}",
"func (t *HashTreap) newPrunedHashTreap(nodes []ProofNode) {\n\tt.root = t.addProofNode(t.root, &nodes[0])\n\tfor i := 1; i < len(nodes); i++ {\n\t\tt.addProofNode(t.root, &nodes[i])\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
NewTreeFromBS yields a tree from a bitstream encoding of a tree | func NewTreeFromBS(bs *bitstream.BitStream) Tree {
root := newTreeFromBS(bs)
return Tree{root: root}
} | [
"func NewTree() *BPTree {\n\treturn &BPTree{LastAddress: 0, keyPosMap: make(map[string]int64), enabledKeyPosMap: false}\n}",
"func NewBTree(\n\tctx context.Context,\n\tobjStore *objstore.ObjectStore,\n\tencConf pbobject.EncryptionConfig,\n) (*BTree, error) {\n\trootNode := &Node{}\n\trootNode.Leaf = true\n\trootRef, _, err := objStore.StoreObject(ctx, rootNode, encConf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbt := &BTree{\n\t\tobjStore: objStore,\n\t\tencConf: encConf,\n\t\tfreeList: sync.Pool{New: func() interface{} { return &Node{} }},\n\t}\n\n\trootMemNod := bt.newNode()\n\trootMemNod.node = rootNode\n\tbt.root = rootMemNod\n\n\trootNod := &Root{\n\t\tRootNodeRef: rootRef,\n\t}\n\trootNodRef, _, err := objStore.StoreObject(ctx, rootNod, encConf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbt.rootNod = rootNod\n\tbt.rootNodRef = rootNodRef\n\n\treturn bt, nil\n}",
"func (p *Contentity) st2b_BuildIntoTree() *Contentity {\n\tif p.HasError() {\n\t\treturn p\n\t}\n\tvar e error\n\tp.GTree, e = gtree.NewGTreeFromGTags(p.GTags)\n\tif e != nil {\n\t\tprintln(\"==> mcfl.st2b: Error!:\", e.Error())\n\t\tp.WrapError(\"NewGTreeFromGTags\", e)\n\t\treturn p\n\t}\n\tif p.GTree == nil {\n\t\tprintln(\"==> mcfl.st2b: got nil Gtree: %s\", e.Error())\n\t\tp.WrapError(\"nil tree from NewGTreeFromGTags\", e)\n\t}\n\tif p.GTree != nil && p.GTreeWriter != nil &&\n\t\tp.GTreeWriter != io.Discard {\n\t\tgtoken.DumpTo(p.GTokens, p.GTreeWriter)\n\t} else {\n\t\tgtoken.DumpTo(p.GTokens, os.Stdout)\n\t}\n\treturn p\n}",
"func (d *decoder) createTree() *node {\n\tif val, _ := readBit(d.r); val {\n\t\treturn &node{readByte(d.r), -1, false, nil, nil}\n\t} else if d.numChars != d.numCharsDecoded {\n\t\tleft := d.createTree()\n\t\tright := d.createTree()\n\t\treturn &node{0, -1, true, left, right}\n\t}\n\n\treturn nil\n}",
"func NewEncodingTree(freq map[uint8]uint) *Node {\n\tvar head Node // Fictitious head\n\n\tfor i, v := range freq {\n\t\tnode := &Node{\n\t\t\tvalue: i,\n\t\t\tweight: v,\n\t\t}\n\t\thead.insert(node)\n\t}\n\n\tfor head.next != nil && head.next.next != nil {\n\t\tl := head.popFirst()\n\t\tr := head.popFirst()\n\n\t\tnode := join(l, r)\n\t\thead.insert(node)\n\t}\n\n\t// Fictitious head point to tree root\n\tif head.next != nil {\n\t\thead.next.prev = nil\n\t}\n\treturn head.next\n}",
"func BTreeCreate(t int) *BTree {\n\t// create null node to use as place filler\n\tnullNode := &BTreeNode{}\n\tfor i := 0; i < 2*t; i++ {\n\t\tnullNode.children = append(nullNode.children, nullNode)\n\t}\n\n\t// create the tree\n\ttree := BTree{\n\t\tt: t,\n\t\tnullNode: nullNode,\n\t}\n\n\t// create root node\n\tx := tree.AllocateNode()\n\tx.leaf = true\n\ttree.root = x\n\n\t// create null node used to auto-populate children of newly allocated nodes\n\ttree.nullNode = tree.AllocateNode()\n\n\t// *Here is where we'd write the new node to disk\n\treturn &tree\n}",
"func NewTreeFromState(data io.Reader) (*Tree, error) {\n\tidx := &Tree{\n\t\tnewBlocks: make(chan int),\n\t\tdone: make(chan bool),\n\t\tblockMap: make(map[int]int),\n\t}\n\tif err := idx.loadState(data); err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed loading index state : %v\", err)\n\t}\n\tgo idx.blockAllocator()\n\treturn idx, nil\n}",
"func (b *builder) tree() *Tree {\n\tif len(b.stack) != 1 {\n\t\t// The parser failed to produce exactly one node, adding one.\n\t\tb.addNode(node.BrokenFile, 0, len(b.source))\n\t}\n\n\t// Add root to buffer.\n\tindex := b.addNodeToBuffer(&b.stack[0], 0)\n\t// The root has no parent.\n\tb.setParents(index, 0)\n\t// Add size of the root node to the buffer.\n\tb.buffer = append(b.buffer, byte(len(b.buffer)-index))\n\n\treturn &Tree{\n\t\tbuffer: b.buffer,\n\t\tlang: b.lang,\n\t\tpath: b.path,\n\t\tsource: b.source,\n\t\tmapper: offset.NewMapper(b.source),\n\t\tt: b.opts.Type,\n\t}\n}",
"func NewBTree() (btree *BTree) {\n\tbtree = &BTree{}\n\treturn\n}",
"func New() *binaryTree {\n\treturn CreateDefaultTree()\n}",
"func (t Tree) AsBitstream() bitstream.BitStream {\n\tresult := bitstream.BitStream{}\n\tt.asBitstream(&result, t.root)\n\treturn result\n}",
"func NewBinaryTree2(list []int) BinaryTree {\n\tif len(list) == 0 {\n\t\tfmt.Printf(\"list can not be empty: %v\", list)\n\t\tos.Exit(1)\n\t}\n\n\troot := Node{id: 0, value: list[0], children: make([]Node, 2)}\n\tqueue := []Node{root} // list of potential foster parents who can adopt child nodes\n\n\tconst limit = 2\n\tcount := 0\n\tfor i, v := range list[1:] {\n\t\tif count == limit {\n\t\t\tlogger.Printf(\"%+v\", queue[0])\n\t\t\tqueue = queue[1:]\n\t\t\tcount = 0\n\t\t}\n\n\t\tparent := &queue[0]\n\t\tnode := Node{id: i, value: v, children: make([]Node, 2)}\n\t\tparent.children[count] = node\n\n\t\tqueue = append(queue, node)\n\t\tcount++\n\t}\n\n\treturn BinaryTree{root: &root}\n}",
"func NewObjectTree(flags byte) *ObjectTree { return new(ObjectTree).Init(flags) }",
"func BinaryTreeFactory() *BinaryTree {\n\treturn &BinaryTree{0, 1, 0, make([]*Integer, 1)}\n}",
"func testNewBTree() *BTreeG[testKind] {\n\treturn NewBTreeG(testLess)\n}",
"func fromURL(baseURL string) (*btree.BTree, error) {\n\t// Get MD5 sum for tar.gz file\n\tasnMd5URL := baseURL + \"/\" + asnMd5File\n\tresp, err := http.Get(asnMd5URL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmd5Sum, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp.Body.Close()\n\n\tasnURL := baseURL + \"/\" + asnFile\n\t// Load the tar.gz file\n\tresp, err = http.Get(asnURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"%s status %d\", asnURL, resp.StatusCode)\n\t}\n\n\tbodyData, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Build the MD5 sum of the downloaded tar.gz\n\thash := md5.New()\n\tif _, err := io.Copy(hash, bytes.NewReader(bodyData)); err != nil {\n\t\treturn nil, err\n\t}\n\tif string(md5Sum) != hex.EncodeToString(hash.Sum(nil)) {\n\t\tlog.Println(\"asndb checksum mismatch\")\n\t\treturn nil, fmt.Errorf(\"checksum mismatch: %s != %s\", md5Sum, hash.Sum(nil))\n\t}\n\n\t// Copy the data to a temporary file for zip to be able to open it\n\ttmpF, err := ioutil.TempFile(\"/tmp\", \"asndb-\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer os.Remove(tmpF.Name())\n\n\tio.Copy(tmpF, bytes.NewReader(bodyData))\n\ttmpF.Close()\n\n\treturn fromFile(tmpF.Name())\n}",
"func NewTree(db *badger.Storage, root []byte) *Tree {\n\tt := &Tree{\n\t\tdb: newTreeDb(db),\n\t}\n\tt.cache = lru.NewCache(2048)\n\tvar zero [32]byte\n\tif root != nil && len(root) == int(32) && bytes.Compare(root, zero[:]) > common.Zero {\n\t\tt.root = t.mustLoadNode(root)\n\t}\n\n\tif err := FileExist(); err == nil {\n\t\tt.BackCommit()\n\t}\n\n\treturn t\n}",
"func CreateTree(parties []int, B int, lambda int) []Node {\n\tnodes := make([]Node, (2*B)-1) //create length based on B\n\tfor i := 0; i < len(nodes); i++ {\n\t\tpath, nonces := CreatePath(parties, int(math.Pow(math.Log2(float64(lambda)), 2))) //use path for each node\n\t\tnodes[i].Path = path\n\t\tnodes[i].Nonces = nonces\n\t\t//assigns nodes\n\t}\n\tfactor := 0 //this makes the right parent index\n\tpivotNode := CalculatePivotNode(B)\n\tfor i := 0; i < pivotNode; i++ {\n\t\tnodes[i].Parent = &nodes[B+factor] //so the parent is the right node, and last is null\n\t\tif i%2 == 1 {\n\t\t\tfactor += 1\n\t\t}\n\t}\n\treturn nodes\n\n}",
"func main() {\n\tcodec := Constructor()\n\troot := &TreeNode{3, &TreeNode{4, &TreeNode{6, nil, nil}, nil},\n\t\t&TreeNode{5, &TreeNode{7, nil, nil}, nil}}\n\tdata := codec.serialize(root)\n\tprintln(data)\n\tnewRoot := codec.deserialize(data)\n\tprintln(codec.serialize(newRoot))\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Dictionary returns the dictionary defined by this Huffman tree | func (t Tree) Dictionary() DictionaryType {
result := DictionaryType{}
t.buildDictionary(result, bitstream.BitStream{}, t.root)
return result
} | [
"func CreateBinaryTree() {\n\tfmt.Fprintln(os.Stderr, \"CreateBinaryTree\")\n\tvar min1i, min2i, pos1, pos2 int\n\tvar point []int = make([]int, MAX_CODE_LENGTH)\n\tvar code []byte = make([]byte, MAX_CODE_LENGTH)\n\tvar count []int64 = make([]int64, vocab_size*2+1)\n\tvar binaryt []int = make([]int, vocab_size*2+1)\n\tvar parent_node []int = make([]int, vocab_size*2+1)\n\tfor a := 0; a < vocab_size; a++ {\n\t\tcount[a] = int64(vocab[a].cn)\n\t}\n\tfor a := vocab_size; a < vocab_size*2; a++ {\n\t\tcount[a] = 1e15\n\t}\n\tpos1 = vocab_size - 1\n\tpos2 = vocab_size\n\t// Following algorithm constructs the Huffman tree by adding one node at a time\n\tfor a := 0; a < vocab_size-1; a++ {\n\t\t// First, find two smallest nodes 'min1, min2'\n\t\tif pos1 >= 0 {\n\t\t\tif count[pos1] < count[pos2] {\n\t\t\t\tmin1i = pos1\n\t\t\t\tpos1--\n\t\t\t} else {\n\t\t\t\tmin1i = pos2\n\t\t\t\tpos2++\n\t\t\t}\n\t\t} else {\n\t\t\tmin1i = pos2\n\t\t\tpos2++\n\t\t}\n\t\tif pos1 >= 0 {\n\t\t\tif count[pos1] < count[pos2] {\n\t\t\t\tmin2i = pos1\n\t\t\t\tpos1--\n\t\t\t} else {\n\t\t\t\tmin2i = pos2\n\t\t\t\tpos2++\n\t\t\t}\n\t\t} else {\n\t\t\tmin2i = pos2\n\t\t\tpos2++\n\t\t}\n\t\tcount[vocab_size+a] = count[min1i] + count[min2i]\n\t\tparent_node[min1i] = vocab_size + a\n\t\tparent_node[min2i] = vocab_size + a\n\t\tbinaryt[min2i] = 1\n\t}\n\t// Now assign binary code to each vocabulary character\n\tfor a := 0; a < vocab_size; a++ {\n\t\tb := a\n\t\ti := 0\n\t\tfor {\n\t\t\tcode[i] = byte(binaryt[b])\n\t\t\tpoint[i] = b\n\t\t\ti++\n\t\t\tb = parent_node[b]\n\t\t\tif b == vocab_size*2-2 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tvocab[a].codelen = byte(i)\n\t\tvocab[a].point[0] = vocab_size - 2\n\t\tfor b = 0; b < i; b++ {\n\t\t\tvocab[a].code[i-b-1] = code[b]\n\t\t\tvocab[a].point[i-b] = point[b] - vocab_size\n\t\t}\n\t}\n}",
"func buildPrefixTree(byteFrequencies *dictionary.Dictionary) *huffmanTreeNode {\n\ttree := new(priorityqueue.PriorityQueue)\n\tkeys := byteFrequencies.Keys()\n\n\tfor i := 0; i < keys.Size(); i++ {\n\t\tbyt := keys.MustGet(i)\n\t\tfrequency, _ := byteFrequencies.Get(byt)\n\n\t\ttree.Enqueue(frequency.(int), &huffmanTreeNode{frequency: frequency.(int), value: byt.(byte)})\n\t}\n\n\tfor tree.Size() > 1 {\n\t\taPrio, a := tree.Dequeue()\n\t\tbPrio, b := tree.Dequeue()\n\n\t\tnewPrio := aPrio + bPrio\n\n\t\tnode := &huffmanTreeNode{frequency: newPrio, left: a.(*huffmanTreeNode), right: b.(*huffmanTreeNode)}\n\n\t\ttree.Enqueue(newPrio, node)\n\t}\n\n\t_, root := tree.Dequeue()\n\n\treturn root.(*huffmanTreeNode)\n}",
"func buildPatternDict(root *Node) (map[byte]ByteSeq, error) {\n\tleafNodes := make([]*Node, 0)\n\troot.InOrderTraversal(func(n *Node) {\n\t\tif n.IsLeaf() {\n\t\t\tleafNodes = append(leafNodes, n)\n\t\t}\n\t})\n\n\tdict := make(map[byte]ByteSeq)\n\tfor i := 0; i < len(leafNodes); i++ {\n\n\t\t// traverse to root\n\t\tvar byteSeq ByteSeq\n\t\tn := leafNodes[i]\n\t\tfor n != nil {\n\t\t\tp := n.parent\n\t\t\tif p == nil {\n\t\t\t\t// we are at the root, stop processing\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// add a bit depending on the if we are the left or right child\n\t\t\tif p.left == n {\n\t\t\t\tbyteSeq.AddBit(0)\n\t\t\t} else if p.right == n {\n\t\t\t\tbyteSeq.AddBit(1)\n\t\t\t} else {\n\t\t\t\treturn nil, errors.New(\"current node is not a child of its parent\")\n\t\t\t}\n\n\t\t\t// keep going up\n\t\t\tn = p\n\t\t}\n\n\t\tdict[leafNodes[i].symbol] = byteSeq\n\t}\n\n\treturn dict, nil\n}",
"func makeHuffman(p priq.PriQ) *node {\n\trr, ok := p.Remove()\n\tif !ok {\n\t\tpanic(\"not enough elements in the priority queue to make a huffman tree\")\n\t}\t\n\tr := rr.(*node)\n\tll, ok := p.Remove()\n\tif !ok {\n\t\tpanic(\"not enough elements in the priority queue to make a huffman tree\")\n\t}\n\tl := ll.(*node)\t\n\tfor !p.Empty() {\n\t\tparent := new(node)\n\t\tparent.count = l.count + r.count\n\t\tparent.left = l\n\t\tparent.right = r\n\t\tp.Add(parent)\n\n\t\trr, ok = p.Remove()\n\t\tr = rr.(*node)\n\t\tll, ok = p.Remove()\n\t\tl = ll.(*node)\n\t}\n\troot := new(node)\n\troot.count = l.count + r.count\n\troot.left = l\n\troot.right = r\n\treturn root\n}",
"func GetTree() map[string]interface{} {\n\n\treturn tree\n\n}",
"func NewHuffmanEncoder(inp io.ReadSeeker, wc io.Writer) *HuffmanEncoder {\n\the := new(HuffmanEncoder)\n\tfreq := make(map[byte]int)\n\n\tvar b [1]byte\n\t// using the reader, count the frequency of bytes\n\tfor {\n\t\t_, err := inp.Read(b[:])\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tpanic(err)\n\t\t}\n\n\t\t_, ok := freq[b[0]]\n\t\tif !ok {\n\t\t\tfreq[b[0]] = 0\n\t\t}\n\t\tfreq[b[0]]++\n\t}\n\t_, err := inp.Seek(0, io.SeekStart)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tpQ := make(PriorityQueue, len(freq))\n\ti := 0\n\tfor v, f := range freq {\n\t\tpQ[i] = NewHNode(v, f)\n\t\ti++\n\t}\n\n\theap.Init(&pQ)\n\n\tfor pQ.Len() > 1 {\n\t\tzero := pQ.Pop()\n\t\tl := zero.(Item)\n\t\tone := pQ.Pop()\n\t\tr := one.(Item)\n\t\tht := NewHTree(l, r)\n\t\theap.Push(&pQ, ht)\n\t}\n\n\thtree := pQ.Pop()\n\troot, ok := htree.(*HTree)\n\tif !ok {\n\t\tpanic(\"Huffman Tree\")\n\t}\n\the.root = root\n\the.dict = make(map[byte]Huffcode)\n\tfilldict(he.root, \"\", he.dict)\n\the.bw = bs.NewWriter(wc)\n\treturn he\n}",
"func BuildHuffmanCode(depth []byte, counts, values []int) {\n\tfor i := 0; i < JpegHistogram_kSize; i++ {\n\t\tif depth[i] > 0 {\n\t\t\tcounts[depth[i]]++\n\t\t}\n\t}\n\tvar offset [kJpegHuffmanMaxBitLength + 1]int\n\tfor i := 1; i <= kJpegHuffmanMaxBitLength; i++ {\n\t\toffset[i] = offset[i-1] + counts[i-1]\n\t}\n\tfor i := 0; i < JpegHistogram_kSize; i++ {\n\t\tif depth[i] > 0 {\n\t\t\tvalues[offset[depth[i]]] = i\n\t\t\toffset[depth[i]]++\n\t\t}\n\t}\n}",
"func NewEncodingTree(freq map[uint8]uint) *Node {\n\tvar head Node // Fictitious head\n\n\tfor i, v := range freq {\n\t\tnode := &Node{\n\t\t\tvalue: i,\n\t\t\tweight: v,\n\t\t}\n\t\thead.insert(node)\n\t}\n\n\tfor head.next != nil && head.next.next != nil {\n\t\tl := head.popFirst()\n\t\tr := head.popFirst()\n\n\t\tnode := join(l, r)\n\t\thead.insert(node)\n\t}\n\n\t// Fictitious head point to tree root\n\tif head.next != nil {\n\t\thead.next.prev = nil\n\t}\n\treturn head.next\n}",
"func main() {\r\n\ttest := \"abcdefghijklmnopqrstuvwxyz\"\r\n\r\n\tsymFreqs := make(map[rune]int)\r\n\t// read each symbol and record the frequencies\r\n\tfor _, c := range test {\r\n\t\tsymFreqs[c]++\r\n\t}\r\n\r\n\t// example tree\r\n\texampleTree := buildTree(symFreqs)\r\n\r\n\t// print out results\r\n\tfmt.Println(\"SYMBOL\\tWEIGHT\\tHUFFMAN CODE\")\r\n\tprintCodes(exampleTree, []byte{})\r\n}",
"func BuildJpegHuffmanTable(count_in, symbols []int, lut []HuffmanTableEntry) int {\n\tvar (\n\t\tcode HuffmanTableEntry // current table entry\n\t\ttable []HuffmanTableEntry // next available space in table\n\t\tlength int // current code length\n\t\tidx int // symbol index\n\t\tkey int // prefix code\n\t\treps int // number of replicate key values in current table\n\t\tlow int // low bits for current root entry\n\t\ttable_bits int // key length of current table\n\t\ttable_size int // size of current table\n\t\ttotal_size int // sum of root table size and 2nd level table sizes\n\t)\n\n\t// Make a local copy of the input bit length histogram.\n\tvar count [kJpegHuffmanMaxBitLength + 1]int\n\ttotal_count := 0\n\tfor length = 1; length <= kJpegHuffmanMaxBitLength; length++ {\n\t\tcount[length] = count_in[length]\n\t\ttotal_count += count[length]\n\t}\n\n\ttable = lut\n\t// table_delta used in go version, to work around pointer arithmetic\n\ttable_delta := 0\n\ttable_bits = kJpegHuffmanRootTableBits\n\ttable_size = 1 << uint(table_bits)\n\ttotal_size = table_size\n\n\t// Special case code with only one value.\n\tif total_count == 1 {\n\t\tcode.bits = 0\n\t\tcode.value = uint16(symbols[0])\n\t\tfor key = 0; key < total_size; key++ {\n\t\t\ttable[key] = code\n\t\t}\n\t\treturn total_size\n\t}\n\n\t// Fill in root table.\n\tkey = 0\n\tidx = 0\n\tfor length = 1; length <= kJpegHuffmanRootTableBits; length++ {\n\t\tfor ; count[length] > 0; count[length]-- {\n\t\t\tcode.bits = uint8(length)\n\t\t\tcode.value = uint16(symbols[idx])\n\t\t\tidx++\n\t\t\treps = 1 << uint(kJpegHuffmanRootTableBits-length)\n\t\t\tfor ; reps > 0; reps-- {\n\t\t\t\ttable[key] = code\n\t\t\t\tkey++\n\t\t\t}\n\t\t}\n\t}\n\n\t// Fill in 2nd level tables and add pointers to root table.\n\ttable = table[table_size:]\n\ttable_delta += table_size\n\ttable_size = 0\n\tlow = 0\n\tfor length = kJpegHuffmanRootTableBits + 1; length <= kJpegHuffmanMaxBitLength; length++ {\n\t\tfor ; count[length] > 0; 
count[length]-- {\n\t\t\t// Start a new sub-table if the previous one is full.\n\t\t\tif low >= table_size {\n\t\t\t\ttable = table[table_size:]\n\t\t\t\ttable_delta += table_size\n\t\t\t\ttable_bits = NextTableBitSize(count[:], length)\n\t\t\t\ttable_size = 1 << uint(table_bits)\n\t\t\t\ttotal_size += table_size\n\t\t\t\tlow = 0\n\t\t\t\tlut[key].bits = uint8(table_bits + kJpegHuffmanRootTableBits)\n\t\t\t\tlut[key].value = uint16(table_delta - key)\n\t\t\t\tkey++\n\t\t\t}\n\t\t\tcode.bits = uint8(length - kJpegHuffmanRootTableBits)\n\t\t\tcode.value = uint16(symbols[idx])\n\t\t\tidx++\n\t\t\treps = 1 << uint(table_bits-int(code.bits))\n\t\t\tfor ; reps > 0; reps-- {\n\t\t\t\ttable[low] = code\n\t\t\t\tlow++\n\t\t\t}\n\t\t}\n\t}\n\n\treturn total_size\n}",
"func (t *Tree) Hash() []byte {\n\treturn t.rootNode.getHash()\n}",
"func NewHuffmanEncoderWithDict(wc io.Writer, dict []byte) *HuffmanEncoder {\n\the := new(HuffmanEncoder)\n\n\tpQ := make(PriorityQueue, len(dict))\n\tMaxPri := len(dict)\n\tfor i, v := range dict {\n\t\tpQ[i] = NewHNode(v, MaxPri - i)\t// prioritize in order of dict\n\t}\n\n\theap.Init(&pQ)\n\n\tfor pQ.Len() > 1 {\n\t\tzero := pQ.Pop()\n\t\tl := zero.(Item)\n\t\tone := pQ.Pop()\n\t\tr := one.(Item)\n\t\tht := NewHTree(l, r)\n\t\theap.Push(&pQ, ht)\n\t}\n\n\thtree := pQ.Pop()\n\troot, ok := htree.(*HTree)\n\tif !ok {\n\t\tpanic(\"Huffman Tree\")\n\t}\n\the.root = root\n\the.dict = make(map[byte]Huffcode)\n\tfilldict(he.root, \"\", he.dict)\n\the.bw = bs.NewWriter(wc)\n\treturn he\n}",
"func (t *Trie) Tree() gotree.Tree {\n\ttree := gotree.New(t.Name)\n\n\tt.treeAtNode(t.Root, tree)\n\n\treturn tree\n}",
"func (h *hTree) insert(symbol uint32, code uint32, codeLength uint32) error {\n\tif symbol > 0xffff || codeLength > 0xfe {\n\t\treturn errInvalidHuffmanTree\n\t}\n\tbaseCode := uint32(0)\n\tif codeLength > lutSize {\n\t\tbaseCode = uint32(reverseBits[(code>>(codeLength-lutSize))&0xff]) >> (8 - lutSize)\n\t} else {\n\t\tbaseCode = uint32(reverseBits[code&0xff]) >> (8 - codeLength)\n\t\tfor i := 0; i < 1<<(lutSize-codeLength); i++ {\n\t\t\th.lut[baseCode|uint32(i)<<codeLength] = symbol<<8 | (codeLength + 1)\n\t\t}\n\t}\n\n\tn := uint32(0)\n\tfor jump := lutSize; codeLength > 0; {\n\t\tcodeLength--\n\t\tif int(n) > len(h.nodes) {\n\t\t\treturn errInvalidHuffmanTree\n\t\t}\n\t\tswitch h.nodes[n].children {\n\t\tcase leafNode:\n\t\t\treturn errInvalidHuffmanTree\n\t\tcase 0:\n\t\t\tif len(h.nodes) == cap(h.nodes) {\n\t\t\t\treturn errInvalidHuffmanTree\n\t\t\t}\n\t\t\t// Create two empty child nodes.\n\t\t\th.nodes[n].children = int32(len(h.nodes))\n\t\t\th.nodes = h.nodes[:len(h.nodes)+2]\n\t\t}\n\t\tn = uint32(h.nodes[n].children) + 1&(code>>codeLength)\n\t\tjump--\n\t\tif jump == 0 && h.lut[baseCode] == 0 {\n\t\t\th.lut[baseCode] = n << 8\n\t\t}\n\t}\n\n\tswitch h.nodes[n].children {\n\tcase leafNode:\n\t\t// No-op.\n\tcase 0:\n\t\t// Turn the uninitialized node into a leaf.\n\t\th.nodes[n].children = leafNode\n\tdefault:\n\t\treturn errInvalidHuffmanTree\n\t}\n\th.nodes[n].symbol = symbol\n\treturn nil\n}",
"func (d *Dictionary) Dictionary() arrow.Array {\n\tif d.dict == nil {\n\t\td.dict = MakeFromData(d.data.dictionary)\n\t}\n\treturn d.dict\n}",
"func buildCodes(root *huffmanTreeNode, code *vector.Vector, result *dictionary.Dictionary) {\n\tif root == nil {\n\t\treturn\n\t}\n\n\tif isLeafNode(root) {\n\t\tresult.Set(root.value, code)\n\t}\n\n\tbuildCodes(root.left, code.AppendToCopy(byte(0)), result)\n\tbuildCodes(root.right, code.AppendToCopy(byte(1)), result)\n}",
"func (h *hTree) next(d *decoder) (uint32, error) {\n\tvar n uint32\n\t// Read enough bits so that we can use the look-up table.\n\tif d.nBits < lutSize {\n\t\tc, err := d.r.ReadByte()\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\t// There are no more bytes of data, but we may still be able\n\t\t\t\t// to read the next symbol out of the previously read bits.\n\t\t\t\tgoto slowPath\n\t\t\t}\n\t\t\treturn 0, err\n\t\t}\n\t\td.bits |= uint32(c) << d.nBits\n\t\td.nBits += 8\n\t}\n\t// Use the look-up table.\n\tn = h.lut[d.bits&lutMask]\n\tif b := n & 0xff; b != 0 {\n\t\tb--\n\t\td.bits >>= b\n\t\td.nBits -= b\n\t\treturn n >> 8, nil\n\t}\n\tn >>= 8\n\td.bits >>= lutSize\n\td.nBits -= lutSize\n\nslowPath:\n\tfor h.nodes[n].children != leafNode {\n\t\tif d.nBits == 0 {\n\t\t\tc, err := d.r.ReadByte()\n\t\t\tif err != nil {\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\terr = io.ErrUnexpectedEOF\n\t\t\t\t}\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\td.bits = uint32(c)\n\t\t\td.nBits = 8\n\t\t}\n\t\tn = uint32(h.nodes[n].children) + 1&d.bits\n\t\td.bits >>= 1\n\t\td.nBits--\n\t}\n\treturn h.nodes[n].symbol, nil\n}",
"func (t *TrieNode) MarshalJSON() ([]byte, error) {\n\tvar out map[string]interface{}\n\n\tswitch t.nodeKind {\n\tcase \"extension\":\n\t\tfallthrough\n\tcase \"leaf\":\n\t\tvar hexPrefix string\n\t\tfor _, e := range t.elements[0].([]byte) {\n\t\t\thexPrefix += fmt.Sprintf(\"%x\", e)\n\t\t}\n\n\t\t// if we got a byte we need to do this casting otherwise\n\t\t// it will be marshaled to a base64 encoded value\n\t\tif _, ok := t.elements[1].([]byte); ok {\n\t\t\tvar hexVal string\n\t\t\tfor _, e := range t.elements[1].([]byte) {\n\t\t\t\thexVal += fmt.Sprintf(\"%x\", e)\n\t\t\t}\n\n\t\t\tt.elements[1] = hexVal\n\t\t}\n\n\t\tout = map[string]interface{}{\n\t\t\t\"type\": t.nodeKind,\n\t\t\thexPrefix: t.elements[1],\n\t\t}\n\n\tcase \"branch\":\n\t\tout = map[string]interface{}{\n\t\t\t\"type\": \"branch\",\n\t\t\t\"0\": t.elements[0],\n\t\t\t\"1\": t.elements[1],\n\t\t\t\"2\": t.elements[2],\n\t\t\t\"3\": t.elements[3],\n\t\t\t\"4\": t.elements[4],\n\t\t\t\"5\": t.elements[5],\n\t\t\t\"6\": t.elements[6],\n\t\t\t\"7\": t.elements[7],\n\t\t\t\"8\": t.elements[8],\n\t\t\t\"9\": t.elements[9],\n\t\t\t\"a\": t.elements[10],\n\t\t\t\"b\": t.elements[11],\n\t\t\t\"c\": t.elements[12],\n\t\t\t\"d\": t.elements[13],\n\t\t\t\"e\": t.elements[14],\n\t\t\t\"f\": t.elements[15],\n\t\t}\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"nodeKind %s not supported\", t.nodeKind)\n\t}\n\n\treturn json.Marshal(out)\n}",
"func (j *JPEG) decodeHuffman(r io.Reader, h *huffman) (uint8, error) {\n\tif h.nCodes == 0 {\n\t\treturn 0, fmt.Errorf(\"uninitialized Huffman table\")\n\t}\n\n\t/*if d.bits.n < 8 {\n\t if err := d.ensureNBits(8); err != nil {\n\t if err != errMissingFF00 && err != errShortHuffmanData {\n\t return 0, err\n\t }\n\t // There are no more bytes of data in this segment, but we may still\n\t // be able to read the next symbol out of the previously read bits.\n\t // First, undo the readByte that the ensureNBits call made.\n\t if d.bytes.nUnreadable != 0 {\n\t d.unreadByteStuffedByte()\n\t }\n\t goto slowPath\n\t }\n\t }\n\t if v := h.lut[(d.bits.a>>uint32(d.bits.n-lutSize))&0xff]; v != 0 {\n\t n := (v & 0xff) - 1\n\t d.bits.n -= int32(n)\n\t d.bits.m >>= n\n\t return uint8(v >> 8), nil\n\t }*/\n\n\t//slowPath:\n\tfor i, code := 0, int32(0); i < maxCodeLength; i++ {\n\t\tif j.bits.n == 0 {\n\t\t\tif err := j.ensureNBits(r, 1); err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t}\n\t\tif j.bits.a&j.bits.m != 0 {\n\t\t\tcode |= 1\n\t\t}\n\t\tj.bits.n--\n\t\tj.bits.m >>= 1\n\t\tif code <= h.maxCodes[i] {\n\t\t\treturn h.vals[h.valsIndices[i]+code-h.minCodes[i]], nil\n\t\t}\n\t\tcode <<= 1\n\t}\n\treturn 0, fmt.Errorf(\"bad Huffman code\")\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Interpret yilds a byte by interpreting the given bitstream on this Huffman tree | func (t Tree) Interpret(bs *bitstream.BitStream) byte {
return t.walk(t.root, bs)
} | [
"func decodeHuffmanCode(codes *vector.Vector, index int, root *huffmanTreeNode, to *vector.Vector) (int, error) {\n\tif root == nil {\n\t\treturn 0, errors.New(\"No prefix tree supplied\")\n\t}\n\n\tif isLeafNode(root) {\n\t\tto.Append(root.value)\n\t\treturn index, nil\n\t}\n\n\tnext := codes.MustGet(index)\n\tswitch next {\n\tcase byte(0):\n\t\treturn decodeHuffmanCode(codes, index+1, root.left, to)\n\tcase byte(1):\n\t\treturn decodeHuffmanCode(codes, index+1, root.right, to)\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"An unexpected symbol %x found in the compressed data\", next)\n\t}\n}",
"func (s *scanner) Process(b byte) (oper op, err error) {\n\tvar include bool\n\tnext := s.s\n\n\t// 1. Determine State Transition\n\t//\n\t// Find out what the next state of our byte-interpretation (scanState)\n\t// is. Determine the high level operation that needs to happen, if any\n\t// need to. Determine whether to include the examined byte as a value\n\t// in our buffer: not all semantic bytes that are examined have value,\n\t// for example the prototypical 't' for true is both a semantic and\n\t// value byte, 'i' denoting an int is only semantically meaningful but\n\t// provides no value to the integer being described.\n\tswitch s.s {\n\tcase scanFindToken:\n\t\tnext, oper, include, err = s.enc.mustFindToken(b)\n\tcase scanTokenLen:\n\t\tnext, oper, include, err = s.enc.scanTokenLen(b)\n\tcase scanSymbol:\n\t\tnext, oper, include = s.processLengthDeterminedType(valSymbolOp)\n\tcase scanString:\n\t\tnext, oper, include = s.processLengthDeterminedType(valStringOp)\n\tcase scanByteArr:\n\t\tnext, oper, include = s.processLengthDeterminedType(valByteArrOp)\n\tcase scanInt:\n\t\tnext, oper, include, err = s.enc.scanIntToken(b)\n\tcase scanFirstInt:\n\t\tnext, oper, include, err = s.enc.scanFirstIntToken(b)\n\tcase scanFloat64:\n\t\tnext, oper, include, err = s.enc.scanFloat64Token(b)\n\tcase scanFloat32:\n\t\tnext, oper, include, err = s.enc.scanFloat32Token(b)\n\tdefault:\n\t\terr = fmt.Errorf(\"syrup unknown scanstate: %d\", s.s)\n\t}\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// 2. Include the bytes into the buffer if necessary.\n\tif include {\n\t\t// strings.Builder.WriteByte always returns 'nil'\n\t\t_ = s.buf.WriteByte(b)\n\t}\n\t// 3. 
In the special case of parsing a token-length, set our internal\n\t// buffer and length counters appropriately.\n\t//\n\t// Unfortunately this is a leak between the encoding and this generic\n\t// scanner.\n\tif s.s == scanTokenLen && next != scanTokenLen {\n\t\tif oper, err = s.processParsedLen(next); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\t// 4. Finally, transition to the next state.\n\ts.s = next\n\t// 5. If a value-op was returned, it is up to the caller to ensure they\n\t// call one of the value functions, which has the side effect of\n\t// clearing the internal buffer.\n\treturn\n}",
"func (j *JPEG) decodeHuffman(r io.Reader, h *huffman) (uint8, error) {\n\tif h.nCodes == 0 {\n\t\treturn 0, fmt.Errorf(\"uninitialized Huffman table\")\n\t}\n\n\t/*if d.bits.n < 8 {\n\t if err := d.ensureNBits(8); err != nil {\n\t if err != errMissingFF00 && err != errShortHuffmanData {\n\t return 0, err\n\t }\n\t // There are no more bytes of data in this segment, but we may still\n\t // be able to read the next symbol out of the previously read bits.\n\t // First, undo the readByte that the ensureNBits call made.\n\t if d.bytes.nUnreadable != 0 {\n\t d.unreadByteStuffedByte()\n\t }\n\t goto slowPath\n\t }\n\t }\n\t if v := h.lut[(d.bits.a>>uint32(d.bits.n-lutSize))&0xff]; v != 0 {\n\t n := (v & 0xff) - 1\n\t d.bits.n -= int32(n)\n\t d.bits.m >>= n\n\t return uint8(v >> 8), nil\n\t }*/\n\n\t//slowPath:\n\tfor i, code := 0, int32(0); i < maxCodeLength; i++ {\n\t\tif j.bits.n == 0 {\n\t\t\tif err := j.ensureNBits(r, 1); err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t}\n\t\tif j.bits.a&j.bits.m != 0 {\n\t\t\tcode |= 1\n\t\t}\n\t\tj.bits.n--\n\t\tj.bits.m >>= 1\n\t\tif code <= h.maxCodes[i] {\n\t\t\treturn h.vals[h.valsIndices[i]+code-h.minCodes[i]], nil\n\t\t}\n\t\tcode <<= 1\n\t}\n\treturn 0, fmt.Errorf(\"bad Huffman code\")\n}",
"func (h *hTree) next(d *decoder) (uint32, error) {\n\tvar n uint32\n\t// Read enough bits so that we can use the look-up table.\n\tif d.nBits < lutSize {\n\t\tc, err := d.r.ReadByte()\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\t// There are no more bytes of data, but we may still be able\n\t\t\t\t// to read the next symbol out of the previously read bits.\n\t\t\t\tgoto slowPath\n\t\t\t}\n\t\t\treturn 0, err\n\t\t}\n\t\td.bits |= uint32(c) << d.nBits\n\t\td.nBits += 8\n\t}\n\t// Use the look-up table.\n\tn = h.lut[d.bits&lutMask]\n\tif b := n & 0xff; b != 0 {\n\t\tb--\n\t\td.bits >>= b\n\t\td.nBits -= b\n\t\treturn n >> 8, nil\n\t}\n\tn >>= 8\n\td.bits >>= lutSize\n\td.nBits -= lutSize\n\nslowPath:\n\tfor h.nodes[n].children != leafNode {\n\t\tif d.nBits == 0 {\n\t\t\tc, err := d.r.ReadByte()\n\t\t\tif err != nil {\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\terr = io.ErrUnexpectedEOF\n\t\t\t\t}\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\td.bits = uint32(c)\n\t\t\td.nBits = 8\n\t\t}\n\t\tn = uint32(h.nodes[n].children) + 1&d.bits\n\t\td.bits >>= 1\n\t\td.nBits--\n\t}\n\treturn h.nodes[n].symbol, nil\n}",
"func (f *decompressor) huffmanBlock(hl, hd *huffmanDecoder) {\n\tfor {\n\t\tv := f.huffSym(hl)\n\t\tif f.err {\n\t\t\treturn\n\t\t}\n\t\tvar n uint // number of bits extra\n\t\tvar length int\n\t\tswitch {\n\t\tcase v < 256:\n\t\t\tf.out = append(f.out, byte(v))\n\t\t\tcontinue\n\t\tcase v == 256:\n\t\t\t// Done with huffman block; read next block.\n\t\t\treturn\n\t\t// otherwise, reference to older data\n\t\tcase v < 265:\n\t\t\tlength = v - (257 - 3)\n\t\t\tn = 0\n\t\tcase v < 269:\n\t\t\tlength = v*2 - (265*2 - 11)\n\t\t\tn = 1\n\t\tcase v < 273:\n\t\t\tlength = v*4 - (269*4 - 19)\n\t\t\tn = 2\n\t\tcase v < 277:\n\t\t\tlength = v*8 - (273*8 - 35)\n\t\t\tn = 3\n\t\tcase v < 281:\n\t\t\tlength = v*16 - (277*16 - 67)\n\t\t\tn = 4\n\t\tcase v < 285:\n\t\t\tlength = v*32 - (281*32 - 131)\n\t\t\tn = 5\n\t\tdefault:\n\t\t\tlength = 258\n\t\t\tn = 0\n\t\t}\n\t\tif n > 0 {\n\t\t\tfor f.nb < n {\n\t\t\t\tif f.moreBits(); f.err {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tlength += int(f.b & uint32(1<<n-1))\n\t\t\tf.b >>= n\n\t\t\tf.nb -= n\n\t\t}\n\n\t\tvar dist int\n\t\tif hd == nil {\n\t\t\tfor f.nb < 5 {\n\t\t\t\tif f.moreBits(); f.err {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tdist = int(reverseByte[(f.b&0x1F)<<3])\n\t\t\tf.b >>= 5\n\t\t\tf.nb -= 5\n\t\t} else {\n\t\t\tif dist = f.huffSym(hd); f.err {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tswitch {\n\t\tcase dist < 4:\n\t\t\tdist++\n\t\tcase dist >= 30:\n\t\t\tf.err = true\n\t\t\treturn\n\t\tdefault:\n\t\t\tnb := uint(dist-2) >> 1\n\t\t\t// have 1 bit in bottom of dist, need nb more.\n\t\t\textra := (dist & 1) << nb\n\t\t\tfor f.nb < nb {\n\t\t\t\tif f.moreBits(); f.err {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\textra |= int(f.b & uint32(1<<nb-1))\n\t\t\tf.b >>= nb\n\t\t\tf.nb -= nb\n\t\t\tdist = 1<<(nb+1) + 1 + extra\n\t\t}\n\n\t\t// Copy [-dist:-dist+length] into output.\n\t\t// Encoding can be prescient, so no check on length.\n\t\tif dist > len(f.out) {\n\t\t\tf.err = true\n\t\t\treturn\n\t\t}\n\n\t\tp 
:= len(f.out) - dist\n\t\tfor i := 0; i < length; i++ {\n\t\t\tf.out = append(f.out, f.out[p])\n\t\t\tp++\n\t\t}\n\t}\n}",
"func decodeType(t byte) (byte, byte) { return t >> 2, t & 3 }",
"func (d *decoder) decodeHeader() {\n\t// first byte is the number of leaf nodes\n\td.numChars = uint8(readByte(d.r))\n\n\t// read in the total number of characters in the encoded data\n\tbuf := make([]byte, 2)\n\tbuf[0] = readByte(d.r)\n\tbuf[1] = readByte(d.r)\n\n\td.numCharsEncoded = binary.LittleEndian.Uint16(buf)\n\n\t// deserialize the tree\n\td.root = d.createTree()\n}",
"func (h *hTree) insert(symbol uint32, code uint32, codeLength uint32) error {\n\tif symbol > 0xffff || codeLength > 0xfe {\n\t\treturn errInvalidHuffmanTree\n\t}\n\tbaseCode := uint32(0)\n\tif codeLength > lutSize {\n\t\tbaseCode = uint32(reverseBits[(code>>(codeLength-lutSize))&0xff]) >> (8 - lutSize)\n\t} else {\n\t\tbaseCode = uint32(reverseBits[code&0xff]) >> (8 - codeLength)\n\t\tfor i := 0; i < 1<<(lutSize-codeLength); i++ {\n\t\t\th.lut[baseCode|uint32(i)<<codeLength] = symbol<<8 | (codeLength + 1)\n\t\t}\n\t}\n\n\tn := uint32(0)\n\tfor jump := lutSize; codeLength > 0; {\n\t\tcodeLength--\n\t\tif int(n) > len(h.nodes) {\n\t\t\treturn errInvalidHuffmanTree\n\t\t}\n\t\tswitch h.nodes[n].children {\n\t\tcase leafNode:\n\t\t\treturn errInvalidHuffmanTree\n\t\tcase 0:\n\t\t\tif len(h.nodes) == cap(h.nodes) {\n\t\t\t\treturn errInvalidHuffmanTree\n\t\t\t}\n\t\t\t// Create two empty child nodes.\n\t\t\th.nodes[n].children = int32(len(h.nodes))\n\t\t\th.nodes = h.nodes[:len(h.nodes)+2]\n\t\t}\n\t\tn = uint32(h.nodes[n].children) + 1&(code>>codeLength)\n\t\tjump--\n\t\tif jump == 0 && h.lut[baseCode] == 0 {\n\t\t\th.lut[baseCode] = n << 8\n\t\t}\n\t}\n\n\tswitch h.nodes[n].children {\n\tcase leafNode:\n\t\t// No-op.\n\tcase 0:\n\t\t// Turn the uninitialized node into a leaf.\n\t\th.nodes[n].children = leafNode\n\tdefault:\n\t\treturn errInvalidHuffmanTree\n\t}\n\th.nodes[n].symbol = symbol\n\treturn nil\n}",
"func (r *Reader) readHuff(data block, off int, table []uint16) (tableBits, roff int, err error) {\n\tif off >= len(data) {\n\t\treturn 0, 0, r.makeEOFError(off)\n\t}\n\n\thdr := data[off]\n\toff++\n\n\tvar weights [256]uint8\n\tvar count int\n\tif hdr < 128 {\n\t\t// The table is compressed using an FSE. RFC 4.2.1.2.\n\t\tif len(r.fseScratch) < 1<<6 {\n\t\t\tr.fseScratch = make([]fseEntry, 1<<6)\n\t\t}\n\t\tfseBits, noff, err := r.readFSE(data, off, 255, 6, r.fseScratch)\n\t\tif err != nil {\n\t\t\treturn 0, 0, err\n\t\t}\n\t\tfseTable := r.fseScratch\n\n\t\tif off+int(hdr) > len(data) {\n\t\t\treturn 0, 0, r.makeEOFError(off)\n\t\t}\n\n\t\trbr, err := r.makeReverseBitReader(data, off+int(hdr)-1, noff)\n\t\tif err != nil {\n\t\t\treturn 0, 0, err\n\t\t}\n\n\t\tstate1, err := rbr.val(uint8(fseBits))\n\t\tif err != nil {\n\t\t\treturn 0, 0, err\n\t\t}\n\n\t\tstate2, err := rbr.val(uint8(fseBits))\n\t\tif err != nil {\n\t\t\treturn 0, 0, err\n\t\t}\n\n\t\t// There are two independent FSE streams, tracked by\n\t\t// state1 and state2. 
We decode them alternately.\n\n\t\tfor {\n\t\t\tpt := &fseTable[state1]\n\t\t\tif !rbr.fetch(pt.bits) {\n\t\t\t\tif count >= 254 {\n\t\t\t\t\treturn 0, 0, rbr.makeError(\"Huffman count overflow\")\n\t\t\t\t}\n\t\t\t\tweights[count] = pt.sym\n\t\t\t\tweights[count+1] = fseTable[state2].sym\n\t\t\t\tcount += 2\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tv, err := rbr.val(pt.bits)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, 0, err\n\t\t\t}\n\t\t\tstate1 = uint32(pt.base) + v\n\n\t\t\tif count >= 255 {\n\t\t\t\treturn 0, 0, rbr.makeError(\"Huffman count overflow\")\n\t\t\t}\n\n\t\t\tweights[count] = pt.sym\n\t\t\tcount++\n\n\t\t\tpt = &fseTable[state2]\n\n\t\t\tif !rbr.fetch(pt.bits) {\n\t\t\t\tif count >= 254 {\n\t\t\t\t\treturn 0, 0, rbr.makeError(\"Huffman count overflow\")\n\t\t\t\t}\n\t\t\t\tweights[count] = pt.sym\n\t\t\t\tweights[count+1] = fseTable[state1].sym\n\t\t\t\tcount += 2\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tv, err = rbr.val(pt.bits)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, 0, err\n\t\t\t}\n\t\t\tstate2 = uint32(pt.base) + v\n\n\t\t\tif count >= 255 {\n\t\t\t\treturn 0, 0, rbr.makeError(\"Huffman count overflow\")\n\t\t\t}\n\n\t\t\tweights[count] = pt.sym\n\t\t\tcount++\n\t\t}\n\n\t\toff += int(hdr)\n\t} else {\n\t\t// The table is not compressed. 
Each weight is 4 bits.\n\n\t\tcount = int(hdr) - 127\n\t\tif off+((count+1)/2) >= len(data) {\n\t\t\treturn 0, 0, io.ErrUnexpectedEOF\n\t\t}\n\t\tfor i := 0; i < count; i += 2 {\n\t\t\tb := data[off]\n\t\t\toff++\n\t\t\tweights[i] = b >> 4\n\t\t\tweights[i+1] = b & 0xf\n\t\t}\n\t}\n\n\t// RFC 4.2.1.3.\n\n\tvar weightMark [13]uint32\n\tweightMask := uint32(0)\n\tfor _, w := range weights[:count] {\n\t\tif w > 12 {\n\t\t\treturn 0, 0, r.makeError(off, \"Huffman weight overflow\")\n\t\t}\n\t\tweightMark[w]++\n\t\tif w > 0 {\n\t\t\tweightMask += 1 << (w - 1)\n\t\t}\n\t}\n\tif weightMask == 0 {\n\t\treturn 0, 0, r.makeError(off, \"bad Huffman weights\")\n\t}\n\n\ttableBits = 32 - bits.LeadingZeros32(weightMask)\n\tif tableBits > maxHuffmanBits {\n\t\treturn 0, 0, r.makeError(off, \"bad Huffman weights\")\n\t}\n\n\tif len(table) < 1<<tableBits {\n\t\treturn 0, 0, r.makeError(off, \"Huffman table too small\")\n\t}\n\n\t// Work out the last weight value, which is omitted because\n\t// the weights must sum to a power of two.\n\tleft := (uint32(1) << tableBits) - weightMask\n\tif left == 0 {\n\t\treturn 0, 0, r.makeError(off, \"bad Huffman weights\")\n\t}\n\thighBit := 31 - bits.LeadingZeros32(left)\n\tif uint32(1)<<highBit != left {\n\t\treturn 0, 0, r.makeError(off, \"bad Huffman weights\")\n\t}\n\tif count >= 256 {\n\t\treturn 0, 0, r.makeError(off, \"Huffman weight overflow\")\n\t}\n\tweights[count] = uint8(highBit + 1)\n\tcount++\n\tweightMark[highBit+1]++\n\n\tif weightMark[1] < 2 || weightMark[1]&1 != 0 {\n\t\treturn 0, 0, r.makeError(off, \"bad Huffman weights\")\n\t}\n\n\t// Change weightMark from a count of weights to the index of\n\t// the first symbol for that weight. 
We shift the indexes to\n\t// also store how many we have seen so far,\n\tnext := uint32(0)\n\tfor i := 0; i < tableBits; i++ {\n\t\tcur := next\n\t\tnext += weightMark[i+1] << i\n\t\tweightMark[i+1] = cur\n\t}\n\n\tfor i, w := range weights[:count] {\n\t\tif w == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tlength := uint32(1) << (w - 1)\n\t\ttval := uint16(i)<<8 | (uint16(tableBits) + 1 - uint16(w))\n\t\tstart := weightMark[w]\n\t\tfor j := uint32(0); j < length; j++ {\n\t\t\ttable[start+j] = tval\n\t\t}\n\t\tweightMark[w] += length\n\t}\n\n\treturn tableBits, off, nil\n}",
"func decodeNode(hash, buf []byte, cachegen uint16) (node, error) {\n\tif len(buf) == 0 {\n\t\treturn nil, io.ErrUnexpectedEOF\n\t}\n\telems, _, err := ser.SplitList(buf)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"decode error: %v\", err)\n\t}\n\tswitch c, _ := ser.CountValues(elems); c {\n\tcase 2:\n\t\tn, err := decodeShort(hash, elems, cachegen)\n\t\treturn n, wrapError(err, \"short\")\n\tcase 17:\n\t\tn, err := decodeFull(hash, elems, cachegen)\n\t\treturn n, wrapError(err, \"full\")\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"invalid number of list elements: %v\", c)\n\t}\n}",
"func getBitFromByte(b byte, indexInByte int) byte {\n\tb = b << uint(indexInByte)\n\tvar mask byte = 0x80\n\n\tvar bit byte = mask & b\n\n\tif bit == 128 {\n\t\treturn 1\n\t}\n\treturn 0\n}",
"func (t *Tokeniser) tokeniseByte(b byte) bool {\n\tif t.escapeNextChar {\n\t\tt.put(b)\n\t\tt.escapeNextChar = false\n\t\treturn false\n\t}\n\n\tfuncs := map[quoteType]func(b byte) bool{\n\t\tnone: t.tokeniseNoQuotes,\n\t\tsingle: t.tokeniseSingleQuotes,\n\t\tdouble: t.tokeniseDoubleQuotes,\n\t}\n\n\treturn funcs[t.currentQuoteType](b)\n}",
"func (p *gc_bin_parser) rawByte() byte {\n\tb := p.data[0]\n\tr := 1\n\tif b == '|' {\n\t\tb = p.data[1]\n\t\tr = 2\n\t\tswitch b {\n\t\tcase 'S':\n\t\t\tb = '$'\n\t\tcase '|':\n\t\t\t// nothing to do\n\t\tdefault:\n\t\t\tpanic(\"unexpected escape sequence in export data\")\n\t\t}\n\t}\n\tp.data = p.data[r:]\n\tp.read += r\n\treturn b\n\n}",
"func decompressPrefixTree(compressed *vector.Vector, index int) (*huffmanTreeNode, int) {\n\tbyt := compressed.MustGet(index).(byte)\n\tswitch byt {\n\tcase byte(0):\n\t\tleft, nextIndex := decompressPrefixTree(compressed, index+1)\n\t\tright, nextIndex := decompressPrefixTree(compressed, nextIndex)\n\t\treturn &huffmanTreeNode{left: left, right: right}, nextIndex\n\n\tcase byte(1):\n\t\treturn &huffmanTreeNode{value: compressed.MustGet(index + 1).(byte)}, index + 2\n\n\tdefault:\n\t\treturn nil, index + 1\n\t}\n}",
"func (btd Decoder) Decode(data []byte) (bt BitTag, err error) {\n\tif len(data)*8 < btd.BitLength() {\n\t\terr = errors.Errorf(\"invalid data length %d; expected %d bits\",\n\t\t\tlen(data)*8, btd.BitLength())\n\t\treturn\n\t}\n\n\tfields, err := btd.Explode(data)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tbt.uriPrefix = btd.uriPrefix\n\tbt.fields = make([]interface{}, btd.NumFields())\n\tbuff := make([]byte, 8)\n\tfor fieldIdx, field := range fields {\n\t\tif len(field) <= 8 {\n\t\t\tbinary.BigEndian.PutUint64(buff, 0)\n\t\t\tcopy(buff[8-len(field):], field)\n\t\t\tbt.fields[fieldIdx] = binary.BigEndian.Uint64(buff)\n\t\t} else {\n\t\t\tbigInt := big.NewInt(0)\n\t\t\tbigInt.SetBytes(field)\n\t\t\tbt.fields[fieldIdx] = bigInt\n\t\t}\n\t}\n\n\treturn\n}",
"func binarywiredecode(in, buf []byte) (newbuf, msg []byte) {\n\tif buf != nil {\n\t\tbuf = buf[:0]\n\t} else {\n\t\tbuf = make([]byte, 0, 256)\n\t}\n\n\tstart := 1\n\n\tfor i := 0; i < len(in); i++ {\n\t\tswitch ch := in[i]; ch {\n\t\tcase '}': // escape\n\t\t\tif i+1 >= len(in) {\n\t\t\t\tbuf = append(buf, ch)\n\t\t\t} else {\n\t\t\t\tbuf = append(buf, in[i+1]^escapeXor)\n\t\t\t\ti++\n\t\t\t}\n\t\tcase '#': // end of packet\n\t\t\treturn buf, buf[start:]\n\t\tdefault:\n\t\t\tbuf = append(buf, ch)\n\t\t}\n\t}\n\treturn buf, buf[start:]\n}",
"func parseBranch(data []byte, i int) (Node, int) {\n\tif data[i] != '(' {\n\t\tpanic(fmt.Sprintf(\"internal error at offset %d: expected '(', got %c\", i, data[i]))\n\t}\n\ti++\n\tvar br BranchNode\n\tfor i < len(data) {\n\t\tnode, j := parseSequence(data, i)\n\t\tif j > i {\n\t\t\tbr = append(br, node)\n\t\t}\n\t\tswitch data[j] {\n\t\tcase ')':\n\t\t\treturn br, j\n\t\tcase '|':\n\t\t\ti = j + 1\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"parse error at offset %d: expected ')' or '|', got %c (%[1]d)\", j, data[j]))\n\t\t}\n\t}\n\tpanic(\"unexpected end of input\")\n}",
"func decodeTrieNode(c *cid.Cid, b []byte,\n\tleafDecoder trieNodeLeafDecoder) (*TrieNode, error) {\n\tvar (\n\t\ti, decoded, elements []interface{}\n\t\tnodeKind string\n\t\terr error\n\t)\n\n\terr = rlp.DecodeBytes(b, &i)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcodec := c.Type()\n\tswitch len(i) {\n\tcase 2:\n\t\tnodeKind, decoded, err = decodeCompactKey(i)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif nodeKind == \"extension\" {\n\t\t\telements, err = parseTrieNodeExtension(decoded, codec)\n\t\t}\n\t\tif nodeKind == \"leaf\" {\n\t\t\telements, err = leafDecoder(decoded)\n\t\t}\n\t\tif nodeKind != \"extension\" && nodeKind != \"leaf\" {\n\t\t\treturn nil, fmt.Errorf(\"unexpected nodeKind returned from decoder\")\n\t\t}\n\tcase 17:\n\t\tnodeKind = \"branch\"\n\t\telements, err = parseTrieNodeBranch(i, codec)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unknown trie node type\")\n\t}\n\n\treturn &TrieNode{\n\t\tnodeKind: nodeKind,\n\t\telements: elements,\n\t\trawdata: b,\n\t\tcid: c,\n\t}, nil\n}",
"func filenodehash(filename string,ch string,nodenum int64,readbitlen int64) []byte{\n\t//H(ch)is parsed into k indexes.\n\t//Calculate the hash value HCH of ch\n\tvar Hch string = GetSHA256HashCodeString(ch)\n\tvar Hchbyte, _ = hex.DecodeString(Hch)\n\t//Hch,_ := hex.DecodeString(ch)\n\tfmt.Println(\"Hch is \", Hch)\n\tfmt.Println(\"Hchbyte is \", Hchbyte)\n\t//Convert Hch to 01 string\n\tvar Hchstring string = biu.ToBinaryString(Hchbyte)\n\t//remove all \"[\"\n\tHchstring = strings.Replace(Hchstring, \"[\", \"\", -1)\n\t//remove all \"]\"\n\tHchstring = strings.Replace(Hchstring, \"]\", \"\", -1)\n\t//remove all space\n\tHchstring = strings.Replace(Hchstring, \" \", \"\", -1)\n\tfmt.Println(\"Hchstring is \", Hchstring)\n\t//convert nodenum to 01\n\tvar bittosting string = biu.ToBinaryString(nodenum)\n\n\tbittosting = strings.Replace(bittosting, \"[\", \"\", -1)\n\tbittosting = strings.Replace(bittosting, \"]\", \"\", -1)\n\tbittosting = strings.Replace(bittosting, \" \", \"\", -1)\n\tvar stringlen = len(bittosting)\n\n\tfmt.Println(\"nodenum is \", bittosting)\n\tfmt.Println(\"stringlen is \", stringlen)\n\n\tvar stringiter int = 0\n\tvar zerolen int = 0\n\tfor stringiter = 0; stringiter < stringlen; stringiter++ {\n\t\tif '0' != bittosting[stringiter] {\n\t\t\t//zerolen = stringiter + 1\n\t\t\tzerolen = stringiter\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfmt.Println(\"zerolen is \", zerolen)\n\n\n\n\t//The calculation requires eachlen bits to represent the total number of leaf nodes.\n\t//For example, if the number of leaf nodes is 245441, 17 bits are needed to represent it\n\tvar eachlen uintptr = ((unsafe.Sizeof(nodenum) * 8) - uintptr(zerolen))\n\tfmt.Println(\"eachlen is \", eachlen)\n\n\n\n\t//由Hchstring切割得到原文件序号\n\tvar fileposition []int64\n\t//将Hchstring的bit字符串按每eachlen一份进行切割,生成[]string\n\tvar Hcharray []string = ChunkString(Hchstring, int(eachlen))\n\t//fmt.Println(\"chunkarray is \", chunkarray)\n\tvar filebititer int = 0\n\tfor filebititer = 0; filebititer < 
len(Hcharray); filebititer++ {\n\t\tvar tmpint int64 = 0\n\t\tvar partiter int = 0\n\t\tfor partiter = 0; partiter < len(Hcharray[filebititer]); partiter++ {\n\t\t\ttmpint = (tmpint << 1)\n\t\t\tif '1' == Hcharray[filebititer][partiter] {\n\t\t\t\ttmpint = (tmpint) ^ 1\n\t\t\t}\n\t\t\tif tmpint >= nodenum {\n\t\t\t\ttmpint = tmpint % nodenum\n\t\t\t}\n\n\t\t}\n\t\tfileposition = append(fileposition, tmpint)\n\t}\n\n\tfmt.Println(\"fileposition is \", fileposition)\n\tfileposition = RemoveRepeatedElement(fileposition)\n\tfmt.Println(\"fileposition is \", fileposition)\n\tvar fileretdata []byte\n\t//retdata, _ := ReadBlock(filename, readbitlen, 0*readbitlen)\n\t//fmt.Println(\"000000000000retdata is \", retdata)\n\tvar readiter int\n\tfor readiter = 0; readiter < len(fileposition); readiter++ {\n\t\t//fmt.Println(\"readiter is \", readiter)\n\t\t//fmt.Println(\"now fileposition is \", fileposition[readiter])\n\t\tretdata, _ := ReadBlock(filename, readbitlen, (fileposition[readiter])*readbitlen)\n\t\t//fmt.Println(\"retdata is \", retdata)\n\t\tfor _,nounceum := range retdata{\n\t\t\tfileretdata=append(fileretdata,nounceum)\n\t\t}\n\n\t}\n\tfmt.Println(\"fileretdata is \", fileretdata)\n\tfileretdata_hash := GetSHA256HashCode([]byte(fileretdata))\n\n\tvar filebyte_hash []byte\n\tfilebyte_hash, _ = hex.DecodeString(fileretdata_hash)\n\tfmt.Println(\"filebyte_hash is \", filebyte_hash)\n\treturn filebyte_hash\n\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
AsBitstreams encodes this Huffman tree in a bitstream | func (t Tree) AsBitstream() bitstream.BitStream {
result := bitstream.BitStream{}
t.asBitstream(&result, t.root)
return result
} | [
"func NewEncodingTree(freq map[uint8]uint) *Node {\n\tvar head Node // Fictitious head\n\n\tfor i, v := range freq {\n\t\tnode := &Node{\n\t\t\tvalue: i,\n\t\t\tweight: v,\n\t\t}\n\t\thead.insert(node)\n\t}\n\n\tfor head.next != nil && head.next.next != nil {\n\t\tl := head.popFirst()\n\t\tr := head.popFirst()\n\n\t\tnode := join(l, r)\n\t\thead.insert(node)\n\t}\n\n\t// Fictitious head point to tree root\n\tif head.next != nil {\n\t\thead.next.prev = nil\n\t}\n\treturn head.next\n}",
"func encodeToHuffmanCodes(uncompressed *vector.Vector, codes *dictionary.Dictionary) *vector.Vector {\n\tencodedHuffmanCodes := vector.New()\n\n\tfor i := 0; i < uncompressed.Size(); i++ {\n\t\tbyt := uncompressed.MustGet(i)\n\n\t\tiCode, _ := codes.Get(byt)\n\t\tcode := iCode.(*vector.Vector)\n\n\t\tfor j := 0; j < code.Size(); j++ {\n\t\t\tencodedHuffmanCodes.Append(code.MustGet(j))\n\t\t}\n\t}\n\n\treturn encodedHuffmanCodes\n}",
"func compressPrefixTree(root *huffmanTreeNode, to *vector.Vector) {\n\tswitch isLeafNode(root) {\n\tcase true:\n\t\tto.Append(byte(1))\n\t\tto.Append(root.value)\n\tcase false:\n\t\tto.Append(byte(0))\n\t\tcompressPrefixTree(root.left, to)\n\t\tcompressPrefixTree(root.right, to)\n\t}\n}",
"func CreateBinaryTree() {\n\tfmt.Fprintln(os.Stderr, \"CreateBinaryTree\")\n\tvar min1i, min2i, pos1, pos2 int\n\tvar point []int = make([]int, MAX_CODE_LENGTH)\n\tvar code []byte = make([]byte, MAX_CODE_LENGTH)\n\tvar count []int64 = make([]int64, vocab_size*2+1)\n\tvar binaryt []int = make([]int, vocab_size*2+1)\n\tvar parent_node []int = make([]int, vocab_size*2+1)\n\tfor a := 0; a < vocab_size; a++ {\n\t\tcount[a] = int64(vocab[a].cn)\n\t}\n\tfor a := vocab_size; a < vocab_size*2; a++ {\n\t\tcount[a] = 1e15\n\t}\n\tpos1 = vocab_size - 1\n\tpos2 = vocab_size\n\t// Following algorithm constructs the Huffman tree by adding one node at a time\n\tfor a := 0; a < vocab_size-1; a++ {\n\t\t// First, find two smallest nodes 'min1, min2'\n\t\tif pos1 >= 0 {\n\t\t\tif count[pos1] < count[pos2] {\n\t\t\t\tmin1i = pos1\n\t\t\t\tpos1--\n\t\t\t} else {\n\t\t\t\tmin1i = pos2\n\t\t\t\tpos2++\n\t\t\t}\n\t\t} else {\n\t\t\tmin1i = pos2\n\t\t\tpos2++\n\t\t}\n\t\tif pos1 >= 0 {\n\t\t\tif count[pos1] < count[pos2] {\n\t\t\t\tmin2i = pos1\n\t\t\t\tpos1--\n\t\t\t} else {\n\t\t\t\tmin2i = pos2\n\t\t\t\tpos2++\n\t\t\t}\n\t\t} else {\n\t\t\tmin2i = pos2\n\t\t\tpos2++\n\t\t}\n\t\tcount[vocab_size+a] = count[min1i] + count[min2i]\n\t\tparent_node[min1i] = vocab_size + a\n\t\tparent_node[min2i] = vocab_size + a\n\t\tbinaryt[min2i] = 1\n\t}\n\t// Now assign binary code to each vocabulary character\n\tfor a := 0; a < vocab_size; a++ {\n\t\tb := a\n\t\ti := 0\n\t\tfor {\n\t\t\tcode[i] = byte(binaryt[b])\n\t\t\tpoint[i] = b\n\t\t\ti++\n\t\t\tb = parent_node[b]\n\t\t\tif b == vocab_size*2-2 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tvocab[a].codelen = byte(i)\n\t\tvocab[a].point[0] = vocab_size - 2\n\t\tfor b = 0; b < i; b++ {\n\t\t\tvocab[a].code[i-b-1] = code[b]\n\t\t\tvocab[a].point[i-b] = point[b] - vocab_size\n\t\t}\n\t}\n}",
"func BuildHuffmanCode(depth []byte, counts, values []int) {\n\tfor i := 0; i < JpegHistogram_kSize; i++ {\n\t\tif depth[i] > 0 {\n\t\t\tcounts[depth[i]]++\n\t\t}\n\t}\n\tvar offset [kJpegHuffmanMaxBitLength + 1]int\n\tfor i := 1; i <= kJpegHuffmanMaxBitLength; i++ {\n\t\toffset[i] = offset[i-1] + counts[i-1]\n\t}\n\tfor i := 0; i < JpegHistogram_kSize; i++ {\n\t\tif depth[i] > 0 {\n\t\t\tvalues[offset[depth[i]]] = i\n\t\t\toffset[depth[i]]++\n\t\t}\n\t}\n}",
"func NewHuffmanEncoder(inp io.ReadSeeker, wc io.Writer) *HuffmanEncoder {\n\the := new(HuffmanEncoder)\n\tfreq := make(map[byte]int)\n\n\tvar b [1]byte\n\t// using the reader, count the frequency of bytes\n\tfor {\n\t\t_, err := inp.Read(b[:])\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tpanic(err)\n\t\t}\n\n\t\t_, ok := freq[b[0]]\n\t\tif !ok {\n\t\t\tfreq[b[0]] = 0\n\t\t}\n\t\tfreq[b[0]]++\n\t}\n\t_, err := inp.Seek(0, io.SeekStart)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tpQ := make(PriorityQueue, len(freq))\n\ti := 0\n\tfor v, f := range freq {\n\t\tpQ[i] = NewHNode(v, f)\n\t\ti++\n\t}\n\n\theap.Init(&pQ)\n\n\tfor pQ.Len() > 1 {\n\t\tzero := pQ.Pop()\n\t\tl := zero.(Item)\n\t\tone := pQ.Pop()\n\t\tr := one.(Item)\n\t\tht := NewHTree(l, r)\n\t\theap.Push(&pQ, ht)\n\t}\n\n\thtree := pQ.Pop()\n\troot, ok := htree.(*HTree)\n\tif !ok {\n\t\tpanic(\"Huffman Tree\")\n\t}\n\the.root = root\n\the.dict = make(map[byte]Huffcode)\n\tfilldict(he.root, \"\", he.dict)\n\the.bw = bs.NewWriter(wc)\n\treturn he\n}",
"func Encode(in, out *os.File) {\n\tcounts := count(in)\n\tp := makePQ(counts)\n\th := makeHuffman(p)\n\tm := make(map[byte]string)\n\tfillMap(h, m, \"\")\n\tfor k, v := range m {\n\t\tfmt.Printf(\"k: %c, v: %s\\n\", k, v)\n\t}\n}",
"func (this *Codec) serialize(root *TreeNode) string {\r\n if root == nil {\r\n return \"\"\r\n }\r\n s := strconv.Itoa(root.Val)\r\n return s + \" \" + this.serialize(root.Left) + \" \" + this.serialize(root.Right)\r\n}",
"func buildCodes(root *huffmanTreeNode, code *vector.Vector, result *dictionary.Dictionary) {\n\tif root == nil {\n\t\treturn\n\t}\n\n\tif isLeafNode(root) {\n\t\tresult.Set(root.value, code)\n\t}\n\n\tbuildCodes(root.left, code.AppendToCopy(byte(0)), result)\n\tbuildCodes(root.right, code.AppendToCopy(byte(1)), result)\n}",
"func main() {\n\tcodec := Constructor()\n\troot := &TreeNode{3, &TreeNode{4, &TreeNode{6, nil, nil}, nil},\n\t\t&TreeNode{5, &TreeNode{7, nil, nil}, nil}}\n\tdata := codec.serialize(root)\n\tprintln(data)\n\tnewRoot := codec.deserialize(data)\n\tprintln(codec.serialize(newRoot))\n}",
"func (this *Codec) serialize(root *TreeNode) string {\n\tif root == nil {\n\t\treturn \"\"\n\t}\n\tans := make([]string, 0, 10)\n\tserialize(root, &ans)\n\n\treturn strings.Join(ans, \",\")\n}",
"func (enc *HuffmanEncoder) ShowHuffTree() {\n\ttraverse(enc.root, \"\")\n}",
"func printCodes(tree HuffmanTree, prefix []byte, codes *[]HuffmanCode){\n\tswitch i := tree.(type) {\n\tcase HuffmanLeaf:\n\t\t// If this is a leaf node, then it contains one of the input\n\t\t// characters, print the character and its code from byte[]\n\t\tc := HuffmanCode{\n\t\t\tvalue: i.value,\n\t\t\tfreq: i.freq,\n\t\t\tcode: string(prefix),\n\t\t}\n\t\t*codes = append(*codes, c)\n\tcase HuffmanNode:\n\t\t// Assign 0 to left edge and recur\n\t\tprefix = append(prefix, '0')\n\t\tprintCodes(i.left, prefix, codes)\n\t\tprefix = prefix[:len(prefix)-1]\n\n\t\t// Assign 1 to right edge and recur\n\t\tprefix = append(prefix, '1')\n\t\tprintCodes(i.right, prefix, codes)\n\t\tprefix = prefix[:len(prefix)-1]\n\t}\n}",
"func printCodes(tree HuffmanTree, prefix []byte) {\n\tswitch i := tree.(type) {\n\tcase HuffmanLeaf:\n\t\t// If this is a leaf node, then it contains one of the input\n\t\t// characters, print the character and its code from byte[]\n\t\tfmt.Printf(\"%c\\t%d\\t%s\\n\", i.value, i.freq, string(prefix))\n\tcase HuffmanNode:\n\t\t// Assign 0 to left edge and recur\n\t\tprefix = append(prefix, '0')\n\t\tprintCodes(i.left, prefix)\n\t\tprefix = prefix[:len(prefix)-1]\n\n\t\t// Assign 1 to right edge and recur\n\t\tprefix = append(prefix, '1')\n\t\tprintCodes(i.right, prefix)\n\t\tprefix = prefix[:len(prefix)-1]\n\t}\n}",
"func (this *ChuckEncoder) Encode(bit rune, count int) {\n\ttypeBlock := typeBlockFor(bit)\n\tencBlock := strings.Repeat(\"0\", count)\n\t// fmt.Fprintf(\n\t// \tos.Stderr,\n\t// \t\"Encode(%q, %d): typeBlock=%q, encBlock=%q\\n\",\n\t// \tbit, count, typeBlock, encBlock,\n\t// )\n\t*this = append(*this, typeBlock, encBlock)\n}",
"func Encode(value Encodable) ([]byte, error) {\n\tif cbgEncodable, ok := value.(cborgen.CBORMarshaler); ok {\n\t\tbuf := new(bytes.Buffer)\n\t\terr := cbgEncodable.MarshalCBOR(buf)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn buf.Bytes(), nil\n\t}\n\tif ipldEncodable, ok := value.(ipld.Node); ok {\n\t\tbuf := new(bytes.Buffer)\n\t\terr := dagcbor.Encode(ipldEncodable, buf)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn buf.Bytes(), nil\n\t}\n\treturn cbor.DumpObject(value)\n}",
"func encodeStream(r io.Reader, buffer bool) ([]byte, error) {\n\tvar buf bytes.Buffer\n\tif !buffer {\n\t\tw := NewWriter(&buf)\n\t\t_, err := io.Copy(w, r)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\terr = w.Close()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn buf.Bytes(), nil\n\t}\n\n\tw := NewWriter(&buf)\n\t_, err := io.Copy(w, r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = w.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf.Bytes(), nil\n}",
"func main() {\n\troot := TreeNode{\n\t\tVal: 1,\n\t\tLeft: &TreeNode{\n\t\t\tVal: 2,\n\t\t\tLeft: nil,\n\t\t\tRight: nil,\n\t\t},\n\t\tRight: &TreeNode{\n\t\t\tVal: 3,\n\t\t\tLeft: &TreeNode{\n\t\t\t\tVal: 4,\n\t\t\t\tRight: nil,\n\t\t\t\tLeft: nil,\n\t\t\t},\n\t\t\tRight: &TreeNode{\n\t\t\t\tVal: 5,\n\t\t\t\tRight: nil,\n\t\t\t\tLeft: nil,\n\t\t\t},\n\t\t},\n\t}\n\tobj := Constructor()\n\tdata := obj.serialize(&root)\n\tfmt.Println(data)\n}",
"func (head *Node) WriteHeader(w *bitio.Writer, freq map[uint8]uint) (err error) {\n\tvar nEncoded uint32\n\tfor _, v := range freq {\n\t\tnEncoded += uint32(v)\n\t}\n\n\t// Write total number of encoded symbols\n\tw.TryWriteBitsUnsafe(uint64(nEncoded), 32)\n\n\t// Write total number of symbols in graph\n\tw.TryWriteBitsUnsafe(uint64(len(freq)), 8)\n\n\t// Write encoding tree information\n\tif err = head.writeHeader(w); err != nil {\n\t\treturn err\n\t}\n\tw.TryWriteBitsUnsafe(0, 1)\n\treturn w.TryError\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
AllTypes returns all of the value types. | func AllTypes() []Type {
return []Type{TypeInt, TypeUInt, TypeFloat, TypeBool, TypeString}
} | [
"func TypeValues() []Type {\n\treturn _TypeValues\n}",
"func (pkg *Package) ValuesOfType(typeName string) ([]string, map[string]bool, error) {\n\tvar values, inspectErrs []string\n\ttmplsToExclude := map[string]bool{}\n\n\tfor _, file := range pkg.files {\n\t\tast.Inspect(file, func(node ast.Node) bool {\n\t\t\tswitch decl := node.(type) {\n\t\t\tcase *ast.GenDecl:\n\t\t\t\tswitch decl.Tok {\n\t\t\t\tcase token.CONST:\n\t\t\t\t\tvs, err := pkg.constOfTypeIn(typeName, decl)\n\t\t\t\t\tvalues = append(values, vs...)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tinspectErrs = append(inspectErrs, err.Error())\n\t\t\t\t\t}\n\n\t\t\t\tcase token.VAR:\n\t\t\t\t\tvs := pkg.varOfTypeIn(typeName, decl)\n\t\t\t\t\tfor k, v := range vs {\n\t\t\t\t\t\ttmplsToExclude[k] = v\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\treturn true\n\t\t\t\t}\n\n\t\t\tcase *ast.FuncDecl:\n\t\t\t\tvs := pkg.methodsOfTypeIn(typeName, decl)\n\t\t\t\tfor k, v := range vs {\n\t\t\t\t\ttmplsToExclude[k] = v\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn true\n\t\t\t}\n\n\t\t\treturn false\n\t\t})\n\t}\n\n\tif len(inspectErrs) > 0 {\n\t\treturn nil, nil, fmt.Errorf(\"inspecting code:\\n\\t%v\", strings.Join(inspectErrs, \"\\n\\t\"))\n\t}\n\tif len(values) == 0 {\n\t\treturn nil, nil, fmt.Errorf(\"no values defined for type %s\", typeName)\n\t}\n\n\treturn values, tmplsToExclude, nil\n}",
"func Type_Values() []string {\n\treturn []string{\n\t\tTypeRelationship,\n\t\tTypeString,\n\t\tTypeLong,\n\t\tTypeBoolean,\n\t\tTypeInteger,\n\t\tTypeDouble,\n\t\tTypeList,\n\t\tTypeMap,\n\t}\n}",
"func (Type) Values() []Type {\n\treturn []Type{\n\t\t\"PullRequest\",\n\t\t\"RepositoryAnalysis\",\n\t}\n}",
"func TestListAllTypes() []glib.Type {\n\tvar types *C.GType\n\tvar clen C.guint\n\n\ttypes = C.gtk_test_list_all_types(&clen)\n\tdefer C.free(unsafe.Pointer(types))\n\n\tlength := uint(clen)\n\n\ttypeReturn := make([]glib.Type, length)\n\tfor i := uint(0); i < length; i++ {\n\t\tcurrent := (*C.GType)(pointerAtOffset(unsafe.Pointer(types), unsafe.Sizeof(*types), i))\n\t\ttypeReturn[i] = glib.Type(*current)\n\t}\n\treturn typeReturn\n}",
"func SimpleType_Values() []SimpleType {\n\treturn []SimpleType{\n\t\tSimpleTypeBool,\n\t\tSimpleTypeByte,\n\t\tSimpleTypeInt8,\n\t\tSimpleTypeInt16,\n\t\tSimpleTypeInt32,\n\t\tSimpleTypeInt64,\n\t\tSimpleTypeFloat64,\n\t\tSimpleTypeString,\n\t\tSimpleTypeStructEmpty,\n\t}\n}",
"func (Type) Values() []Type {\n\treturn []Type{\n\t\t\"ACCOUNT\",\n\t\t\"ORGANIZATION\",\n\t}\n}",
"func Type_Values() []string {\n\treturn []string{\n\t\tTypeOpenApi3,\n\t}\n}",
"func GetAllTypes() []string {\n\ttypes := make([]string, len(monitorAPI.MessageTypeNames))\n\ti := 0\n\tfor k := range monitorAPI.MessageTypeNames {\n\t\ttypes[i] = k\n\t\ti++\n\t}\n\tsort.Strings(types)\n\treturn types\n}",
"func DataTypeValues() []DataType {\n\treturn _DataTypeValues\n}",
"func AllOfType(types []dgo.Type) dgo.Type {\n\tl := len(types)\n\tswitch l {\n\tcase 0:\n\t\t// And of no types is an unconstrained type\n\t\treturn DefaultAnyType\n\tcase 1:\n\t\treturn types[0]\n\t}\n\tts := make([]dgo.Value, l)\n\tfor i := range types {\n\t\tts[i] = types[i]\n\t}\n\treturn &allOfType{slice: ts, frozen: true}\n}",
"func (CollectionType) Values() []CollectionType {\n\treturn []CollectionType{\n\t\t\"SEARCH\",\n\t\t\"TIMESERIES\",\n\t\t\"VECTORSEARCH\",\n\t}\n}",
"func (TypeHint) Values() []TypeHint {\n\treturn []TypeHint{\n\t\t\"JSON\",\n\t\t\"UUID\",\n\t\t\"TIMESTAMP\",\n\t\t\"DATE\",\n\t\t\"TIME\",\n\t\t\"DECIMAL\",\n\t}\n}",
"func verifyTypeValues() []verifyType {\n\treturn _verifyTypeValues\n}",
"func AnyOfType(types []dgo.Type) dgo.Type {\n\tl := len(types)\n\tswitch l {\n\tcase 0:\n\t\t// Or of no types doesn't represent any values at all\n\t\treturn notAnyType\n\tcase 1:\n\t\treturn types[0]\n\t}\n\tts := make([]dgo.Value, l)\n\tfor i := range types {\n\t\tts[i] = types[i]\n\t}\n\treturn &anyOfType{slice: ts, frozen: true}\n}",
"func (FeatureType) Values() []FeatureType {\n\treturn []FeatureType{\n\t\t\"Integral\",\n\t\t\"Fractional\",\n\t\t\"String\",\n\t}\n}",
"func (MembershipType) Values() []MembershipType {\n\treturn []MembershipType{\n\t\t\"static\",\n\t\t\"igmp\",\n\t}\n}",
"func (ModelInfrastructureType) Values() []ModelInfrastructureType {\n\treturn []ModelInfrastructureType{\n\t\t\"RealTimeInference\",\n\t}\n}",
"func FeatureType_Values() []string {\n\treturn []string{\n\t\tFeatureTypeIntegral,\n\t\tFeatureTypeFractional,\n\t\tFeatureTypeString,\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Valid returns if the current type is one of AllTypes | func (t Type) Valid() bool {
for _, typ := range AllTypes() {
if t == typ {
return true
}
}
return false
} | [
"func (ut UploadType) Valid() bool {\n\tfor _, value := range []string{\n\t\tstring(UploadTypeUSER),\n\t\tstring(UploadTypePRIME),\n\t} {\n\t\tif string(ut) == value {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (i ImageType) Valid() bool {\n\tswitch string(i) {\n\tcase \"\", \"local\", \"all\":\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}",
"func (t Type) Valid() bool {\n\treturn t >= CONNECT && t <= DISCONNECT\n}",
"func ValidType(t Type) bool {\n\treturn StandardType(t) || CustomType(t)\n}",
"func (typ Type) HasAll(t Type) bool { return typ&t == t }",
"func (u Unit) IsAnyType(types ...api.UnitTypeID) bool {\n\tfor _, unitType := range types {\n\t\tif u.UnitType == unitType {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (t Type) Is(ty Type) bool {\n\treturn t&ty != 0\n}",
"func (k Kind) IsAnyOf(of Kind) bool {\n\treturn k&of != BottomKind\n}",
"func (t Type) IsValid() bool {\n\treturn valid_start < t && t < valid_end\n}",
"func (me Tokens) IsAnyOneOf(any ...string) bool {\n\treturn len(me) == 1 && me[0].IsAnyOneOf(any...)\n}",
"func (tagType Type) IsValid() bool {\n\treturn tagType == TypeByte ||\n\t\ttagType == TypeASCII ||\n\t\ttagType == TypeASCIINoNul ||\n\t\ttagType == TypeShort ||\n\t\ttagType == TypeLong ||\n\t\ttagType == TypeRational ||\n\t\ttagType == TypeSignedLong ||\n\t\ttagType == TypeSignedRational ||\n\t\ttagType == TypeUndefined\n}",
"func (t QuotaType) IsValid() bool {\n\treturn t == HardQuota || t == FIFOQuota\n}",
"func (c TransformIOType) IsValid() bool {\n\tswitch c {\n\tcase TransformIOTypeString, TransformIOTypeBool, TransformIOTypeInt, TransformIOTypeInt64, TransformIOTypeFloat64, TransformIOTypeObject, TransformIOTypeArray:\n\t\treturn true\n\t}\n\treturn false\n}",
"func (ar ARType) IsValid() bool {\n\tif ar < AssessmentAR || ar > SubAssessmentAR {\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func typeIsValid(typ types.Type) bool {\n\t// Check named types separately, because we don't want\n\t// to call Underlying() on them to avoid problems with recursive types.\n\tif _, ok := typ.(*types.Named); ok {\n\t\treturn true\n\t}\n\n\tswitch typ := typ.Underlying().(type) {\n\tcase *types.Basic:\n\t\treturn typ.Kind() != types.Invalid\n\tcase *types.Array:\n\t\treturn typeIsValid(typ.Elem())\n\tcase *types.Slice:\n\t\treturn typeIsValid(typ.Elem())\n\tcase *types.Pointer:\n\t\treturn typeIsValid(typ.Elem())\n\tcase *types.Map:\n\t\treturn typeIsValid(typ.Key()) && typeIsValid(typ.Elem())\n\tcase *types.Chan:\n\t\treturn typeIsValid(typ.Elem())\n\tcase *types.Signature:\n\t\treturn typeIsValid(typ.Params()) && typeIsValid(typ.Results())\n\tcase *types.Tuple:\n\t\tfor i := 0; i < typ.Len(); i++ {\n\t\t\tif !typeIsValid(typ.At(i).Type()) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\tcase *types.Struct, *types.Interface:\n\t\t// Don't bother checking structs, interfaces for validity.\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}",
"func (me TorganizationTypes) IsUnit() bool { return me.String() == \"unit\" }",
"func (r ValidationErrors) Any() bool {\n\treturn len(r.Errors) > 0\n}",
"func (right Access) IsOneOf(rights ...Access) bool {\n\tif len(rights) == 0 {\n\t\treturn false\n\t}\n\tfor _, r := range rights {\n\t\tif right == r {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (s State) IsOneOf(targets ...State) bool {\n\tfor _, target := range targets {\n\t\tif s == target {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ConcatSlice Returns a Concatenanted byte array into string | func ConcatSlice(sliceToConcat []byte) string {
var dummy string
for index := 0; index < len(sliceToConcat)-1; index++ {
dummy = dummy + string(sliceToConcat[index]) + "-"
}
dummy = dummy + string(sliceToConcat[len(sliceToConcat)-1])
return dummy
} | [
"func ConcatSlice(sliceToConcat []byte) string {\n\tstringRep := \"\"\n\n\tfor index := 0; index < len(sliceToConcat); index++ {\n\t\tstringRep = stringRep + string(sliceToConcat[index])\n\n\t\tif index+1 != len(sliceToConcat) {\n\t\t\tstringRep = stringRep + \"-\"\n\t\t}\n\t}\n\n\treturn stringRep\n}",
"func ConcatByteSlice(slice1, slice2 []byte) []byte {\n\tnew_slice := make([]byte, len(slice1)+len(slice2))\n\tcopy(new_slice, slice1)\n\tcopy(new_slice[len(slice1):], slice2)\n\treturn new_slice\n}",
"func ConcatenateBytes(data ...[]byte) []byte {\n\tfinalLength := 0\n\tfor _, slice := range data {\n\t\tfinalLength += len(slice)\n\t}\n\tresult := make([]byte, finalLength)\n\tlast := 0\n\tfor _, slice := range data {\n\t\tfor i := range slice {\n\t\t\tresult[i+last] = slice[i]\n\t\t}\n\t\tlast += len(slice)\n\t}\n\treturn result\n}",
"func ConcatByteSlices(arrays ...[]byte) []byte {\n\treturn concatByteSlices(arrays...)\n}",
"func ConcatAppend(slices [][]byte) []byte {\n\tvar tmp []byte\n\tfor _, s := range slices {\n\t\ttmp = append(tmp, s...)\n\t}\n\treturn tmp\n}",
"func Bconcat(slices ...[]byte) []byte {\n\tvar totalLen int\n\tfor _, s := range slices {\n\t\ttotalLen += len(s)\n\t}\n\ttmp := make([]byte, totalLen)\n\tvar i int\n\tfor _, s := range slices {\n\t\ti += copy(tmp[i:], s)\n\t}\n\treturn tmp\n}",
"func concat(a string, b string) string {\n\tvals := make([]byte, 0, 10)\n\tvals = append(vals, a...)\n\tvals = append(vals, b...)\n\treturn string(vals)\n}",
"func AppendByte(slice []byte, data ...byte) []byte {\n m := len(slice)\n n := m + len(data)\n if n > cap(slice) { // if necessary, reallocate\n // allocate double what's needed, for future growth.\n newSlice := make([]byte, (n+1)*2)\n copy(newSlice, slice)\n slice = newSlice\n }\n\tfmt.Println(slice)\n slice = slice[0:n]\n\tfmt.Println(slice)\n\tfmt.Println(slice[m:n])\n copy(slice[m:n], data)\n\tfmt.Println(slice)\n return slice\n}",
"func JoinBytes(separator []byte, byteArray ...[]byte) []byte {\n\n\tvar buffer bytes.Buffer\n\tvar max int = len(byteArray) - 1\n\tfor vi, v := range byteArray {\n\t\tbuffer.Write(v)\n\t\tif vi < max {\n\t\t\tbuffer.Write(separator)\n\t\t}\n\t}\n\treturn buffer.Bytes()\n\n}",
"func (c *Client) Concat(ctx context.Context, p *ConcatPayload) (res string, err error) {\n\tvar ires any\n\tires, err = c.ConcatEndpoint(ctx, p)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn ires.(string), nil\n}",
"func CopySlice(slice []byte) []byte {\n\tcopy := append(slice[:0:0], slice...)\n\treturn copy\n}",
"func joinBytes(args ...[]byte) []byte {\n\treturn bytes.Join(args, []byte{})\n}",
"func (s Slice) Concat(slices ...[]interface{}) Slice {\n\tslicesSlice := make([]interface{}, (len(slices) + 1))\n\tslicesSlice[0] = s\n\tfor i := 0; i < len(slices); i++ {\n\t\tslicesSlice[i+1] = slices[i]\n\t}\n\n\tr := concat(slicesSlice...)\n\treturn r.([]interface{})\n}",
"func append(arr1 []byte, arr2 []byte) []byte {\n\tarr1Len := len(arr1);\n\tnewLen := len(arr1) + len(arr2);\n\tresult := make([]byte, newLen);\n\t\n\tfor i:= 0; i < arr1Len; i++ {\n\t\tresult[i] = arr1[i];\n\t};\n\t\n\tfor i := 0; i < len(arr2); i++ {\n\t\tresult[i + arr1Len] = arr2[i];\n\t};\n\treturn result;\n}",
"func BytesCombine(pBytes ...[]byte) []byte {\n\treturn bytes.Join(pBytes, []byte(\"\"))\n}",
"func ExtendByteSlice(b []byte, needLen int) []byte {\n\treturn bytes.Extend(b, needLen)\n}",
"func testAppendStringToByteSlice() {\n\tfmt.Println(\"testAppendStringToByteSlice\")\n\ts := []byte(\"hello\")\n\ts = append(s, \" world\"...)\n\tfmt.Println(s)\n\tfmt.Println()\n}",
"func (s Slice) Prepend(elems ...byte) Slice {\n\tpad := len(elems)\n\tif pad%wordSize != 0 || pad > s.Pad() {\n\t\tl := wordSize + len(elems) + len(s)\n\t\treturn append(append(make([]byte, wordSize, l), elems...), s...)[wordSize:]\n\t}\n\n\thdr := (*reflect.SliceHeader)(unsafe.Pointer(&s))\n\thdr.Data -= uintptr(pad)\n\thdr.Cap += pad\n\thdr.Len += pad\n\n\tcopy(s, elems)\n\treturn s\n}",
"func byteSliceToString(b []byte) string {\n\treturn *(*string)(unsafe.Pointer(&b))\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
UseHTTPClient allows the default http client to be overriden for calls to the flow service. This function must be called prior to flows.WithFlow to take effect (e.g. from an init method) | func UseHTTPClient(client *http.Client) {
httpClient = client
} | [
"func (o *GetDataflowsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *OptionsUsageParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetTaskStatesDeprecatedParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (c *Client) SetHTTPClient(client *http.Client) {\n\tif client == nil {\n\t\tc.httpClient = http.DefaultClient\n\t\treturn\n\t}\n\tc.httpClient = client\n}",
"func (o *IntegrationsManualHTTPSCreateParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetRuleChainParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *UpdateFlowParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetNetworkExternalParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetIPLoadbalancingServiceNameTaskParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostIPLoadbalancingServiceNameHTTPFrontendParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeprecatedCycleParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetCustomRuleParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateFlowVersionParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *HandleGetAboutUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetNetworkAppliancePortParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *UpdateNetworkSecurityIntrusionSettingsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *HasFlowRunningByFlowIDParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetUsageParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *UpdateApplicationDetectionRulesOrderParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
RegisterAction registers a go function so it can be used as an action in a flow stage | func RegisterAction(actionFunc interface{}) {
if reflect.TypeOf(actionFunc).Kind() != reflect.Func {
panic("Action must be a function!")
}
actions[getActionKey(actionFunc)] = actionFunc
} | [
"func Register(name string, initFunc InitFunc) error {\n\tif _, exists := actions[name]; exists {\n\t\treturn fmt.Errorf(\"action name already registered %s\", name)\n\t}\n\tactions[name] = initFunc\n\n\treturn nil\n}",
"func RegisterAction() {\n\tengine.RegisterAction(\"cron\", NewCron)\n}",
"func NewRegisterAction(dispatch Dispatch, executor cf.Executor) *HTTPRegisterAction {\n\treturn &HTTPRegisterAction{dispatch, executor}\n}",
"func Register(action string, handler WebhookHandler) {\n\thandlerMap[action] = handler\n}",
"func RegulatedAction() {\n\t// Some expensive action goes on here\n}",
"func (c *MockClient) RegisterActionEvent(actions chan<- service.DIDCommAction) error {\n\tif c.ActionEventFunc != nil {\n\t\treturn c.ActionEventFunc(actions)\n\t}\n\n\treturn nil\n}",
"func (s *MockClient) RegisterActionEvent(ch chan<- service.DIDCommAction) error {\n\tif s.RegisterActionFunc != nil {\n\t\treturn s.RegisterActionFunc(ch)\n\t}\n\n\treturn nil\n}",
"func Register(callback func(action schema.Action)) ID {\r\n\tidCounter++\r\n\tid := idCounter\r\n\tcallbacks[id] = callback\r\n\treturn id\r\n}",
"func (server *Server) Register(action INetworkAction) {\n\n}",
"func (client *Client) Register(action INetworkAction) {\n\tclient.actions = append(client.actions, action)\n}",
"func AddActionAction(c *gin.Context) {\n\tresult := render.NewResult()\n\tdefer c.JSON(http.StatusOK, result)\n\n\taction := &model.Action{}\n\tif err := c.BindJSON(action); nil != err {\n\t\tresult.Error(err)\n\n\t\treturn\n\t}\n\n\tsrv := service.FromContext(c)\n\tif err := srv.Actions.Create(c, action); nil != err {\n\t\tresult.Error(err)\n\t}\n}",
"func (self *Engine) RegisterGoFunction(name string) {\n\t// Keep the go function in memory...\n\tif !Utility.Contains(self.goFunctions, name) {\n\t\tself.goFunctions = append(self.goFunctions, name)\n\t}\n\n\taction := make(map[string]interface{})\n\taction[\"id\"] = \"RegisterGoFunction\"\n\taction[\"name\"] = name\n\n\taction[\"done\"] = make(chan bool, 0)\n\tself.actions <- action\n\n\t<-action[\"done\"].(chan bool)\n}",
"func (o *Observer) RegisterAction(topic string, ch <-chan service.DIDCommAction) {\n\tgo func() {\n\t\tfor action := range ch {\n\t\t\to.notify(topic, toAction(action))\n\t\t}\n\t}()\n}",
"func Register(action RegisterAction) cli.Command {\n\treturn cli.Command{\n\n\t\tName: \"register\",\n\t\tAliases: []string{\"r\"},\n\t\tUsage: \"register an app with async event\",\n\t\tAction: func(c *cli.Context) error {\n\t\t\tsourcename := c.Args().Get(0)\n\t\t\troute := c.Args().Get(1)\n\t\t\treturn action.RegisterService(sourcename, route)\n\t\t},\n\t}\n}",
"func NewAction(name string, arg interface{}) {\n\tDefaultActionRegistry.Post(name, arg)\n}",
"func (ma MagicActions) Register(actions ...MagicAction) {\n\tfor _, action := range actions {\n\t\tma[action.Keyword()] = action\n\t}\n}",
"func (ar *ActionRepository) Register(matchingPath string, name string, method actionMethod) {\n\tar.actions[name] = method\n\t_, ok := ar.actionsForPath[matchingPath]\n\tif !ok {\n\t\tar.actionsForPath[matchingPath] = make(map[string]actionMethod)\n\t}\n\tar.actionsForPath[matchingPath][name] = method\n}",
"func (controller *TesterController) RegisterActions(actionsHandler *ActionsHandler) {\n\tactionsHandler.RegisterConnectedAction(controller.connected)\n\tactionsHandler.RegisterDisconnectedAction(controller.connected)\n\tactionsHandler.RegisterErrorAction(controller.error)\n}",
"func Register(kind Kind, create CreateFn) error {\n\tif !kind.Validate() {\n\t\treturn errors.Errorf(\"invalid kind: %s\", kind)\n\t}\n\tif _, ok := Factory[kind]; ok {\n\t\treturn errors.Errorf(\"duplicate to register action executor: %s\", kind)\n\t}\n\tFactory[kind] = create\n\treturn nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
unionRegexp separates values with a | operator to create a string representing a union of regexp patterns. | func unionRegexp(values []string) string {
if len(values) == 0 {
// As a regular expression, "()" and "" are equivalent so this
// condition wouldn't ordinarily be needed to distinguish these
// values. But, our internal search engine assumes that ""
// implies "no regexp" (no values), while "()" implies "match
// empty regexp" (all values) for file patterns.
return ""
}
if len(values) == 1 {
// Cosmetic format for regexp value, wherever this happens to be
// pretty printed.
return values[0]
}
return "(" + strings.Join(values, ")|(") + ")"
} | [
"func NewUnion(l, r Regex) Regex {\n\tswitch l.(type) {\n\tcase *empty:\n\t\treturn r\n\tdefault:\n\t\treturn &union{\n\t\t\tl: l,\n\t\t\tr: r,\n\t\t}\n\t}\n}",
"func MakeRegexOr(ss []string) string {\n\tvar s string\n\tfor _, v := range ss {\n\t\ts = s + v + `|`\n\t}\n\n\t//no need for an | a the end\n\ts = s[0 : len(s)-1]\n\n\treturn s\n}",
"func RegexpSimplify(re *syntax.Regexp,) *syntax.Regexp",
"func Union(ops ...Operator) *UnionOperator {\n\treturn &UnionOperator{Ops: ops}\n}",
"func CombinedPatternsGiven(patterns []*regexp.Regexp) *regexp.Regexp {\n\tvar allPatterns []string\n\tfor _, pattern := range patterns {\n\t\tallPatterns = append(allPatterns, pattern.String())\n\t}\n\treturn regexp.MustCompile(strings.Join(allPatterns, \"|\"))\n}",
"func Union(a, operand []string) []string {\n\tuniq := make(map[string]bool, len(a)+len(operand))\n\tfor _, elem := range a {\n\t\tuniq[elem] = true\n\t}\n\tfor _, elem := range operand {\n\t\tuniq[elem] = true\n\t}\n\tunion := make([]string, len(uniq))\n\ti := 0\n\tfor k := range uniq {\n\t\tunion[i] = k\n\t\ti++\n\t}\n\treturn union[:i]\n}",
"func unionFn(tok token.Token, env *object.Environment, args ...object.Object) object.Object {\n\terr := validateArgs(tok, \"union\", args, 2, [][]string{{object.ARRAY_OBJ}, {object.ARRAY_OBJ}})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tleft := args[0].(*object.Array).Elements\n\tright := args[1].(*object.Array).Elements\n\n\tunion := []object.Object{}\n\n\tfor _, v := range left {\n\t\tunion = append(union, v)\n\t}\n\n\tm := util.Mapify(left)\n\n\tfor _, v := range right {\n\t\t_, found := m[object.GenerateEqualityString(v)]\n\n\t\tif !found {\n\t\t\tunion = append(union, v)\n\t\t}\n\t}\n\n\treturn &object.Array{Elements: union}\n}",
"func unionSelector(a, b Selector) Selector {\n\treturn func(n *Node) bool {\n\t\treturn a(n) || b(n)\n\t}\n}",
"func (p *parser) concat() *Regexp {\n\tp.maybeConcat(-1, 0)\n\n\t// Scan down to find pseudo-operator | or (.\n\ti := len(p.stack)\n\tfor i > 0 && p.stack[i-1].Op < opPseudo {\n\t\ti--\n\t}\n\tsubs := p.stack[i:]\n\tp.stack = p.stack[:i]\n\n\t// Empty concatenation is special case.\n\tif len(subs) == 0 {\n\t\treturn p.push(p.newRegexp(OpEmptyMatch))\n\t}\n\n\treturn p.push(p.collapse(subs, OpConcat))\n}",
"func Unionize(lhs, rhs SelectStatement, unionType string, by OrderBy, limit *Limit, lock string) *Union {\n\n\tunion, isUnion := lhs.(*Union)\n\tif isUnion {\n\t\tunion.UnionSelects = append(union.UnionSelects, &UnionSelect{UnionType: unionType, Statement: rhs})\n\t\tunion.OrderBy = by\n\t\tunion.Limit = limit\n\t\tunion.Lock = lock\n\t\treturn union\n\t}\n\n\treturn &Union{FirstStatement: lhs, UnionSelects: []*UnionSelect{{UnionType: unionType, Statement: rhs}}, OrderBy: by, Limit: limit, Lock: lock}\n}",
"func TestUnion(t *testing.T) {\n\ttests := []struct {\n\t\tnote string\n\t\trules []string\n\t\texpected interface{}\n\t}{\n\t\t{\"union_0_sets\", []string{`p = x { union(set(), x) }`}, \"[]\"},\n\t\t{\"union_2_sets\", []string{`p = x { union({set(), {1, 2}}, x) }`}, \"[1, 2]\"},\n\t\t{\"union_2_sets\", []string{`p = x { s1 = {1, 2, 3}; s2 = {2}; union({s1, s2}, x) }`}, \"[1, 2, 3]\"},\n\t\t{\"union_3_sets\", []string{`p = x { s1 = {1, 2, 3}; s2 = {2, 3, 4}; s3 = {4, 5, 6}; union({s1, s2, s3}, x) }`}, \"[1, 2, 3, 4, 5, 6]\"},\n\t\t{\"union_4_sets\", []string{`p = x { s1 = {\"a\", \"b\", \"c\", \"d\"}; s2 = {\"b\", \"c\", \"d\"}; s3 = {\"c\", \"d\"}; s4 = {\"d\"}; union({s1, s2, s3, s4}, x) }`}, \"[\\\"a\\\", \\\"b\\\", \\\"c\\\", \\\"d\\\"]\"},\n\t}\n\n\tdata := loadSmallTestData()\n\n\tfor _, tc := range tests {\n\t\trunTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)\n\t}\n}",
"func unconcat(r *syntax.Regexp) (bool, *syntax.Regexp) {\n\tswitch {\n\tcase r.Op == syntax.OpConcat && len(r.Sub) <= 1:\n\t\tif len(r.Sub) == 1 {\n\t\t\treturn true, r.Sub[0]\n\t\t}\n\n\t\treturn true, &syntax.Regexp{\n\t\t\tOp: syntax.OpEmptyMatch,\n\t\t\tFlags: r.Flags,\n\t\t}\n\n\tcase r.Op == syntax.OpRepeat && r.Min == r.Max && r.Min == 1:\n\t\treturn true, r.Sub[0]\n\t}\n\n\treturn false, r\n}",
"func (wm WordMask) union(other WordMask) WordMask {\n\treturn wm | other\n}",
"func union(a, b []string) [][]rune {\n\tm := make(map[string]bool)\n\tfor _, item := range a {\n\t\tm[item] = true\n\t}\n\tfor _, item := range b {\n\t\tif _, ok := m[item]; !ok {\n\t\t\ta = append(a, item)\n\t\t}\n\t}\n\n\t// Convert a to rune matrix (with x -> words and y -> characters)\n\tout := make([][]rune, len(a))\n\tfor i, word := range a {\n\t\tout[i] = []rune(word)\n\t}\n\treturn out\n}",
"func UUIDRegexpMux() *regexp.Regexp {\n\treturn regexp.MustCompile(uuidV4Regexp)\n}",
"func encodeRegexp(buf *bytes.Buffer, name string, val Regexp) error {\n\t// type\n\tif err := buf.WriteByte(_REGEXP); err != nil {\n\t\treturn err\n\t}\n\n\t// name\n\tif err := writeCstring(buf, name); err != nil {\n\t\treturn err\n\t}\n\n\t// regex\n\tif err := writeCstring(buf, val.Pattern); err != nil {\n\t\treturn err\n\t}\n\n\t// options\n\treturn writeCstring(buf, val.Options)\n}",
"func NewUnionExpr(a, b Expr) Expr {\n\treturn newBinExpr(a, b, \"|\", \"(%s | %s)\",\n\t\tfunc(a, b Value, _ Scope) (Value, error) {\n\t\t\tif x, ok := a.(Set); ok {\n\t\t\t\tif y, ok := b.(Set); ok {\n\t\t\t\t\treturn Union(x, y), nil\n\t\t\t\t}\n\t\t\t\treturn nil, errors.Errorf(\"<&> rhs must be a Set, not %T\", b)\n\t\t\t}\n\t\t\treturn nil, errors.Errorf(\"<&> lhs must be a Set, not %T\", a)\n\t\t})\n}",
"func selectByRegexp(vals, regexps []string) ([]string, error) {\n\tvar matches []string\n\tfor _, s := range vals {\n\t\tfor _, r := range regexps {\n\t\t\tfound, err := regexp.MatchString(r, s)\n\t\t\tif err != nil {\n\t\t\t\treturn matches, err\n\t\t\t}\n\t\t\tif found {\n\t\t\t\tmatches = append(matches, s)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\treturn matches, nil\n}",
"func U(ranges ...string) Pattern {\n\t// preprocessing names\n\tiset := make(map[string]bool)\n\teset := make(map[string]bool)\n\tfor _, name := range ranges {\n\t\tif strings.HasPrefix(name, \"-\") {\n\t\t\teset[name[1:]] = true\n\t\t} else {\n\t\t\tiset[name] = true\n\t\t}\n\t}\n\tinames := make([]string, 0, len(iset))\n\tfor name := range iset {\n\t\tinames = append(inames, name)\n\t}\n\tenames := make([]string, 0, len(eset))\n\tfor name := range eset {\n\t\tenames = append(enames, name)\n\t}\n\n\t// choose underlying type\n\tswitch {\n\tcase len(inames) == 0 && len(enames) == 0:\n\t\treturn False\n\tcase len(enames) == 0:\n\t\tpat := &patternUnicodeRanges{not: false}\n\t\terr := pat.set(inames)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn pat\n\tcase len(inames) == 0:\n\t\tpat := &patternUnicodeRanges{not: true}\n\t\terr := pat.set(enames)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn pat\n\tdefault:\n\t\tpat := &patternUnicodeRangesWithExcluding{}\n\t\tpat.include.not = false\n\t\terr := pat.include.set(inames)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tpat.exclude.not = true\n\t\terr = pat.exclude.set(enames)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn pat\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
langToFileRegexp converts a lang: parameter to its corresponding file patterns for file filters. The lang value must be valid, cf. validate.go | func langToFileRegexp(lang string) string {
lang, _ = enry.GetLanguageByAlias(lang) // Invariant: lang is valid.
extensions := enry.GetLanguageExtensions(lang)
patterns := make([]string, len(extensions))
for i, e := range extensions {
// Add `\.ext$` pattern to match files with the given extension.
patterns[i] = regexp.QuoteMeta(e) + "$"
}
return unionRegexp(patterns)
} | [
"func FileExtToLanguage(ext string) (Language, error) {\n\tswitch {\n\tcase ext == \"c\":\n\t\treturn LangC, nil\n\tcase ext == \"cpp\" || ext == \"cxx\" || ext == \"C\":\n\t\treturn LangCPP, nil\n\t\t//\tcase ext == \"java\":\n\t\t//\t\treturn LangJava, nil\n\t\t//\tcase ext == \"py\":\n\t\t//\t\treturn LangPython2, nil\n\t\t//\tcase ext == \"py3\":\n\t\t//\t\treturn LangPython3, nil\n\t\t//\tcase ext == \"hs\":\n\t\t//\t\treturn LangHaskell, nil\n\t\t//\tcase ext == \"rb\":\n\t\t//\t\treturn LangRuby, nil\n\t\t//\tcase ext == \"lisp\":\n\t\t//\t\treturn LangCommonLISP, nil\n\t\t//\tcase ext == \"pas\":\n\t\t//\t\treturn LangPascal, nil\n\tcase ext == \"go\":\n\t\treturn LangGo, nil\n\tdefault:\n\t\treturn \"\", ErrInvalidLanguage(ext)\n\t}\n}",
"func LangColumnNameRegexp() *regexp.Regexp {\n\tlangColumnRegexpInitializer.Do(func() {\n\t\tr, err := regexp.Compile(\"lang_([a-z]{2})\")\n\t\tif err != nil {\n\t\t\tpanic(fmt.Errorf(\"Can't create a regexp for lang column name. Reason: %w. Please, submit an issue with the execution logs here: https://github.com/s0nerik/goloc\", err))\n\t\t}\n\t\tlangColumnRegexp = r\n\t})\n\treturn langColumnRegexp\n}",
"func NewRegexpFile(r *regexp.Regexp) *File {\n\treturn NewFile(®expElement{Data: r})\n}",
"func ruleToRegexp(text string) (string, error) {\n\tif text == \"\" {\n\t\treturn \".*\", nil\n\t}\n\n\t// already regexp?\n\tlength := len(text)\n\tif length >= 2 && text[:1] == \"/\" && text[length-1:] == \"/\" {\n\t\t// filter is a regular expression\n\t\treturn text[1 : length-1], nil\n\t}\n\n\trule := escapeSpecialRegxp.ReplaceAllStringFunc(text, func(src string) string {\n\t\treturn fmt.Sprintf(`\\%v`, src)\n\t})\n\trule = strings.Replace(rule, \"^\", `(?:[^\\\\w\\\\d_\\\\\\-.%]|$)`, -1)\n\trule = strings.Replace(rule, \"*\", \".*\", -1)\n\n\tlength = len(rule)\n\tif rule[length-1] == '|' {\n\t\trule = rule[:length-1] + \"$\"\n\t}\n\n\tif rule[:2] == \"||\" {\n\t\tif len(rule) > 2 {\n\t\t\trule = `^(?:[^:/?#]+:)?(?://(?:[^/?#]*\\\\.)?)?` + rule[2:]\n\t\t}\n\t} else if rule[0] == '|' {\n\t\trule = \"^\" + rule[1:]\n\t}\n\n\trule = re.MustCompile(`(\\|)[^$]`).ReplaceAllString(rule, `\\|`)\n\n\treturn rule, nil\n}",
"func validateLanguage(lang string) {\n\tif !contains(textgame.ReadLanguages(), lang) {\n\t\tfmt.Println(\"Unknown Language\")\n\t\tos.Exit(1)\n\t}\n}",
"func FilterLang(text, lang string) (new string) {\n\tfor _, value := range text {\n\t\tif unicode.IsLetter(value) || unicode.Is(unicode.Scripts[lang], value) {\n\t\t\tnew += string(value)\n\t\t}\n\t}\n\n\treturn\n}",
"func LangValidator(l Lang) error {\n\tswitch l {\n\tcase LangPy, LangJs:\n\t\treturn nil\n\tdefault:\n\t\treturn fmt.Errorf(\"allocationstrategy: invalid enum value for lang field: %q\", l)\n\t}\n}",
"func readFileToRegexpList(filename string) []*regexp.Regexp {\n\tvar regexp_list = []*regexp.Regexp{}\n\tcontent, err := ioutil.ReadFile(filename)\n\tif err == nil {\n\t\tfor _, line := range strings.Split(string(content), \"\\n\") {\n\t\t\tif line != \"\" {\n\t\t\t\tregexp_list = append(regexp_list, regexp.MustCompile(line))\n\t\t\t}\n\t\t}\n\t}\n\treturn regexp_list\n}",
"func (rule *Rule) fileFilterer(filename string) bool {\n\tdidMatch, err := regexp.MatchString(rule.rawRuleText, filename)\n\tif err != nil {\n\t\tlog.Fatal(\"Broken regexp in \" + rule.containingFileAbsPath +\n\t\t\t\". Could not process regular expression \" +\n\t\t\trule.rawRuleText)\n\t}\n\n\tif didMatch {\n\t\treturn true\n\t}\n\treturn false\n}",
"func filterLang(p pageView, lang string, emit func(pageView)) {\n\tif p.Lang == lang {\n\t\temit(p)\n\t}\n}",
"func ValidateLanguage(lang string) bool {\n\tif len(lang) != 2 {\n\t\treturn false\n\t}\n\tfor _, lan := range exixtentLanguages {\n\t\tif lan == lang {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func SetLanguageFilePath(path string) string {\n\tlastPath := langFilePath\n\tlangFilePath = path\n\treturn lastPath\n}",
"func getFileRE(chrt *chart.Chart, re *regexp.Regexp) *chart.File {\n\tfor _, file := range chrt.Files {\n\t\tif re.Match([]byte(file.Name)) {\n\t\t\treturn file\n\t\t}\n\t}\n\treturn nil\n}",
"func detectLanguage(f *os.File) (string, error) {\n\tswitch filepath.Ext(f.Name()) {\n\tcase \".go\":\n\t\treturn golang, nil\n\tcase \".py\":\n\t\treturn python, nil\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"unknown language for file %v\", f.Name())\n\t}\n}",
"func (c *Client) MatchLang(strings ...string) Lang {\n\t_, index := language.MatchStrings(c.matcher, strings...)\n\treturn c.langs[index]\n}",
"func checkLang() {\n\tif flag_lang == \"\" {\n\t\treturn\n\t}\n\n\tvar err error\n\tlangWant, err = parseLang(flag_lang)\n\tif err != nil {\n\t\tlog.Fatalf(\"invalid value %q for -lang: %v\", flag_lang, err)\n\t}\n\n\tif def := currentLang(); flag_lang != def {\n\t\tdefVers, err := parseLang(def)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"internal error parsing default lang %q: %v\", def, err)\n\t\t}\n\t\tif langWant.major > defVers.major || (langWant.major == defVers.major && langWant.minor > defVers.minor) {\n\t\t\tlog.Fatalf(\"invalid value %q for -lang: max known version is %q\", flag_lang, def)\n\t\t}\n\t}\n}",
"func FileRegexpMap(fn string, delim string, match string) (map[string]string, error) {\n\t// Check file exist's.\n\tif _, err := os.Stat(fn); os.IsNotExist(err) {\n\t\treturn nil, fmt.Errorf(\"file doesn't exist: %s\", fn)\n\t}\n\n\tr, err := ioutil.ReadFile(fn)\n\tif err != nil {\n\t\treturn map[string]string{}, err\n\t}\n\n\t// Compile regexp.\n\treDelim := regexp.MustCompile(delim)\n\treMatch := regexp.MustCompile(match)\n\n\t// Parse output and create key/value map.\n\tm := make(map[string]string)\n\tfor _, l := range strings.Split(string(r), \"\\n\") {\n\t\tif !reMatch.MatchString(l) {\n\t\t\tcontinue\n\t\t}\n\n\t\tv := reDelim.Split(l, -1)\n\t\tif len(v) < 1 {\n\t\t\tcontinue\n\t\t}\n\n\t\tm[strings.TrimSpace(v[0])] = strings.TrimSpace(v[1])\n\t}\n\n\treturn m, nil\n}",
"func (c *Lang) cleanLang(code string) error {\n\ts := c.langFileName(\"tmp_dir\", code)\n\terr := ioutil.WriteFile(s, []byte(\"[]\"), 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer os.Remove(s)\n\treturn i18n.LoadTranslationFile(s)\n}",
"func LanguageFilter(language string) SnippetFilter {\n\treturn func(snippet *snippet.Snippet) bool {\n\t\tif language == \"\" {\n\t\t\treturn true\n\t\t}\n\t\treturn snippet.GetVar(\"language\") != \"\" && snippet.GetVar(\"language\") == language\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ToTextPatternInfo converts a an atomic query to internal values that drive text search. An atomic query is a Basic query where the Pattern is either nil, or comprises only one Pattern node (hence, an atom, and not an expression). See TextPatternInfo for the values it computes and populates. | func ToTextPatternInfo(q query.Basic, p Protocol, transform query.BasicPass) *TextPatternInfo {
q = transform(q)
// Handle file: and -file: filters.
filesInclude, filesExclude := IncludeExcludeValues(q, query.FieldFile)
// Handle lang: and -lang: filters.
langInclude, langExclude := IncludeExcludeValues(q, query.FieldLang)
filesInclude = append(filesInclude, mapSlice(langInclude, langToFileRegexp)...)
filesExclude = append(filesExclude, mapSlice(langExclude, langToFileRegexp)...)
filesReposMustInclude, filesReposMustExclude := IncludeExcludeValues(q, query.FieldRepoHasFile)
selector, _ := filter.SelectPathFromString(q.FindValue(query.FieldSelect)) // Invariant: select is validated
count := count(q, p)
// Ugly assumption: for a literal search, the IsRegexp member of
// TextPatternInfo must be set true. The logic assumes that a literal
// pattern is an escaped regular expression.
isRegexp := q.IsLiteral() || q.IsRegexp()
var pattern string
if p, ok := q.Pattern.(query.Pattern); ok {
if q.IsLiteral() {
// Escape regexp meta characters if this pattern should be treated literally.
pattern = regexp.QuoteMeta(p.Value)
} else {
pattern = p.Value
}
}
if q.Pattern == nil {
// For compatibility: A nil pattern implies isRegexp is set to
// true. This has no effect on search logic.
isRegexp = true
}
negated := false
if p, ok := q.Pattern.(query.Pattern); ok {
negated = p.Negated
}
return &TextPatternInfo{
// Values dependent on pattern atom.
IsRegExp: isRegexp,
IsStructuralPat: q.IsStructural(),
IsCaseSensitive: q.IsCaseSensitive(),
FileMatchLimit: int32(count),
Pattern: pattern,
IsNegated: negated,
// Values dependent on parameters.
IncludePatterns: filesInclude,
ExcludePattern: unionRegexp(filesExclude),
FilePatternsReposMustInclude: filesReposMustInclude,
FilePatternsReposMustExclude: filesReposMustExclude,
Languages: langInclude,
PathPatternsAreCaseSensitive: q.IsCaseSensitive(),
CombyRule: q.FindValue(query.FieldCombyRule),
Index: q.Index(),
Select: selector,
}
} | [
"func make_pattern_text(T int, // size of text\n\tP int, // size of pattern.\n\tN int, // number of pattern repetitions\n) ([]byte, []byte) {\n\n\tM := int(T / P) // Max # patterns that fit in text\n\tif M < N {\n\t\tpanic(fmt.Sprintf(\"make_pattern_text M < N. T=%d,P=%d,N=%d,M=%d\", T, P, N, M))\n\t}\n\tD := int(M / N) // Distance between filled pattern slots\n\n\ttext := make([]byte, T, T) // String to be indexed and searched\n\tpattern := make([]byte, P, P) // Substring to search for\n\tunpattern := make([]byte, P, P)\n\n\tfor j := 0; j < P; j++ {\n\t\tpattern[j] = byte(j%0xFD + 1)\n\t}\n\tfor j := 0; j < P-1; j++ {\n\t\tunpattern[j] = byte(j%0xFD + 1)\n\t}\n\tunpattern[P-1] = 0xFF\n\n\t// for j := P - 10; j < P; j++ {\n\t// \tfmt.Printf(\"%5d: %3d\\n\", j, pattern[j])\n\t// }\n\n\tn := 0\n\tfor m := 0; m < M; m++ {\n\t\tt0 := m * P\n\t\tvar pat []byte\n\t\tif m%D == 0 && n < N {\n\t\t\tpat = pattern\n\t\t\tn++\n\t\t} else {\n\t\t\tpat = unpattern\n\t\t}\n\t\tfor j := 0; j < P; j++ {\n\t\t\ttext[t0+j] = pat[j]\n\t\t}\n\t}\n\n\treturn pattern, text\n}",
"func (e *Extractor) ExtractText() (string, error) {\n\tvar buf bytes.Buffer\n\n\tcstreamParser := contentstream.NewContentStreamParser(e.contents)\n\toperations, err := cstreamParser.Parse()\n\tif err != nil {\n\t\treturn buf.String(), err\n\t}\n\n\tprocessor := contentstream.NewContentStreamProcessor(*operations)\n\n\tvar codemap *cmap.CMap\n\tvar cidCodemap *cmap.CMap\n\tvar font *model.Font\n\tinText := false\n\txPos, yPos, xTx := float64(-1), float64(-1), float64(-1)\n\n\tpreRect0, preRect1, preRect2, preRect3 := float64(-1), float64(-1), float64(-1), float64(-1)\n\trect0, rect1, rect2, rect3 := float64(-1), float64(-1), float64(-1), float64(-1)\n\n\tvar cMatrix [6]float64 = [6]float64{1, 0, 0, 1, 0, 0}\n\n\tfontSize := 0.0\n\tmScaling := 100.0\n\n\tprocessor.AddHandler(contentstream.HandlerConditionEnumAllOperands, \"\",\n\t\tfunc(op *contentstream.ContentStreamOperation, f model.FontsByNames) error {\n\t\t\toperand := op.Operand\n\t\t\tswitch operand {\n\t\t\tcase \"cm\":\n\t\t\t\tif inText {\n\t\t\t\t\tcommon.Log.Debug(\"cm operand outside text\")\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tif len(op.Params) != 6 {\n\t\t\t\t\tcommon.Log.Debug(\"Error cm should only get 6 input params, got %d\", len(op.Params))\n\t\t\t\t\treturn errors.New(\"Incorrect parameter count\")\n\t\t\t\t}\n\n\t\t\t\tfor i := 0; i < 6; i++ {\n\t\t\t\t\tcMatrix[i], err = core.GetNumberAsFloat(op.Params[i])\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tcommon.Log.Debug(\"cm Float parse error\")\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase \"re\":\n\t\t\t\tif inText {\n\t\t\t\t\tcommon.Log.Debug(\"re operand outside text\")\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tif len(op.Params) != 4 {\n\t\t\t\t\tcommon.Log.Debug(\"Error re should only get 4 input params, got %d\", len(op.Params))\n\t\t\t\t\treturn errors.New(\"Incorrect parameter count\")\n\t\t\t\t}\n\n\t\t\t\trect0, err = core.GetNumberAsFloat(op.Params[0])\n\t\t\t\tif err != nil {\n\t\t\t\t\tcommon.Log.Debug(\"re Float 
parse error\")\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\trect1, err = core.GetNumberAsFloat(op.Params[1])\n\t\t\t\tif err != nil {\n\t\t\t\t\tcommon.Log.Debug(\"re Float parse error\")\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\trect2, err = core.GetNumberAsFloat(op.Params[2])\n\t\t\t\tif err != nil {\n\t\t\t\t\tcommon.Log.Debug(\"re Float parse error\")\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\trect3, err = core.GetNumberAsFloat(op.Params[3])\n\t\t\t\tif err != nil {\n\t\t\t\t\tcommon.Log.Debug(\"re Float parse error\")\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase \"BT\":\n\t\t\t\tinText = true\n\t\t\tcase \"ET\":\n\t\t\t\tinText = false\n\t\t\t\tpreRect0 = rect0\n\t\t\t\tpreRect1 = rect1\n\t\t\t\tpreRect2 = rect2\n\t\t\t\tpreRect3 = rect3\n\t\t\tcase \"Tf\":\n\t\t\t\tif !inText {\n\t\t\t\t\tcommon.Log.Debug(\"Tf operand outside text\")\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tif len(op.Params) != 2 {\n\t\t\t\t\tcommon.Log.Debug(\"Error Tf should only get 2 input params, got %d\", len(op.Params))\n\t\t\t\t\treturn errors.New(\"Incorrect parameter count\")\n\t\t\t\t}\n\n\t\t\t\tfontName, ok := op.Params[0].(*core.PdfObjectName)\n\t\t\t\tif !ok {\n\t\t\t\t\tcommon.Log.Debug(\"Error Tf font input not a name, %s\", op.Params[0])\n\t\t\t\t\treturn errors.New(\"Tf range error\")\n\t\t\t\t}\n\n\t\t\t\tcommon.Log.Trace(\"fontName: %s\", fontName)\n\n\t\t\t\tsize, err := core.GetNumberAsFloat(op.Params[1])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn errors.New(\"fontsize Float parse error\")\n\t\t\t\t} else {\n\t\t\t\t\tfontSize = float64(size)\n\t\t\t\t}\n\n\t\t\t\tfont = nil\n\t\t\t\tcodemap = nil\n\t\t\t\tcidCodemap = nil\n\t\t\t\tif font, ok = f[core.PdfObjectName(*fontName)]; ok {\n\t\t\t\t\tcodemap = font.GetCmap()\n\t\t\t\t\tcidCodemap = font.GetCidCmap()\n\t\t\t\t} else {\n\t\t\t\t\tcommon.Log.Debug(\"Error: can't find Tf font by name\")\n\t\t\t\t\treturn errors.New(\"can't find Tf font by name\")\n\t\t\t\t}\n\t\t\tcase \"T*\":\n\t\t\t\tif !inText 
{\n\t\t\t\t\tcommon.Log.Debug(\"T* operand outside text\")\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tif rect0 != preRect0 || rect1 != preRect1 || rect2 != preRect2 || rect3 != preRect3 {\n\t\t\t\t\tbuf.WriteString(\"\\n\")\n\t\t\t\t}\n\t\t\tcase \"'\":\n\t\t\t\t//quote = T* + Tj\n\t\t\t\tif !inText {\n\t\t\t\t\tcommon.Log.Debug(\"quote operand outside text\")\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tif rect0 != preRect0 || rect1 != preRect1 || rect2 != preRect2 || rect3 != preRect3 {\n\t\t\t\t\tbuf.WriteString(\"\\n\")\n\t\t\t\t}\n\t\t\t\tif len(op.Params) < 1 {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tparam, ok := op.Params[0].(*core.PdfObjectString)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn fmt.Errorf(\"Invalid parameter type, not string (%T)\", op.Params[0])\n\t\t\t\t}\n\n\t\t\t\t//first change charcode to cid string\n\t\t\t\tif font != nil && font.GetmPredefinedCmap() && cidCodemap != nil {\n\t\t\t\t\tstr := cidCodemap.CharcodeBytesToCidStr([]byte(*param))\n\t\t\t\t\tparam = core.MakeString(str)\n\t\t\t\t}\n\n\t\t\t\tif codemap != nil {\n\t\t\t\t\tif font.GetSimpleEncodingTableFlag() {\n\t\t\t\t\t\tbuf.WriteString(codemap.CharcodeBytesToUnicode([]byte(*param), font.GetSimpleEncodingTable(), true))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tbuf.WriteString(codemap.CharcodeBytesToUnicode([]byte(*param), []uint{}, false))\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif font != nil && font.GetSimpleEncodingTableFlag() {\n\t\t\t\t\t\tfor _, cid := range []byte(*param) {\n\t\t\t\t\t\t\tr := cmap.Utf8CodepointToUtf8(font.GetSimpleEncodingTable()[cid])\n\t\t\t\t\t\t\tbuf.WriteString(r)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tbuf.WriteString(string(*param))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase \"\\\"\":\n\t\t\t\t//quote = T* + ac + aw + Tj\n\t\t\t\tif !inText {\n\t\t\t\t\tcommon.Log.Debug(\"double quote operand outside text\")\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tif rect0 != preRect0 || rect1 != preRect1 || rect2 != preRect2 || rect3 != preRect3 
{\n\t\t\t\t\tbuf.WriteString(\"\\n\")\n\t\t\t\t}\n\t\t\t\tif len(op.Params) < 1 {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tparam, ok := op.Params[2].(*core.PdfObjectString)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn fmt.Errorf(\"Invalid parameter type, not string (%T)\", op.Params[2])\n\t\t\t\t}\n\n\t\t\t\t//first change charcode to cid string\n\t\t\t\tif font != nil && font.GetmPredefinedCmap() && cidCodemap != nil {\n\t\t\t\t\tstr := cidCodemap.CharcodeBytesToCidStr([]byte(*param))\n\t\t\t\t\tparam = core.MakeString(str)\n\t\t\t\t}\n\n\t\t\t\tif codemap != nil {\n\t\t\t\t\tif font.GetSimpleEncodingTableFlag() {\n\t\t\t\t\t\tbuf.WriteString(codemap.CharcodeBytesToUnicode([]byte(*param), font.GetSimpleEncodingTable(), true))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tbuf.WriteString(codemap.CharcodeBytesToUnicode([]byte(*param), []uint{}, false))\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif font != nil && font.GetSimpleEncodingTableFlag() {\n\t\t\t\t\t\tfor _, cid := range []byte(*param) {\n\t\t\t\t\t\t\tr := cmap.Utf8CodepointToUtf8(font.GetSimpleEncodingTable()[cid])\n\t\t\t\t\t\t\tbuf.WriteString(r)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tbuf.WriteString(string(*param))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase \"Td\", \"TD\":\n\t\t\t\tif !inText {\n\t\t\t\t\tcommon.Log.Debug(\"Td/TD operand outside text\")\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\t// Params: [tx ty], corresponeds to Tm=Tlm=[1 0 0;0 1 0;tx ty 1]*Tm\n\t\t\t\tif len(op.Params) != 2 {\n\t\t\t\t\tcommon.Log.Debug(\"Td/TD invalid arguments\")\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\ttx, err := core.GetNumberAsFloat(op.Params[0])\n\t\t\t\tif err != nil {\n\t\t\t\t\tcommon.Log.Debug(\"Td Float parse error\")\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tty, err := core.GetNumberAsFloat(op.Params[1])\n\t\t\t\tif err != nil {\n\t\t\t\t\tcommon.Log.Debug(\"Td Float parse error\")\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tif tx > 0 {\n\t\t\t\t\txTx = tx\n\t\t\t\t\t//buf.WriteString(\" \")\n\t\t\t\t}\n\t\t\t\tif ty < 0 
{\n\t\t\t\t\t// TODO: More flexible space characters?\n\t\t\t\t\tif rect0 != preRect0 || rect1 != preRect1 || rect2 != preRect2 || rect3 != preRect3 {\n\t\t\t\t\t\tbuf.WriteString(\"\\n\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase \"Tm\":\n\t\t\t\tif !inText {\n\t\t\t\t\tcommon.Log.Debug(\"Tm operand outside text\")\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\t// Params: a,b,c,d,e,f as in Tm = [a b 0; c d 0; e f 1].\n\t\t\t\t// The last two (e,f) represent translation.\n\t\t\t\tif len(op.Params) != 6 {\n\t\t\t\t\treturn errors.New(\"Tm: Invalid number of inputs\")\n\t\t\t\t}\n\t\t\t\txfloat, ok := op.Params[4].(*core.PdfObjectFloat)\n\t\t\t\tif !ok {\n\t\t\t\t\txint, ok := op.Params[4].(*core.PdfObjectInteger)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t\txfloat = core.MakeFloat(float64(*xint))\n\t\t\t\t}\n\t\t\t\tyfloat, ok := op.Params[5].(*core.PdfObjectFloat)\n\t\t\t\tif !ok {\n\t\t\t\t\tyint, ok := op.Params[5].(*core.PdfObjectInteger)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t\tyfloat = core.MakeFloat(float64(*yint))\n\t\t\t\t}\n\n\t\t\t\tif yPos == -1 {\n\t\t\t\t\tyPos = float64(*yfloat)\n\t\t\t\t} else if cMatrix[3]*yPos > cMatrix[3]*float64(*yfloat) {\n\t\t\t\t\tif rect0 != preRect0 || rect1 != preRect1 || rect2 != preRect2 || rect3 != preRect3 {\n\t\t\t\t\t\tbuf.WriteString(\"\\n\")\n\t\t\t\t\t}\n\n\t\t\t\t\t//temp bugfix for using TD and next line\n\t\t\t\t\txPos += -(xTx*cMatrix[0]*fontSize/1000.0 + fontSize)\n\t\t\t\t\tif xPos < float64(*xfloat) {\n\t\t\t\t\t\tbuf.WriteString(\"\\n\")\n\t\t\t\t\t}\n\n\t\t\t\t\txPos = float64(*xfloat)\n\t\t\t\t\tyPos = float64(*yfloat)\n\t\t\t\t\treturn nil\n\t\t\t\t} else {\n\t\t\t\t\tyPos = float64(*yfloat)\n\t\t\t\t}\n\n\t\t\t\tif xPos == -1 {\n\t\t\t\t\txPos = float64(*xfloat)\n\t\t\t\t} else if xPos < float64(*xfloat) {\n\t\t\t\t\tbuf.WriteString(\"\\t\")\n\t\t\t\t\txPos = float64(*xfloat)\n\t\t\t\t}\n\t\t\tcase \"TJ\":\n\t\t\t\tif !inText {\n\t\t\t\t\tcommon.Log.Debug(\"TJ 
operand outside text\")\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tif len(op.Params) < 1 {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tparamList, ok := op.Params[0].(*core.PdfObjectArray)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn fmt.Errorf(\"Invalid parameter type, no array (%T)\", op.Params[0])\n\t\t\t\t}\n\n\t\t\t\tsum := 0\n\t\t\t\tfor index, obj := range *paramList {\n\t\t\t\t\tswitch v := obj.(type) {\n\t\t\t\t\tcase *core.PdfObjectString:\n\t\t\t\t\t\t//first change charcode to cid string\n\t\t\t\t\t\tif font != nil && font.GetmPredefinedCmap() && cidCodemap != nil {\n\t\t\t\t\t\t\tstr := cidCodemap.CharcodeBytesToCidStr([]byte(*v))\n\t\t\t\t\t\t\tv = core.MakeString(str)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t//common.Log.Debug(\"origin: %X\", []byte(*v))\n\n\t\t\t\t\t\t// has ToUnicode\n\t\t\t\t\t\tif codemap != nil {\n\t\t\t\t\t\t\t//common.Log.Debug(\"parsed str: %s\", codemap.CharcodeBytesToUnicode([]byte(*v), []uint{}, false))\n\t\t\t\t\t\t\tif font.GetSimpleEncodingTableFlag() {\n\t\t\t\t\t\t\t\tbuf.WriteString(codemap.CharcodeBytesToUnicode([]byte(*v), font.GetSimpleEncodingTable(), true))\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tbuf.WriteString(codemap.CharcodeBytesToUnicode([]byte(*v), []uint{}, false))\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t//no ToUnicode but has font encoding\n\t\t\t\t\t\t\tif font != nil && font.GetSimpleEncodingTableFlag() {\n\t\t\t\t\t\t\t\tfor _, cid := range []byte(*v) {\n\t\t\t\t\t\t\t\t\tr := cmap.Utf8CodepointToUtf8(font.GetSimpleEncodingTable()[cid])\n\t\t\t\t\t\t\t\t\tbuf.WriteString(r)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tbuf.WriteString(string(*v))\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tsum += len([]byte(*v))\n\n\t\t\t\t\t\tif index == len(*paramList)-1 {\n\t\t\t\t\t\t\txPos += fontSize * float64(sum/2)\n\t\t\t\t\t\t\t//default space size\n\t\t\t\t\t\t\txPos += 1.5\n\t\t\t\t\t\t}\n\n\t\t\t\t\tcase *core.PdfObjectFloat:\n\t\t\t\t\t\txPos += float64(-*v) * (mScaling / 100.0) * fontSize / 
1000.0\n\t\t\t\t\tcase *core.PdfObjectInteger:\n\t\t\t\t\t\txPos += float64(-*v) * (mScaling / 100.0) * fontSize / 1000.0\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase \"TZ\":\n\t\t\t\tif !inText {\n\t\t\t\t\tcommon.Log.Debug(\"TZ operand outside text\")\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tif len(op.Params) < 1 {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tparam, ok := op.Params[0].(*core.PdfObjectInteger)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn fmt.Errorf(\"Invalid parameter type, not integer (%T)\", op.Params[0])\n\t\t\t\t}\n\n\t\t\t\tmScaling = float64(*param)\n\t\t\tcase \"Tj\":\n\t\t\t\tif !inText {\n\t\t\t\t\tcommon.Log.Debug(\"Tj operand outside text\")\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tif len(op.Params) < 1 {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tparam, ok := op.Params[0].(*core.PdfObjectString)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn fmt.Errorf(\"Invalid parameter type, not string (%T)\", op.Params[0])\n\t\t\t\t}\n\n\t\t\t\t//first change charcode to cid string\n\t\t\t\tif font != nil && font.GetmPredefinedCmap() && cidCodemap != nil {\n\t\t\t\t\tstr := cidCodemap.CharcodeBytesToCidStr([]byte(*param))\n\t\t\t\t\tparam = core.MakeString(str)\n\t\t\t\t}\n\n\t\t\t\t//common.Log.Debug(\"origin: %X\", []byte(*param))\n\n\t\t\t\tif codemap != nil {\n\t\t\t\t\t//common.Log.Debug(\"parsed str: %s\", codemap.CharcodeBytesToUnicode([]byte(*param), []uint{}, false))\n\t\t\t\t\tif font.GetSimpleEncodingTableFlag() {\n\t\t\t\t\t\tbuf.WriteString(codemap.CharcodeBytesToUnicode([]byte(*param), font.GetSimpleEncodingTable(), true))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tbuf.WriteString(codemap.CharcodeBytesToUnicode([]byte(*param), []uint{}, false))\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif font != nil && font.GetSimpleEncodingTableFlag() {\n\t\t\t\t\t\tfor _, cid := range []byte(*param) {\n\t\t\t\t\t\t\tr := cmap.Utf8CodepointToUtf8(font.GetSimpleEncodingTable()[cid])\n\t\t\t\t\t\t\tbuf.WriteString(r)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else 
{\n\t\t\t\t\t\tbuf.WriteString(string(*param))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn nil\n\t\t})\n\n\terr = processor.Process(e.fontNamesMap)\n\tif err != nil {\n\t\tcommon.Log.Error(\"Error processing: %v\", err)\n\t\treturn buf.String(), err\n\t}\n\n\t//procBuf(&buf)\n\n\treturn buf.String(), nil\n}",
"func NewPatternTranslation() *PatternTranslation {\n\tret := &PatternTranslation{\n\t\tMappings: []*FormatMapping{\n\t\t\tnewFormatMapping(\"January\", \"MMMM\", false),\n\t\t\tnewFormatMapping(\"Jan\", \"MMM\", false),\n\t\t\tnewFormatMapping(\"1\", \"M\", false),\n\t\t\tnewFormatMapping(\"01\", \"MM\", false),\n\t\t\tnewFormatMapping(\"Monday\", \"EEEE\", false),\n\t\t\tnewFormatMapping(\"Mon\", \"EEE\", false),\n\t\t\tnewFormatMapping(\"2\", \"d\", false),\n\t\t\tnewFormatMapping(\"_2\", \"_d\", true),\n\t\t\tnewFormatMapping(\"02\", \"dd\", false),\n\t\t\tnewFormatMapping(\"15\", \"HH\", false),\n\t\t\tnewFormatMapping(\"3\", \"K\", false),\n\t\t\tnewFormatMapping(\"03\", \"KK\", false),\n\t\t\tnewFormatMapping(\"4\", \"m\", false),\n\t\t\tnewFormatMapping(\"04\", \"mm\", false),\n\t\t\tnewFormatMapping(\"5\", \"s\", false),\n\t\t\tnewFormatMapping(\"05\", \"ss\", false),\n\t\t\tnewFormatMapping(\"2006\", \"yyyy\", false),\n\t\t\tnewFormatMapping(\"06\", \"yy\", false),\n\t\t\tnewFormatMapping(\"PM\", \"aa\", false),\n\t\t\tnewFormatMapping(\"pm\", \"aa\", true),\n\t\t\tnewFormatMapping(\"MST\", \"Z\", false),\n\t\t\tnewFormatMapping(\"Z0700\", \"'Z'XX\", false),\n\t\t\tnewFormatMapping(\"Z070000\", \"'Z'XX\", true),\n\t\t\tnewFormatMapping(\"Z07\", \"'Z'X\", false),\n\t\t\tnewFormatMapping(\"Z07:00\", \"'Z'XXX\", false),\n\t\t\tnewFormatMapping(\"Z07:00:00\", \"'Z'XXX\", true),\n\t\t\tnewFormatMapping(\"-0700\", \"XX\", false),\n\t\t\tnewFormatMapping(\"-070000\", \"'Z'XX\", true),\n\t\t\tnewFormatMapping(\"-07\", \"X\", false),\n\t\t\tnewFormatMapping(\"-07:00\", \"XXX\", false),\n\t\t\tnewFormatMapping(\"-07:00:00\", \"XXX\", true),\n\t\t\tnewFormatMapping(\"999999999\", \"SSS\", false),\n\t\t},\n\t}\n\treturn ret\n}",
"func Pattern(query []byte) []byte {\n\ttokenizer := sqlparser.NewStringTokenizer(string(query))\n\tbuf := bytes.Buffer{}\n\tl := make([]byte, 4)\n\tfor {\n\t\ttyp, val := tokenizer.Scan()\n\t\tswitch typ {\n\t\tcase sqlparser.ID: //table, database, variable & ... names\n\t\t\tbuf.Write(val)\n\t\tcase 0: //End of query\n\t\t\treturn buf.Bytes()\n\t\tdefault:\n\t\t\tbinary.BigEndian.PutUint32(l, uint32(typ))\n\t\t\tbuf.Write(l)\n\t\t}\n\t}\n}",
"func (plan *Plan) AsText() *PlanAsText {\n\tresult := NewPlanAsText()\n\n\t// apply the plan and capture actions as text\n\tplan.applyInternal(WrapSequential(func(act Interface) error {\n\t\tresult.Actions = append(result.Actions, act.DescribeChanges())\n\t\treturn nil\n\t}), NewApplyResultUpdaterImpl())\n\n\treturn result\n}",
"func NewTextQuery() *TextQuery {\n\treturn &TextQuery{\n\t\tIn: make([]string, 0),\n\t}\n}",
"func (bp *binaryPattern) toPattern() (p *Pattern) {\n\tp = new(Pattern)\n\n\t// Convert headers\n\tn := 0\n\tfor bp.version_string[n] != byte(0x0) {\n\t\tn++\n\t}\n\tp.version = string(bp.version_string[:n])\n\tp.tempo = bp.tempo\n\n\t// Convert tracks\n\tp.tracks = make(map[int]Track, len(bp.tracks))\n\tp.printOrder = make([]int, 0)\n\tfor _, bt := range bp.tracks {\n\t\tvar t Track\n\t\tt.name = string(bt.name[:int(bt.strlen)])\n\t\tfor i := 0; i < len(t.steps); i++ {\n\t\t\tt.steps[i] = bt.steps[i] == 0x1\n\t\t}\n\n\t\tvar id int = int(bt.id)\n\t\tp.tracks[id] = t\n\t\tp.printOrder = append(p.printOrder, id)\n\t}\n\treturn p\n}",
"func GetTextFromQuery(body string, query string) string {\n\tdoc, err := goquery.NewDocumentFromReader(strings.NewReader(body))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\ttextFromQuery := \"\"\n\t// Find the requested item\n\tdoc.Find(query).Each(func(i int, s *goquery.Selection) {\n\t\t// For each item found, get the title and absolute link\n\t\ttextFromQuery = s.Text()\n\t})\n\n\treturn textFromQuery\n}",
"func Pattern(query string) []byte {\n\ttokenizer := sqlparser.NewStringTokenizer(query)\n\tbuf := bytes.Buffer{}\n\tl := make([]byte, 4)\n\tfor {\n\t\ttyp, val := tokenizer.Scan()\n\t\tswitch typ {\n\t\tcase sqlparser.ID: //table, database, variable & ... names\n\t\t\tbuf.Write(val)\n\t\tcase 0: //End of query\n\t\t\treturn buf.Bytes()\n\t\tdefault:\n\t\t\tbinary.BigEndian.PutUint32(l, uint32(typ))\n\t\t\tbuf.Write(l)\n\t\t}\n\t}\n}",
"func NotifyTextProcess(stBodyInfo public.BodyInfo, pstNotifyText *public.NotifyText) {\n\n\t(*pstNotifyText).Msgtype = \"text\"\n\tif stBodyInfo.ObjectAttributes.Status == \"success\" {\n\t\t(*pstNotifyText).Text.Content += \"恭喜你CI成功了!\" + \"\\n\"\n\t} else {\n\t\t(*pstNotifyText).Text.Content += stBodyInfo.User.Name + \"的CI失败了!\" + \"\\n\"\n\t}\n\n\tfmt.Println(\"Username : \", stBodyInfo.User.Username)\n\tfmt.Println(\"mobile : \", public.MapUsername2Mobile[stBodyInfo.User.Username])\n\t(*pstNotifyText).At.AtMobiles = append((*pstNotifyText).At.AtMobiles, public.MapUsername2Mobile[stBodyInfo.User.Username])\n\tfmt.Println(\"mobile : \", (*pstNotifyText).At.AtMobiles)\n\n\tfmt.Println(\"ref : \", stBodyInfo.ObjectAttributes.Ref)\n\tfmt.Println(\"env : \", public.MapRxQueue[stBodyInfo.ObjectAttributes.ID])\n\t/* notify to all if master branch */\n\tif stBodyInfo.ObjectAttributes.Ref == \"master\" && (public.MapRxQueue[stBodyInfo.ObjectAttributes.ID] == \"NEBULAE-DEV-A\" || public.MapRxQueue[stBodyInfo.ObjectAttributes.ID] == \"\") {\n\t\t\t(*pstNotifyText).At.IsAtAll = \"true\"\n\t} else {\n\t\t/* notify to all if pipeline failed */\n\t\tif stBodyInfo.ObjectAttributes.Status == \"success\" {\n\t\t\t(*pstNotifyText).At.IsAtAll = \"false\"\n\t\t} else {\n\t\t\t(*pstNotifyText).At.IsAtAll = \"true\"\n\t\t}\n\t}\n\n\tfmt.Println(\"+++++++++NotifyTextProcess++++++++++++++++\")\n\tfmt.Println(*pstNotifyText)\n\tfmt.Println(\"++++++++++++++++++++++++++++++++++++++++\")\n}",
"func (c *Context) TextQuery(query string) (*Result, *Outcome, error) {\n\tc_query := C.CString(query)\n\tdefer C.free(unsafe.Pointer(c_query))\n\n\tc_result, err := C.wit_text_query(c.context, c_query, c.access_token)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tdefer C.free(unsafe.Pointer(c_result))\n\n\treturn c.parseResult(C.GoString(c_result))\n}",
"func TestGetSingleTextInfo(t *testing.T) {\n\tconn := db.GetConnection()\n\ttextInfo := m.TextInfo{}\n\tconn.First(&textInfo)\n\n\tid := textInfo.Id\n\n\tparams := make(map[string]string)\n\tparams[\"id\"] = strconv.Itoa(textInfo.Id)\n\n\ttextInfo, err := GetSingleTextInfo(params)\n\tif err != nil {\n\t\tt.Errorf(err.Error())\n\t} else {\n\t\tif textInfo.Id != id {\n\t\t\tt.Errorf(\"The record with %s id cannot be found\", params[\"id\"])\n\t\t}\n\t}\n}",
"func (m *QueryCondition) GetQueryText()(*string) {\n val, err := m.GetBackingStore().Get(\"queryText\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}",
"func NewQueryRuleInfo() *QueryRuleInfo {\n\tqri := &QueryRuleInfo{\n\t\tqueryRulesMap: map[string]*QueryRules{},\n\t}\n\treturn qri\n}",
"func Search(txt, pat string) {\n\tm := len(pat)\n\tn := len(txt)\n\n\t// pMap[] stores count of all characters of pattern\n\t// tMap[] stores count of current window of text\n\tpMap := make(map[uint8]int)\n\ttMap := make(map[uint8]int)\n\n\t// Calculate pMap and tMap for the first window\n\tfor i := 0; i < m; i++ {\n\t\tpMap[pat[i]]++\n\t\ttMap[txt[i]]++\n\t}\n\n\t// Traverse through remaining characters of pattern\n\tfor i := m; i < n; i++ {\n\t\t// Compare counts of current window\n\t\t// of text with counts of pattern[]\n\t\tif compare(pMap, tMap) {\n\t\t\tfmt.Println(\"Found at Index \", i-m)\n\t\t}\n\n\t\t// Add current character to current window\n\t\ttMap[txt[i]]++\n\n\t\t// Remove the first character of previous window\n\t\ttMap[txt[i-m]]--\n\t}\n\n\t// Check for the last window in text\n\tif compare(pMap, tMap) {\n\t\tfmt.Println(\"Found at Index \", n-m)\n\t}\n\n}",
"func (*XMLDocument) QueryCommandText(commandId string) (s string) {\n\tmacro.Rewrite(\"$_.queryCommandText($1)\", commandId)\n\treturn s\n}",
"func FindQueryTextSearches(qMap *tokenizer.QueryMap) []QueryTextSearch {\n\tbsonMatch := make(bson.M)\n\tfor qType, qVal := range *qMap {\n\t\tswitch qType {\n\t\tcase \"lines\":\n\t\t\tlinesMap, ok := qVal.(map[string]int)\n\t\t\tif !ok || len(linesMap) != 1 {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tfor linesOp, linesThreshold := range linesMap {\n\t\t\t\tbsonMatch[\"fileinfo.numlines\"] = bson.M{fmt.Sprintf(\"$%s\", linesOp): linesThreshold}\n\t\t\t}\n\n\t\tcase \"text\":\n\t\t\ttextMap, ok := qVal.(map[string]interface{})\n\t\t\tif !ok {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tqTextRegex, ok := textMap[\"val\"].(string)\n\t\t\tif !ok {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tisRegex, ok := textMap[\"regex\"].(bool)\n\t\t\tif !ok {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tif !isRegex {\n\t\t\t\tqTextRegex = regexp.QuoteMeta(qTextRegex)\n\t\t\t}\n\n\t\t\tbsonMatch[\"fileinfo.formatteddata\"] = bson.M{\"$regex\": qTextRegex, \"$options\": \"si\"}\n\n\t\tdefault:\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tsession, err := mgo.Dial(\"mongodb://localhost\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer session.Close()\n\n\tcollQueriesTextSearch := common.GetCollection(session, \"queries.textsearch\")\n\n\tvar results []QueryTextSearch\n\terr = collQueriesTextSearch.Find(bsonMatch).Sort(\"fileinfo.repositoryname fileinfo.filepath\").All(&results)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn results\n}",
"func (me TPositionTypexStringPatternExtensionType) ToTxStringPatternExtensionType() TxStringPatternExtensionType {\n\treturn TxStringPatternExtensionType(me)\n}",
"func TestGetTextInfo(t *testing.T) {\n\ttextInfoList, _ := GetTextInfo(\n\t\tnil,\n\t\t\"\")\n\n\tif textInfoList == nil || len(textInfoList) == 0 {\n\t\tt.Errorf(\"Failed to query textinfo records\")\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
NewAddRemoteRDSNodeOK creates a AddRemoteRDSNodeOK with default headers values | func NewAddRemoteRDSNodeOK() *AddRemoteRDSNodeOK {
return &AddRemoteRDSNodeOK{}
} | [
"func NewAddRemoteRDSNodeDefault(code int) *AddRemoteRDSNodeDefault {\n\treturn &AddRemoteRDSNodeDefault{\n\t\t_statusCode: code,\n\t}\n}",
"func (a *Client) AddRDSNode(params *AddRDSNodeParams) (*AddRDSNodeOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewAddRDSNodeParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"AddRDSNode\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/v0/inventory/Nodes/AddRDSNode\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &AddRDSNodeReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*AddRDSNodeOK), nil\n\n}",
"func (o *AddRemoteRDSNodeOKBodyRemoteRDS) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (n *Nodes) AddRemote(clusterID uint64, nodeID uint64, target string) {\n\tif n.validate != nil && !n.validate(target) {\n\t\tplog.Panicf(\"invalid target %s\", target)\n\t}\n\tn.nmu.Lock()\n\tdefer n.nmu.Unlock()\n\tkey := raftio.GetNodeInfo(clusterID, nodeID)\n\tv, ok := n.nmu.nodes[key]\n\tif !ok {\n\t\tn.nmu.nodes[key] = target\n\t} else {\n\t\tif v != target {\n\t\t\tplog.Panicf(\"inconsistent target for %s, %s:%s\",\n\t\t\t\tlogutil.DescribeNode(clusterID, nodeID), v, target)\n\t\t}\n\t}\n}",
"func (c *Chord) addRemoteNode(remoteNode *node.RemoteNode) error {\n\tif !remoteNode.IsReady() {\n\t\treturn errors.New(\"Remote node is not ready yet\")\n\t}\n\n\terr := c.addSuccessor(remoteNode)\n\tif err != nil {\n\t\tlog.Errorf(\"Add %v to successors error: %v\", remoteNode, err)\n\t}\n\n\terr = c.addPredecessor(remoteNode)\n\tif err != nil {\n\t\tlog.Errorf(\"Add %v to predecessors error: %v\", remoteNode, err)\n\t}\n\n\tfor i := range c.fingerTable {\n\t\terr = c.addFingerTable(remoteNode, i)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Add %v to finger table %d error: %v\", remoteNode, i, err)\n\t\t}\n\t}\n\n\terr = c.addNeighbor(remoteNode)\n\tif err != nil {\n\t\tlog.Errorf(\"Add %v to neighbors error: %v\", remoteNode, err)\n\t}\n\n\treturn nil\n}",
"func (ctx Context) AddNode(count int, hardwareprofile, softwareprofile string) (AddNodeResult, error) {\n\tvar result AddNodeResult\n\n\t// we trust the signature\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}\n\tclient := &http.Client{Transport: tr}\n\n\taddInput := AddNodeInput{inner{count, hardwareprofile, softwareprofile}}\n\tinput, errMarshal := json.Marshal(addInput)\n\tif errMarshal != nil {\n\t\treturn result, errMarshal\n\t}\n\turl := fmt.Sprintf(\"https://%s:8443/v1/nodes\", ctx.Address)\n\treq, errRequest := http.NewRequest(\"POST\", url, bytes.NewBuffer(input))\n\tif errRequest != nil {\n\t\treturn result, errRequest\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.SetBasicAuth(ctx.User, ctx.Password)\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn result, err\n\t}\n\tdefer resp.Body.Close()\n\n\tdecoder := json.NewDecoder(resp.Body)\n\tif err := decoder.Decode(&result); err != nil {\n\t\treturn result, err\n\t}\n\treturn result, nil\n}",
"func (o *AddRemoteRDSNodeOKBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateRemoteRDS(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (sdk *SDK) NewNode(prefer *cloudsvr.PreferAttrs) (*cloudsvr.CloudNode, *cloudsvr.PreferAttrs, error) {\n\n\tvar (\n\t\tpassword, _ = utils.GenPassword(24)\n\t\treq = &CreateInstanceRequest{\n\t\t\tImageID: OsImage,\n\t\t\tPassword: password,\n\t\t\tInstanceName: NodeName,\n\t\t\tInstanceChargeType: \"PostPaid\", // require RMB 100+\n\t\t\tSecurityGroupID: \"whatever\", // will be automatic rewrite\n\t\t\tInternetChargeType: \"PayByTraffic\", // traffic payment\n\t\t\tInternetMaxBandwidthOut: \"100\", // 100M\n\t\t\tLabels: NodeLabels,\n\t\t}\n\t)\n\n\t// if prefered attributes set, use prefer region & instance-type\n\tif prefer != nil && prefer.Valid() == nil {\n\t\tvar (\n\t\t\treg = prefer.RegionOrZone\n\t\t\ttyp = prefer.InstanceType\n\t\t)\n\t\tlog.Printf(\"create aliyun ecs by using prefered region %s, instance type %s ...\", reg, typ)\n\n\t\treq.RegionID = reg // cn-beijing\n\t\treq.InstanceType = typ // ecs.n4.large\n\n\t\tcreated, err := sdk.createNode(req)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\tlog.Printf(\"created prefered aliyun ecs succeed: %s\", created.ID)\n\t\treturn created, prefer, nil\n\t}\n\n\tlog.Infoln(\"creating aliyun ecs by trying all regions & types ...\")\n\n\t// if prefered created failed, or without prefer region & instance-type\n\t// try best on all region & instance-types to create the new aliyun ecs\n\tvar (\n\t\tregions []RegionType // all of aliyun regions\n\t\ttypes []InstanceTypeItemType // all of instance types within given range of mems & cpus\n\t\terr error\n\t\tcreated *cloudsvr.CloudNode\n\t)\n\n\t// list all regions\n\tregions, err = sdk.ListRegions()\n\tif err != nil {\n\t\tlog.Errorf(\"sdk.NewNode.ListRegions() error: %v\", err)\n\t\treturn nil, nil, err\n\t}\n\n\t// list specified range of instance types\n\ttypes, err = sdk.ListInstanceTypes(2, 4, 2, 8) // TODO range of given cpus/mems ranges\n\tif err != nil {\n\t\tlog.Errorf(\"sdk.NewNode.ListInstanceTypes() error: %v\", err)\n\t\treturn 
nil, nil, err\n\t}\n\n\tvar (\n\t\tuseRegionID, useInsType string\n\t)\n\t// range all regions & types to try to create ecs instance\n\tfor _, reg := range regions {\n\t\tfor _, typ := range types {\n\t\t\treq.RegionID = reg.RegionID // cn-beijing\n\t\t\treq.InstanceType = typ.InstanceTypeID // ecs.n4.large\n\n\t\t\t// if created succeed, directly return\n\t\t\tcreated, err = sdk.createNode(req)\n\t\t\tif err == nil {\n\t\t\t\tuseRegionID, useInsType = reg.RegionID, typ.InstanceTypeID\n\t\t\t\tgoto END\n\t\t\t}\n\n\t\t\tif sdk.isFatalError(err) {\n\t\t\t\tlog.Errorf(\"create aliyun ecs got fatal error, stop retry: %v\", err)\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\n\t\t\tlog.Warnf(\"create aliyun ecs failed: %v, will retry another region or type\", err)\n\t\t}\n\t}\n\nEND:\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tlog.Printf(\"created aliyun ecs %s at %s and type is %s\", created.ID, useRegionID, useInsType)\n\treturn created, &cloudsvr.PreferAttrs{RegionOrZone: useRegionID, InstanceType: useInsType}, nil\n}",
"func newnode(id byte, name string, value string) *xmlx.Node {\n\tnode := xmlx.NewNode(id)\n\tif name != \"\" {\n\t\tnode.Name = xml.Name{\n\t\t\tLocal: name,\n\t\t}\n\t}\n\tif value != \"\" {\n\t\tnode.Value = value\n\t}\n\treturn node\n}",
"func (cc *ContrailCommand) CreateNode(host vcenter.ESXIHost) error {\n\tlog.Debug(\"Create Node:\", cc.AuthToken)\n\tnodeResource := contrailCommandNodeSync{\n\t\tResources: []*nodeResources{\n\t\t\t{\n\t\t\t\tKind: \"node\",\n\t\t\t\tData: &nodeData{\n\t\t\t\t\tNodeType: \"esxi\",\n\t\t\t\t\tUUID: host.UUID,\n\t\t\t\t\tHostname: host.Hostname,\n\t\t\t\t\tFqName: []string{\"default-global-system-config\", host.Hostname},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tjsonData, err := json.Marshal(nodeResource)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Debug(\"Sending Request\")\n\tresp, _, err := cc.sendRequest(\"/sync\", string(jsonData), \"POST\") //nolint: bodyclose\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Debug(\"Got status : \", resp.StatusCode)\n\tswitch resp.StatusCode {\n\tdefault:\n\t\treturn fmt.Errorf(\"resource creation failed, %d\", resp.StatusCode)\n\tcase 200, 201:\n\t}\n\treturn nil\n}",
"func (tapestry *Tapestry) addNode(remote Node, newnode Node) (neighbours []Node, err error) {\n\terr = makeRemoteNodeCall(remote, \"AddNode\", NodeRequest{remote, newnode}, &neighbours)\n\treturn\n}",
"func newNode(msg proto.Message) (*graphstore.Node, error) {\n\tany, err := ptypes.MarshalAny(msg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar key []byte\n\tif source, ok := msg.(*v1beta.Source); ok {\n\t\tkey = generateKey(\n\t\t\tany.GetTypeUrl(),\n\t\t\tsource.GetUrl(),\n\t\t)\n\t} else if module, ok := msg.(*v1beta.Module); ok {\n\t\tkey = generateKey(\n\t\t\tany.GetTypeUrl(),\n\t\t\tmodule.GetLanguage(),\n\t\t\tmodule.GetName(),\n\t\t)\n\t}\n\n\tkey = []byte(base64.StdEncoding.EncodeToString(key))\n\treturn &graphstore.Node{\n\t\tKey: key,\n\t\tBody: any,\n\t}, nil\n}",
"func NewRemote(v gointerfaces.Version, logger log.Logger, remoteKV remote.KVClient) remoteOpts {\n\treturn remoteOpts{bucketsCfg: mdbx.WithChaindataTables, version: v, log: logger, remoteKV: remoteKV}\n}",
"func (o *AddRemoteRDSNodeBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func NewRemoteNode() *RemoteNode {\n\tnode := &RemoteNode{\n\t\tbaseNode: newBaseNode(),\n\t\taddress: \"\",\n\t\tport: 0,\n\t}\n\n\treturn node\n}",
"func newRenterHostPair(name string) (*renterHostPair, error) {\n\treturn newCustomRenterHostPair(name, modules.ProdDependencies)\n}",
"func (zic *ZoneInterconnectHandler) AddRemoteZoneNode(node *corev1.Node) error {\n\tstart := time.Now()\n\tklog.Infof(\"Creating interconnect resources for remote zone node %s for the network %s\", node.Name, zic.GetNetworkName())\n\n\tnodeID := util.GetNodeID(node)\n\tif nodeID == -1 {\n\t\t// Don't consider this node as cluster-manager has not allocated node id yet.\n\t\treturn fmt.Errorf(\"failed to get node id for node - %s\", node.Name)\n\t}\n\n\t// Get the chassis id.\n\tchassisId, err := util.ParseNodeChassisIDAnnotation(node)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to parse node chassis-id for node - %s, error: %w\", node.Name, types.NewSuppressedError(err))\n\t}\n\n\tif err := zic.createRemoteZoneNodeResources(node, nodeID, chassisId); err != nil {\n\t\treturn fmt.Errorf(\"creating interconnect resources for remote zone node %s for the network %s failed : err - %w\", node.Name, zic.GetNetworkName(), err)\n\t}\n\tklog.Infof(\"Creating Interconnect resources for node %v took: %s\", node.Name, time.Since(start))\n\treturn nil\n}",
"func addNode(address, username, password, name, bundle string, approve bool) (string, error) {\n\tvar ret string\n\n\t// Build the request body.\n\tenrolBody := types.NodeEnrollmentBody{\n\t\tAddress: address,\n\t\tName: name,\n\t\tUsername: username,\n\t\tPassword: password,\n\t\tAutoApprove: !approve, // user specifies if they dont want it\n\t\tCallHome: false,\n\t}\n\tif bundle != \"\" {\n\t\tenrolBody.Bundle = bundle\n\t\tenrolBody.Hostname = name\n\t\tenrolBody.CallHome = true\n\t}\n\trequest := types.EnrollmentRequest{\n\t\tEnrollment: enrolBody,\n\t}\n\treqJSON, err := json.Marshal(&request)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\turl, err := client.GetURL(nodeURI)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\treq, err := client.BuildReq(&reqJSON, url, http.MethodPost, true)\n\trawResp, err := client.HTTPClient().Do(req)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\t_, err = client.ParseReq(rawResp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\treturn \"Node added successfully\\n\", nil\n}",
"func createNewNodeNetworkObject(writer *bufio.Writer, sourceOsmNode *osm.Node) {\n\ttags := sourceOsmNode.TagMap()\n\n\t// Punktnetzwerk 'Fahrrad'\n\tnewOsmNode := *sourceOsmNode // copy content (don't modify origin/source node)\n\tnewOsmNode.ID = 0\n\tnewOsmNode.Tags = []osm.Tag{} // remove all source tags\n\trefValue, found := tags[\"icn_ref\"]\n\tif found {\n\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_bicycle\"}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\twriteNewNodeObject(writer, &newOsmNode)\n\t} else {\n\t\trefValue, found = tags[\"ncn_ref\"]\n\t\tif found {\n\t\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_bicycle\"}\n\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\twriteNewNodeObject(writer, &newOsmNode)\n\t\t} else {\n\t\t\trefValue, found = tags[\"rcn_ref\"]\n\t\t\tif found {\n\t\t\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_bicycle\"}\n\t\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\t\twriteNewNodeObject(writer, &newOsmNode)\n\t\t\t} else {\n\t\t\t\trefValue, found = tags[\"lcn_ref\"]\n\t\t\t\tif found {\n\t\t\t\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_bicycle\"}\n\t\t\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\t\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\t\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\t\t\twriteNewNodeObject(writer, &newOsmNode)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Punktnetzwerk 'Wandern'\n\tnewOsmNode = *sourceOsmNode // copy content (don't modify origin/source node)\n\tnewOsmNode.ID = 0\n\tnewOsmNode.Tags = []osm.Tag{} // remove all source tags\n\trefValue, found = tags[\"iwn_ref\"]\n\tif found 
{\n\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_hiking\"}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\twriteNewNodeObject(writer, &newOsmNode)\n\t} else {\n\t\trefValue, found = tags[\"nwn_ref\"]\n\t\tif found {\n\t\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_hiking\"}\n\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\twriteNewNodeObject(writer, &newOsmNode)\n\t\t} else {\n\t\t\trefValue, found = tags[\"rwn_ref\"]\n\t\t\tif found {\n\t\t\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_hiking\"}\n\t\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\t\twriteNewNodeObject(writer, &newOsmNode)\n\t\t\t} else {\n\t\t\t\trefValue, found = tags[\"lwn_ref\"]\n\t\t\t\tif found {\n\t\t\t\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_hiking\"}\n\t\t\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\t\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\t\t\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\t\t\t\twriteNewNodeObject(writer, &newOsmNode)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Punktnetzwerk 'Inline-Skaten'\n\tnewOsmNode = *sourceOsmNode // copy content (don't modify origin/source node)\n\tnewOsmNode.ID = 0\n\tnewOsmNode.Tags = []osm.Tag{} // remove all source tags\n\trefValue, found = tags[\"rin_ref\"]\n\tif found {\n\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_inline_skates\"}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\twriteNewNodeObject(writer, &newOsmNode)\n\t}\n\n\t// Punktnetzwerk 'Reiten'\n\tnewOsmNode = *sourceOsmNode // copy content 
(don't modify origin/source node)\n\tnewOsmNode.ID = 0\n\tnewOsmNode.Tags = []osm.Tag{} // remove all source tags\n\trefValue, found = tags[\"rhn_ref\"]\n\tif found {\n\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_horse\"}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\twriteNewNodeObject(writer, &newOsmNode)\n\t}\n\n\t// Punktnetzwerk 'Kanu'\n\tnewOsmNode = *sourceOsmNode // copy content (don't modify origin/source node)\n\tnewOsmNode.ID = 0\n\tnewOsmNode.Tags = []osm.Tag{} // remove all source tags\n\trefValue, found = tags[\"rpn_ref\"]\n\tif found {\n\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_canoe\"}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\twriteNewNodeObject(writer, &newOsmNode)\n\t}\n\n\t// Punktnetzwerk 'Motorboot'\n\tnewOsmNode = *sourceOsmNode // copy content (don't modify origin/source node)\n\tnewOsmNode.ID = 0\n\tnewOsmNode.Tags = []osm.Tag{} // remove all source tags\n\trefValue, found = tags[\"rmn_ref\"]\n\tif found {\n\t\ttag := osm.Tag{Key: \"node_network\", Value: \"node_motorboat\"}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\ttag = osm.Tag{Key: \"name\", Value: refValue}\n\t\tnewOsmNode.Tags = append(newOsmNode.Tags, tag)\n\t\twriteNewNodeObject(writer, &newOsmNode)\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
NewAddRemoteRDSNodeDefault creates a AddRemoteRDSNodeDefault with default headers values | func NewAddRemoteRDSNodeDefault(code int) *AddRemoteRDSNodeDefault {
return &AddRemoteRDSNodeDefault{
_statusCode: code,
}
} | [
"func (client IdentityClient) createTagDefault(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodPost, \"/tagDefaults\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response CreateTagDefaultResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}",
"func newSSHDefaultConfig(userName, identity string) (*sshClientConfig, error) {\n\tconfig, err := sshDefaultConfig(userName, identity)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &sshClientConfig{ClientConfig: config}, nil\n}",
"func NewDefaultNode() *Node {\n\treturn &Node{\n\t\tPort: DefaultRedisPort,\n\t\tSlots: []Slot{},\n\t\tMigratingSlots: map[Slot]string{},\n\t\tImportingSlots: map[Slot]string{},\n\t}\n}",
"func NewDefaultNode() *Node {\n\treturn &Node{\n\t\tPort: DefaultSubmarinePort,\n\t\t///Slots: []Slot{},\n\t\t///MigratingSlots: map[Slot]string{},\n\t\t///ImportingSlots: map[Slot]string{},\n\t}\n}",
"func newNetworkDef() libvirtxml.Network {\n\tconst defNetworkXML = `\n\t\t<network>\n\t\t <name>default</name>\n\t\t <forward mode='nat'>\n\t\t <nat>\n\t\t <port start='1024' end='65535'/>\n\t\t </nat>\n\t\t </forward>\n\t\t</network>`\n\tif d, err := newDefNetworkFromXML(defNetworkXML); err != nil {\n\t\tpanic(fmt.Sprintf(\"Unexpected error while parsing default network definition: %s\", err))\n\t} else {\n\t\treturn d\n\t}\n}",
"func (o *AddOrUpdateNodePoolConfigItemParams) SetDefaults() {\n\t// no default values defined for this parameter\n}",
"func NewNode(defaultValue interface{}) Node {\n return Node{defaultValue, defaultValue}\n}",
"func SetDefaults_NodeRegistration(obj *NodeRegistrationOptions) {\n\tif len(obj.ImagePullPolicy) == 0 {\n\t\tobj.ImagePullPolicy = DefaultImagePullPolicy\n\t}\n}",
"func DefaultAddParams() *AddParams {\n\treturn &AddParams{\n\t\tRecursive: false,\n\t\tLayout: \"\", // corresponds to balanced layout\n\t\tChunker: \"size-262144\",\n\t\tRawLeaves: false,\n\t\tHidden: false,\n\t\tWrap: false,\n\t\tShard: false,\n\t\tProgress: false,\n\t\tCidVersion: 0,\n\t\tHashFun: \"sha2-256\",\n\t\tPinOptions: PinOptions{\n\t\t\tReplicationFactorMin: 0,\n\t\t\tReplicationFactorMax: 0,\n\t\t\tName: \"\",\n\t\t\tShardSize: DefaultShardSize,\n\t\t},\n\t}\n}",
"func GenServerNodeDefaults(options *CmdOptions, localIAs []string) {\n\t// reverse sort so that the default server will oppose the default client\n\tsort.Sort(sort.Reverse(sort.StringSlice(localIAs)))\n\n\tserFp := path.Join(options.StaticRoot, cfgFileSerUser)\n\tjsonBuf := []byte(`{ \"all\": [`)\n\tfor i := 0; i < len(localIAs); i++ {\n\t\t// use all localhost endpoints as possible servers for bwtester as least\n\t\tia := strings.Replace(localIAs[i], \"_\", \":\", -1)\n\t\tjson := []byte(`{\"name\":\"lo ` + ia + `\",\"isdas\":\"` + ia +\n\t\t\t`\", \"addr\":\"` + serDefAddr + `\",\"port\":` + strconv.Itoa(serPortDef) +\n\t\t\t`}`)\n\t\tjsonBuf = append(jsonBuf, json...)\n\t\tif i < (len(localIAs) - 1) {\n\t\t\tjsonBuf = append(jsonBuf, []byte(`,`)...)\n\t\t}\n\t}\n\tjsonBuf = append(jsonBuf, []byte(`] }`)...)\n\terr := ioutil.WriteFile(serFp, jsonBuf, 0644)\n\tCheckError(err)\n}",
"func (o *AddRemoteRDSNodeDefaultBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateDetails(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (p *provider) AddDefaults(spec v1alpha1.MachineSpec) (v1alpha1.MachineSpec, error) {\n\treturn spec, nil\n}",
"func (sess *reconcileStackSession) addDefaultPermalink(stack *pulumiv1alpha1.Stack) error {\n\tnamespacedName := types.NamespacedName{Name: stack.Name, Namespace: stack.Namespace}\n\terr := sess.getLatestResource(stack, namespacedName)\n\tif err != nil {\n\t\tsess.logger.Error(err, \"Failed to get latest Stack to update Stack Permalink URL\", \"Stack.Name\", stack.Spec.Stack)\n\t\treturn err\n\t}\n\tif stack.Status.LastUpdate == nil {\n\t\tstack.Status.LastUpdate = &pulumiv1alpha1.StackUpdateState{\n\t\t\tPermalink: pulumiv1alpha1.Permalink(fmt.Sprintf(\"%s/%s\", consoleURL, stack.Spec.Stack)),\n\t\t}\n\t} else {\n\t\tstack.Status.LastUpdate.Permalink = pulumiv1alpha1.Permalink(fmt.Sprintf(\"%s/%s\", consoleURL, stack.Name))\n\t}\n\terr = sess.updateResourceStatus(stack)\n\tif err != nil {\n\t\tsess.logger.Error(err, \"Failed to update Stack status with default permalink\", \"Stack.Name\", stack.Spec.Stack)\n\t\treturn err\n\t}\n\tsess.logger.Info(\"Successfully updated Stack with default permalink\", \"Stack.Name\", stack.Spec.Stack)\n\treturn nil\n}",
"func (d *DefaulterBuilder) AddDefault(f *ResourceFunction) {\n\tif !d.resource.Equals(f.Resource(), astmodel.EqualityOverrides{}) {\n\t\tpanic(\"cannot add default function on non-matching object types\")\n\t}\n\td.defaults = append(d.defaults, f)\n}",
"func NewPostCiNodesDefault(code int) *PostCiNodesDefault {\n\treturn &PostCiNodesDefault{\n\t\t_statusCode: code,\n\t}\n}",
"func (r *RequestBody) AddDefaults() {\n\tif r.DomainScope == \"\" {\n\t\tr.DomainScope = \"annotated\"\n\t}\n\tif len(r.Background) > 0 && r.DomainScope != \"custom\" {\n\t\tr.DomainScope = \"custom\"\n\t}\n\n\tif r.Organism == \"\" {\n\t\tr.Organism = \"hsapiens\"\n\t}\n\n\tif r.SignificanceThresholdMethod == \"\" {\n\t\tr.SignificanceThresholdMethod = \"gSCS\"\n\t}\n\n\tif r.UserThreshold == 0 {\n\t\tr.UserThreshold = 0.01\n\t}\n}",
"func (client IdentityClient) updateTagDefault(ctx context.Context, request common.OCIRequest, binaryReqBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (common.OCIResponse, error) {\n\n\thttpRequest, err := request.HTTPRequest(http.MethodPut, \"/tagDefaults/{tagDefaultId}\", binaryReqBody, extraHeaders)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response UpdateTagDefaultResponse\n\tvar httpResponse *http.Response\n\thttpResponse, err = client.Call(ctx, &httpRequest)\n\tdefer common.CloseBodyIfValid(httpResponse)\n\tresponse.RawResponse = httpResponse\n\tif err != nil {\n\t\treturn response, err\n\t}\n\n\terr = common.UnmarshalResponse(httpResponse, &response)\n\treturn response, err\n}",
"func addingDefaultFieldsToSchema(crdName string, schemaRaw []byte) ([]byte, error) {\n\tvar schema struct {\n\t\tProperties map[string]interface{} `json:\"properties\"`\n\t}\n\t_ = json.Unmarshal(schemaRaw, &schema)\n\n\tif len(schema.Properties) < 1 {\n\t\tlogging.V(6).Info(\"crd schema has no properties\", \"name\", crdName)\n\t\treturn schemaRaw, nil\n\t}\n\n\tif schema.Properties[\"apiVersion\"] == nil {\n\t\tapiVersionDefRaw := `{\"description\":\"APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\",\"type\":\"string\"}`\n\t\tapiVersionDef := make(map[string]interface{})\n\t\t_ = json.Unmarshal([]byte(apiVersionDefRaw), &apiVersionDef)\n\t\tschema.Properties[\"apiVersion\"] = apiVersionDef\n\t}\n\n\tif schema.Properties[\"metadata\"] == nil {\n\t\tmetadataDefRaw := `{\"$ref\":\"#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta\",\"description\":\"Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\"}`\n\t\tmetadataDef := make(map[string]interface{})\n\t\t_ = json.Unmarshal([]byte(metadataDefRaw), &metadataDef)\n\t\tschema.Properties[\"metadata\"] = metadataDef\n\t}\n\n\tschemaWithDefaultFields, _ := json.Marshal(schema)\n\n\treturn schemaWithDefaultFields, nil\n}",
"func (s *DataStore) CreateDefaultNode(name string) (*longhorn.Node, error) {\n\trequireLabel, err := s.GetSettingAsBool(types.SettingNameCreateDefaultDiskLabeledNodes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnode := &longhorn.Node{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t},\n\t\tSpec: longhorn.NodeSpec{\n\t\t\tName: name,\n\t\t\tAllowScheduling: true,\n\t\t\tEvictionRequested: false,\n\t\t\tTags: []string{},\n\t\t\tInstanceManagerCPURequest: 0,\n\t\t},\n\t}\n\n\t// For newly added node, the customized default disks will be applied only if the setting is enabled.\n\tif !requireLabel {\n\t\t// Note: this part wasn't moved to the controller is because\n\t\t// this will be done only once.\n\t\t// If user remove all the disks on the node, the default disk\n\t\t// will not be recreated automatically\n\t\tdataPath, err := s.GetSettingValueExisted(types.SettingNameDefaultDataPath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tstorageReservedPercentageForDefaultDisk, err := s.GetSettingAsInt(types.SettingNameStorageReservedPercentageForDefaultDisk)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdisks, err := types.CreateDefaultDisk(dataPath, storageReservedPercentageForDefaultDisk)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tnode.Spec.Disks = disks\n\t}\n\n\treturn s.CreateNode(node)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Code gets the status code for the add remote RDS node default response | func (o *AddRemoteRDSNodeDefault) Code() int {
return o._statusCode
} | [
"func (o *AddContainerNodeDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *AddClusterV5Default) Code() int {\n\treturn o._statusCode\n}",
"func (o *AddServiceInstanceDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *AddDeviceDatasourceInstanceDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *CreateAntivirusServerDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *CreateAllowedRegistryDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *UpdateNodeStateServicelightDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *NodesGetByIDDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *CreateServerSwitchingRuleDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *CreateOrUpdateNodePoolOK) Code() int {\n\treturn 200\n}",
"func (o *CreateConsulDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *ReplaceServerDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *PortsetCreateDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *UpdateNodePoolOK) Code() int {\n\treturn 200\n}",
"func (o *AddExternalDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *AddAPIDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *RegisterBareMetalHostDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *CreateNdmpUserDefault) Code() int {\n\treturn o._statusCode\n}",
"func (o *NisCreateDefault) Code() int {\n\treturn o._statusCode\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Validate validates this add remote RDS node body | func (o *AddRemoteRDSNodeBody) Validate(formats strfmt.Registry) error {
return nil
} | [
"func (o *AddRemoteRDSNodeOKBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateRemoteRDS(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *AddRemoteRDSNodeOKBodyRemoteRDS) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *AddContainerNodeBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *AddRemoteRDSNodeDefaultBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateDetails(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func ValidateVnfdPostBody(body []byte) error {\n\tlog.Debug()\n\tioReaderObj := strings.NewReader(schemaStrVnfdInput)\n\treturn json_schema_val.ValidateJSONBufAgainstSchema(body, ioReaderObj, \"vnfdPostBody.json\")\n}",
"func ValidateVnfdInstanceBody(jsonval []byte) error {\n\tlog.Debug()\n\tioReaderObj := strings.NewReader(schemaStrParameterizedInstance)\n\treturn json_schema_val.ValidateJSONBufAgainstSchema(jsonval, ioReaderObj, \"vnfdInstanceBody.json\")\n}",
"func (ut *clusterPostBody) Validate() (err error) {\n\tif ut.NodePoolSize == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"nodePoolSize\"))\n\t}\n\tif ut.NamespaceID == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"namespace_id\"))\n\t}\n\tif ut.NodePoolSize != nil {\n\t\tif *ut.NodePoolSize < 3 {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidRangeError(`response.nodePoolSize`, *ut.NodePoolSize, 3, true))\n\t\t}\n\t}\n\tif ut.NodePoolSize != nil {\n\t\tif *ut.NodePoolSize > 11 {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidRangeError(`response.nodePoolSize`, *ut.NodePoolSize, 11, false))\n\t\t}\n\t}\n\treturn\n}",
"func (o *AddExternalBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *AddQANPostgreSQLPgStatementsAgentBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *AddPMMAgentBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (ut *ClusterPostBody) Validate() (err error) {\n\n\tif ut.NamespaceID == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"namespace_id\"))\n\t}\n\tif ut.NodePoolSize < 3 {\n\t\terr = goa.MergeErrors(err, goa.InvalidRangeError(`response.nodePoolSize`, ut.NodePoolSize, 3, true))\n\t}\n\tif ut.NodePoolSize > 11 {\n\t\terr = goa.MergeErrors(err, goa.InvalidRangeError(`response.nodePoolSize`, ut.NodePoolSize, 11, false))\n\t}\n\treturn\n}",
"func (body *AddRequestBody) Validate() (err error) {\n\tif body.Token == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"token\", \"body\"))\n\t}\n\treturn\n}",
"func (m *PostConnectionsBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateName(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateOptions(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateRealms(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStrategy(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *AddContainerNodeOKBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateContainer(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (a *Client) AddRDSNode(params *AddRDSNodeParams) (*AddRDSNodeOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewAddRDSNodeParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"AddRDSNode\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/v0/inventory/Nodes/AddRDSNode\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &AddRDSNodeReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*AddRDSNodeOK), nil\n\n}",
"func (o *UpdateHostUsageBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *V5AddNodePoolRequestNodeSpecAws) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateInstanceDistribution(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *Node) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\tif !_Node_Host_Pattern.MatchString(m.GetHost()) {\n\t\treturn NodeValidationError{\n\t\t\tfield: \"Host\",\n\t\t\treason: \"value does not match regex pattern \\\"^\\\\\\\\*?[0-9a-zA-Z-._]+$\\\"\",\n\t\t}\n\t}\n\n\tif val := m.GetPort(); val < 1 || val > 65535 {\n\t\treturn NodeValidationError{\n\t\t\tfield: \"Port\",\n\t\t\treason: \"value must be inside range [1, 65535]\",\n\t\t}\n\t}\n\n\tif m.GetWeight() < 0 {\n\t\treturn NodeValidationError{\n\t\t\tfield: \"Weight\",\n\t\t\treason: \"value must be greater than or equal to 0\",\n\t\t}\n\t}\n\n\tfor key, val := range m.GetMetadata() {\n\t\t_ = val\n\n\t\t// no validation rules for Metadata[key]\n\n\t\tif v, ok := interface{}(val).(interface{ Validate() error }); ok {\n\t\t\tif err := v.Validate(); err != nil {\n\t\t\t\treturn NodeValidationError{\n\t\t\t\t\tfield: fmt.Sprintf(\"Metadata[%v]\", key),\n\t\t\t\t\treason: \"embedded message failed validation\",\n\t\t\t\t\tcause: err,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n\treturn nil\n}",
"func ValidateNewPostRequestBody(body *NewPostRequestBody) (err error) {\n\tif body.Body == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"body\", \"body\"))\n\t}\n\treturn\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Validate validates this add remote RDS node default body | func (o *AddRemoteRDSNodeDefaultBody) Validate(formats strfmt.Registry) error {
var res []error
if err := o.validateDetails(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
} | [
"func (o *AddRemoteRDSNodeBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *AddRemoteRDSNodeOKBodyRemoteRDS) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *AddRemoteRDSNodeOKBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateRemoteRDS(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *AddContainerNodeDefaultBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateDetails(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *AddContainerNodeBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *AddPMMAgentDefaultBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateDetails(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *AddExternalDefaultBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateDetails(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func ValidateVnfdPostBody(body []byte) error {\n\tlog.Debug()\n\tioReaderObj := strings.NewReader(schemaStrVnfdInput)\n\treturn json_schema_val.ValidateJSONBufAgainstSchema(body, ioReaderObj, \"vnfdPostBody.json\")\n}",
"func ValidateNewPostRequestBody(body *NewPostRequestBody) (err error) {\n\tif body.Body == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"body\", \"body\"))\n\t}\n\treturn\n}",
"func (ut *clusterPostBody) Validate() (err error) {\n\tif ut.NodePoolSize == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"nodePoolSize\"))\n\t}\n\tif ut.NamespaceID == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"namespace_id\"))\n\t}\n\tif ut.NodePoolSize != nil {\n\t\tif *ut.NodePoolSize < 3 {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidRangeError(`response.nodePoolSize`, *ut.NodePoolSize, 3, true))\n\t\t}\n\t}\n\tif ut.NodePoolSize != nil {\n\t\tif *ut.NodePoolSize > 11 {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidRangeError(`response.nodePoolSize`, *ut.NodePoolSize, 11, false))\n\t\t}\n\t}\n\treturn\n}",
"func (o *AddExternalBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *AddPMMAgentBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func Insert(w http.ResponseWriter, r *http.Request) {\n\tname := r.FormValue(\"name\")\n\tif !conf.NameIsValid(name) {\n\t\thttp.Error(w, \"Name should consist only of English letters and numbers separated by dots.\", 400)\n\t\treturn\n\t}\n\tvalueJSON := r.FormValue(\"value\")\n\tif len(valueJSON) == 0 {\n\t\thttp.Error(w, \"Node value is not specified\", 400)\n\t\treturn\n\t}\n\tvar value interface{}\n\terr := json.Unmarshal(([]byte)(valueJSON), &value)\n\tif err != nil {\n\t\thttp.Error(w, \"Node value should be proper json. Can't parse node value: \"+err.Error(), 400)\n\t\treturn\n\t}\n\terr = conf.CheckInterfaceConsistsOfMapsAndStrings(value)\n\tif err != nil {\n\t\tw.WriteHeader(400)\n\t\tfmt.Fprintf(w, err.Error())\n\t\treturn\n\t}\n\n\tpath := conf.NameToPath(name)\n\terr = conf.CreateNode(path, value)\n\tif err != nil {\n\t\tw.WriteHeader(400)\n\t\tfmt.Fprintf(w, err.Error())\n\t\treturn\n\t}\n\n\tfmt.Fprintf(w, \"\")\n}",
"func ValidateVnfdInstanceBody(jsonval []byte) error {\n\tlog.Debug()\n\tioReaderObj := strings.NewReader(schemaStrParameterizedInstance)\n\treturn json_schema_val.ValidateJSONBufAgainstSchema(jsonval, ioReaderObj, \"vnfdInstanceBody.json\")\n}",
"func (o *AddQANPostgreSQLPgStatementsAgentBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *ChangeRDSExporterDefaultBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateDetails(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (body *AddRequestBody) Validate() (err error) {\n\tif body.Token == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"token\", \"body\"))\n\t}\n\treturn\n}",
"func (o *PostSiteCreatedBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateData(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (ut *ClusterPostBody) Validate() (err error) {\n\n\tif ut.NamespaceID == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"namespace_id\"))\n\t}\n\tif ut.NodePoolSize < 3 {\n\t\terr = goa.MergeErrors(err, goa.InvalidRangeError(`response.nodePoolSize`, ut.NodePoolSize, 3, true))\n\t}\n\tif ut.NodePoolSize > 11 {\n\t\terr = goa.MergeErrors(err, goa.InvalidRangeError(`response.nodePoolSize`, ut.NodePoolSize, 11, false))\n\t}\n\treturn\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Validate validates this add remote RDS node OK body | func (o *AddRemoteRDSNodeOKBody) Validate(formats strfmt.Registry) error {
var res []error
if err := o.validateRemoteRDS(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
} | [
"func (o *AddRemoteRDSNodeBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *AddRemoteRDSNodeOKBodyRemoteRDS) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *AddRemoteRDSNodeDefaultBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateDetails(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *AddContainerNodeBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (a *Client) AddRDSNode(params *AddRDSNodeParams) (*AddRDSNodeOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewAddRDSNodeParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"AddRDSNode\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/v0/inventory/Nodes/AddRDSNode\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &AddRDSNodeReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*AddRDSNodeOK), nil\n\n}",
"func AddServer(data map[string]string)(err error) {\n uuid := data[\"uuid\"]\n err = ndb.GetTokenByUuid(uuid); if err!=nil{logs.Error(\"Error loading node token: %s\",err); return err}\n ipuuid,portuuid,err := ndb.ObtainPortIp(uuid)\n if err != nil {\n logs.Error(\"AddServer ERROR Obtaining Port and IP for Add a new server into STAP: \"+err.Error())\n return err\n }\n err = nodeclient.AddServer(ipuuid,portuuid, data)\n if err != nil {\n logs.Error(\"node/AddServer ERROR http data request: \"+err.Error())\n return err\n }\n return nil\n}",
"func (c *Client) NodeAdd(name, ID, serial string) error {\n\n\tme := \"NodeAdd\"\n\n\trn := rnNode(serial)\n\n\tdn := dnNode(serial)\n\n\tapi := \"/api/node/mo/uni/\" + dn + \".json\"\n\n\turl := c.getURL(api)\n\n\tj := fmt.Sprintf(`{\"fabricNodeIdentP\":{\"attributes\":{\"dn\":\"uni/%s\",\"serial\":\"%s\",\"nodeId\":\"%s\",\"name\":\"%s\",\"rn\":\"%s\",\"status\":\"created\"},\"children\":[]}}`,\n\t\tdn, serial, ID, name, rn)\n\n\tc.debugf(\"%s: url=%s json=%s\", me, url, j)\n\n\tbody, errPost := c.post(url, contentTypeJSON, bytes.NewBufferString(j))\n\tif errPost != nil {\n\t\treturn fmt.Errorf(\"%s: %v\", me, errPost)\n\t}\n\n\tc.debugf(\"%s: reply: %s\", me, string(body))\n\n\treturn parseJSONError(body)\n}",
"func (o *AddContainerNodeOKBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateContainer(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (c *Client) Add(ctx context.Context, node NodeInfo) error {\n\trequest := protocol.Message{}\n\tresponse := protocol.Message{}\n\n\trequest.Init(4096)\n\tresponse.Init(4096)\n\n\tprotocol.EncodeAdd(&request, node.ID, node.Address)\n\n\tif err := c.protocol.Call(ctx, &request, &response); err != nil {\n\t\treturn err\n\t}\n\n\tif err := protocol.DecodeEmpty(&response); err != nil {\n\t\treturn err\n\t}\n\n\t// If the desired role is spare, there's nothing to do, since all newly\n\t// added nodes have the spare role.\n\tif node.Role == Spare {\n\t\treturn nil\n\t}\n\n\treturn c.Assign(ctx, node.ID, node.Role)\n}",
"func (ut *clusterPostBody) Validate() (err error) {\n\tif ut.NodePoolSize == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"nodePoolSize\"))\n\t}\n\tif ut.NamespaceID == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"namespace_id\"))\n\t}\n\tif ut.NodePoolSize != nil {\n\t\tif *ut.NodePoolSize < 3 {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidRangeError(`response.nodePoolSize`, *ut.NodePoolSize, 3, true))\n\t\t}\n\t}\n\tif ut.NodePoolSize != nil {\n\t\tif *ut.NodePoolSize > 11 {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidRangeError(`response.nodePoolSize`, *ut.NodePoolSize, 11, false))\n\t\t}\n\t}\n\treturn\n}",
"func addNode(address, username, password, name, bundle string, approve bool) (string, error) {\n\tvar ret string\n\n\t// Build the request body.\n\tenrolBody := types.NodeEnrollmentBody{\n\t\tAddress: address,\n\t\tName: name,\n\t\tUsername: username,\n\t\tPassword: password,\n\t\tAutoApprove: !approve, // user specifies if they dont want it\n\t\tCallHome: false,\n\t}\n\tif bundle != \"\" {\n\t\tenrolBody.Bundle = bundle\n\t\tenrolBody.Hostname = name\n\t\tenrolBody.CallHome = true\n\t}\n\trequest := types.EnrollmentRequest{\n\t\tEnrollment: enrolBody,\n\t}\n\treqJSON, err := json.Marshal(&request)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\turl, err := client.GetURL(nodeURI)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\treq, err := client.BuildReq(&reqJSON, url, http.MethodPost, true)\n\trawResp, err := client.HTTPClient().Do(req)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\t_, err = client.ParseReq(rawResp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\treturn \"Node added successfully\\n\", nil\n}",
"func (ctx Context) AddNode(count int, hardwareprofile, softwareprofile string) (AddNodeResult, error) {\n\tvar result AddNodeResult\n\n\t// we trust the signature\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}\n\tclient := &http.Client{Transport: tr}\n\n\taddInput := AddNodeInput{inner{count, hardwareprofile, softwareprofile}}\n\tinput, errMarshal := json.Marshal(addInput)\n\tif errMarshal != nil {\n\t\treturn result, errMarshal\n\t}\n\turl := fmt.Sprintf(\"https://%s:8443/v1/nodes\", ctx.Address)\n\treq, errRequest := http.NewRequest(\"POST\", url, bytes.NewBuffer(input))\n\tif errRequest != nil {\n\t\treturn result, errRequest\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.SetBasicAuth(ctx.User, ctx.Password)\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn result, err\n\t}\n\tdefer resp.Body.Close()\n\n\tdecoder := json.NewDecoder(resp.Body)\n\tif err := decoder.Decode(&result); err != nil {\n\t\treturn result, err\n\t}\n\treturn result, nil\n}",
"func (ut *ClusterPostBody) Validate() (err error) {\n\n\tif ut.NamespaceID == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"namespace_id\"))\n\t}\n\tif ut.NodePoolSize < 3 {\n\t\terr = goa.MergeErrors(err, goa.InvalidRangeError(`response.nodePoolSize`, ut.NodePoolSize, 3, true))\n\t}\n\tif ut.NodePoolSize > 11 {\n\t\terr = goa.MergeErrors(err, goa.InvalidRangeError(`response.nodePoolSize`, ut.NodePoolSize, 11, false))\n\t}\n\treturn\n}",
"func Insert(w http.ResponseWriter, r *http.Request) {\n\tname := r.FormValue(\"name\")\n\tif !conf.NameIsValid(name) {\n\t\thttp.Error(w, \"Name should consist only of English letters and numbers separated by dots.\", 400)\n\t\treturn\n\t}\n\tvalueJSON := r.FormValue(\"value\")\n\tif len(valueJSON) == 0 {\n\t\thttp.Error(w, \"Node value is not specified\", 400)\n\t\treturn\n\t}\n\tvar value interface{}\n\terr := json.Unmarshal(([]byte)(valueJSON), &value)\n\tif err != nil {\n\t\thttp.Error(w, \"Node value should be proper json. Can't parse node value: \"+err.Error(), 400)\n\t\treturn\n\t}\n\terr = conf.CheckInterfaceConsistsOfMapsAndStrings(value)\n\tif err != nil {\n\t\tw.WriteHeader(400)\n\t\tfmt.Fprintf(w, err.Error())\n\t\treturn\n\t}\n\n\tpath := conf.NameToPath(name)\n\terr = conf.CreateNode(path, value)\n\tif err != nil {\n\t\tw.WriteHeader(400)\n\t\tfmt.Fprintf(w, err.Error())\n\t\treturn\n\t}\n\n\tfmt.Fprintf(w, \"\")\n}",
"func (cc *ContrailCommand) CreateNode(host vcenter.ESXIHost) error {\n\tlog.Debug(\"Create Node:\", cc.AuthToken)\n\tnodeResource := contrailCommandNodeSync{\n\t\tResources: []*nodeResources{\n\t\t\t{\n\t\t\t\tKind: \"node\",\n\t\t\t\tData: &nodeData{\n\t\t\t\t\tNodeType: \"esxi\",\n\t\t\t\t\tUUID: host.UUID,\n\t\t\t\t\tHostname: host.Hostname,\n\t\t\t\t\tFqName: []string{\"default-global-system-config\", host.Hostname},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tjsonData, err := json.Marshal(nodeResource)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Debug(\"Sending Request\")\n\tresp, _, err := cc.sendRequest(\"/sync\", string(jsonData), \"POST\") //nolint: bodyclose\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Debug(\"Got status : \", resp.StatusCode)\n\tswitch resp.StatusCode {\n\tdefault:\n\t\treturn fmt.Errorf(\"resource creation failed, %d\", resp.StatusCode)\n\tcase 200, 201:\n\t}\n\treturn nil\n}",
"func RegisterNode(w http.ResponseWriter, r *http.Request) {\n\tfmt.Println(\"Register Node called\")\n\tdecoder := json.NewDecoder(r.Body)\n\tvar signedNode models.SignedPeer\n\terr := decoder.Decode(&signedNode)\n\tif err != nil {\n\t\tfmt.Println(\"Unable to decode request to register node. Incorrect format\")\n\t}\n\tvar nodeListJSON string\n\tpeerNode, ok := nodeList[signedNode.PeerNode.PeerId]\n\tif !ok {\n\t\tif(signedNode.PeerNode.VerifyPeerSignature(signedNode.SignedPeerNode)){\n\t\t\tfmt.Println(\"Signature Verified for new user. Register Successful\")\n\t\t\tnodeList[signedNode.PeerNode.PeerId] = signedNode.PeerNode\n\t\t\tnodeListJSON = nodeList.GetNodeListJSON()\n\t\t}else{\n\t\t\tnodeListJSON = \"{}\"\n\t\t}\n\t}else{\n\t\tfmt.Println(\"Peer Id already registered with this uuid.\")\n\t\tif(peerNode.VerifyPeerSignature(signedNode.SignedPeerNode)){\n\t\t\tfmt.Println(\"Verified with old public key. Updating key pair\")\n\t\t\tnodeList[signedNode.PeerNode.PeerId] = signedNode.PeerNode\n\t\t\tnodeListJSON = nodeList.GetNodeListJSON()\n\t\t}else{\n\t\t\tnodeListJSON = \"{}\"\n\t\t}\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(nodeListJSON))\n}",
"func (o *GetAgentOKBodyNodeExporter) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateStatus(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *V5AddNodePoolRequestNodeSpecAws) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateInstanceDistribution(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func ValidateVnfdInstanceBody(jsonval []byte) error {\n\tlog.Debug()\n\tioReaderObj := strings.NewReader(schemaStrParameterizedInstance)\n\treturn json_schema_val.ValidateJSONBufAgainstSchema(jsonval, ioReaderObj, \"vnfdInstanceBody.json\")\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Validate validates this add remote RDS node OK body remote RDS | func (o *AddRemoteRDSNodeOKBodyRemoteRDS) Validate(formats strfmt.Registry) error {
return nil
} | [
"func (o *AddRemoteRDSNodeBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *AddRemoteRDSNodeOKBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateRemoteRDS(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *AddRemoteRDSNodeDefaultBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateDetails(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func AddServer(data map[string]string)(err error) {\n uuid := data[\"uuid\"]\n err = ndb.GetTokenByUuid(uuid); if err!=nil{logs.Error(\"Error loading node token: %s\",err); return err}\n ipuuid,portuuid,err := ndb.ObtainPortIp(uuid)\n if err != nil {\n logs.Error(\"AddServer ERROR Obtaining Port and IP for Add a new server into STAP: \"+err.Error())\n return err\n }\n err = nodeclient.AddServer(ipuuid,portuuid, data)\n if err != nil {\n logs.Error(\"node/AddServer ERROR http data request: \"+err.Error())\n return err\n }\n return nil\n}",
"func (o *AddContainerNodeBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (a *Client) AddRDSNode(params *AddRDSNodeParams) (*AddRDSNodeOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewAddRDSNodeParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"AddRDSNode\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/v0/inventory/Nodes/AddRDSNode\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &AddRDSNodeReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*AddRDSNodeOK), nil\n\n}",
"func (n *Nodes) AddRemote(clusterID uint64, nodeID uint64, target string) {\n\tif n.validate != nil && !n.validate(target) {\n\t\tplog.Panicf(\"invalid target %s\", target)\n\t}\n\tn.nmu.Lock()\n\tdefer n.nmu.Unlock()\n\tkey := raftio.GetNodeInfo(clusterID, nodeID)\n\tv, ok := n.nmu.nodes[key]\n\tif !ok {\n\t\tn.nmu.nodes[key] = target\n\t} else {\n\t\tif v != target {\n\t\t\tplog.Panicf(\"inconsistent target for %s, %s:%s\",\n\t\t\t\tlogutil.DescribeNode(clusterID, nodeID), v, target)\n\t\t}\n\t}\n}",
"func (c *Chord) addRemoteNode(remoteNode *node.RemoteNode) error {\n\tif !remoteNode.IsReady() {\n\t\treturn errors.New(\"Remote node is not ready yet\")\n\t}\n\n\terr := c.addSuccessor(remoteNode)\n\tif err != nil {\n\t\tlog.Errorf(\"Add %v to successors error: %v\", remoteNode, err)\n\t}\n\n\terr = c.addPredecessor(remoteNode)\n\tif err != nil {\n\t\tlog.Errorf(\"Add %v to predecessors error: %v\", remoteNode, err)\n\t}\n\n\tfor i := range c.fingerTable {\n\t\terr = c.addFingerTable(remoteNode, i)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Add %v to finger table %d error: %v\", remoteNode, i, err)\n\t\t}\n\t}\n\n\terr = c.addNeighbor(remoteNode)\n\tif err != nil {\n\t\tlog.Errorf(\"Add %v to neighbors error: %v\", remoteNode, err)\n\t}\n\n\treturn nil\n}",
"func (c *Client) NodeAdd(name, ID, serial string) error {\n\n\tme := \"NodeAdd\"\n\n\trn := rnNode(serial)\n\n\tdn := dnNode(serial)\n\n\tapi := \"/api/node/mo/uni/\" + dn + \".json\"\n\n\turl := c.getURL(api)\n\n\tj := fmt.Sprintf(`{\"fabricNodeIdentP\":{\"attributes\":{\"dn\":\"uni/%s\",\"serial\":\"%s\",\"nodeId\":\"%s\",\"name\":\"%s\",\"rn\":\"%s\",\"status\":\"created\"},\"children\":[]}}`,\n\t\tdn, serial, ID, name, rn)\n\n\tc.debugf(\"%s: url=%s json=%s\", me, url, j)\n\n\tbody, errPost := c.post(url, contentTypeJSON, bytes.NewBufferString(j))\n\tif errPost != nil {\n\t\treturn fmt.Errorf(\"%s: %v\", me, errPost)\n\t}\n\n\tc.debugf(\"%s: reply: %s\", me, string(body))\n\n\treturn parseJSONError(body)\n}",
"func (c *Client) Add(ctx context.Context, node NodeInfo) error {\n\trequest := protocol.Message{}\n\tresponse := protocol.Message{}\n\n\trequest.Init(4096)\n\tresponse.Init(4096)\n\n\tprotocol.EncodeAdd(&request, node.ID, node.Address)\n\n\tif err := c.protocol.Call(ctx, &request, &response); err != nil {\n\t\treturn err\n\t}\n\n\tif err := protocol.DecodeEmpty(&response); err != nil {\n\t\treturn err\n\t}\n\n\t// If the desired role is spare, there's nothing to do, since all newly\n\t// added nodes have the spare role.\n\tif node.Role == Spare {\n\t\treturn nil\n\t}\n\n\treturn c.Assign(ctx, node.ID, node.Role)\n}",
"func (ut *clusterPostBody) Validate() (err error) {\n\tif ut.NodePoolSize == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"nodePoolSize\"))\n\t}\n\tif ut.NamespaceID == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`response`, \"namespace_id\"))\n\t}\n\tif ut.NodePoolSize != nil {\n\t\tif *ut.NodePoolSize < 3 {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidRangeError(`response.nodePoolSize`, *ut.NodePoolSize, 3, true))\n\t\t}\n\t}\n\tif ut.NodePoolSize != nil {\n\t\tif *ut.NodePoolSize > 11 {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidRangeError(`response.nodePoolSize`, *ut.NodePoolSize, 11, false))\n\t\t}\n\t}\n\treturn\n}",
"func ValidateVnfdInstanceBody(jsonval []byte) error {\n\tlog.Debug()\n\tioReaderObj := strings.NewReader(schemaStrParameterizedInstance)\n\treturn json_schema_val.ValidateJSONBufAgainstSchema(jsonval, ioReaderObj, \"vnfdInstanceBody.json\")\n}",
"func createServer(w http.ResponseWriter, r *http.Request) {\n w.Header().Set(\"Content-Type\", \"application/json\")\n\n\tvar newServer server\n\treqBody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t fmt.Println(err)\n\t\tfmt.Fprintf(w, \"Kindly enter data with the server's address, MSA and MTA network addresses only in order to create new server\")\n\t w.WriteHeader(http.StatusInternalServerError)\n\t return\n\t}\n\tnewServer.ID = strconv.Itoa(len(servers)+1)\n\n\tjson.Unmarshal(reqBody, &newServer)\n\tservers = append(servers, newServer)\n\tw.WriteHeader(http.StatusCreated)\n\n\tjson.NewEncoder(w).Encode(newServer)\n}",
"func (tapestry *Tapestry) addNode(remote Node, newnode Node) (neighbours []Node, err error) {\n\terr = makeRemoteNodeCall(remote, \"AddNode\", NodeRequest{remote, newnode}, &neighbours)\n\treturn\n}",
"func (m *ConnectionPost) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateLun(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateProtocolEndpoint(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func Insert(w http.ResponseWriter, r *http.Request) {\n\tname := r.FormValue(\"name\")\n\tif !conf.NameIsValid(name) {\n\t\thttp.Error(w, \"Name should consist only of English letters and numbers separated by dots.\", 400)\n\t\treturn\n\t}\n\tvalueJSON := r.FormValue(\"value\")\n\tif len(valueJSON) == 0 {\n\t\thttp.Error(w, \"Node value is not specified\", 400)\n\t\treturn\n\t}\n\tvar value interface{}\n\terr := json.Unmarshal(([]byte)(valueJSON), &value)\n\tif err != nil {\n\t\thttp.Error(w, \"Node value should be proper json. Can't parse node value: \"+err.Error(), 400)\n\t\treturn\n\t}\n\terr = conf.CheckInterfaceConsistsOfMapsAndStrings(value)\n\tif err != nil {\n\t\tw.WriteHeader(400)\n\t\tfmt.Fprintf(w, err.Error())\n\t\treturn\n\t}\n\n\tpath := conf.NameToPath(name)\n\terr = conf.CreateNode(path, value)\n\tif err != nil {\n\t\tw.WriteHeader(400)\n\t\tfmt.Fprintf(w, err.Error())\n\t\treturn\n\t}\n\n\tfmt.Fprintf(w, \"\")\n}",
"func (s *DNSSeeder) addNa(nNa *wire.NetAddress) bool {\n\n\tif len(s.nodes) > s.maxSize {\n\t\treturn false\n\t}\n\n\tif nNa.Port != s.Port {\n\t\treturn false\n\t}\n\n\t// generate the key and add to nodes\n\tk := net.JoinHostPort(nNa.IP.String(), strconv.Itoa(int(nNa.Port)))\n\n\tif _, dup := s.nodes[k]; dup == true {\n\t\treturn false\n\t}\n\n\t// if the reported timestamp suggests the netaddress has not been seen in the last 24 hours\n\t// then ignore this netaddress\n\tif (time.Now().Add(-(time.Hour * 24))).After(nNa.Timestamp) {\n\t\treturn false\n\t}\n\n\tnt := node{\n\t\tna: nNa,\n\t\tstatus: statusRG,\n\t\tdnsType: dns.TypeA,\n\t}\n\n\t// select the dns type based on the remote address type and port\n\tif x := nt.na.IP.To4(); x == nil {\n\t\tnt.dnsType = dns.TypeAAAA\n\t}\n\n\t// add the new node details to nodes\n\ts.nodes[k] = &nt\n\n\treturn true\n}",
"func (ctx Context) AddNode(count int, hardwareprofile, softwareprofile string) (AddNodeResult, error) {\n\tvar result AddNodeResult\n\n\t// we trust the signature\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}\n\tclient := &http.Client{Transport: tr}\n\n\taddInput := AddNodeInput{inner{count, hardwareprofile, softwareprofile}}\n\tinput, errMarshal := json.Marshal(addInput)\n\tif errMarshal != nil {\n\t\treturn result, errMarshal\n\t}\n\turl := fmt.Sprintf(\"https://%s:8443/v1/nodes\", ctx.Address)\n\treq, errRequest := http.NewRequest(\"POST\", url, bytes.NewBuffer(input))\n\tif errRequest != nil {\n\t\treturn result, errRequest\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.SetBasicAuth(ctx.User, ctx.Password)\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn result, err\n\t}\n\tdefer resp.Body.Close()\n\n\tdecoder := json.NewDecoder(resp.Body)\n\tif err := decoder.Decode(&result); err != nil {\n\t\treturn result, err\n\t}\n\treturn result, nil\n}",
"func ValidateVnfdPostBody(body []byte) error {\n\tlog.Debug()\n\tioReaderObj := strings.NewReader(schemaStrVnfdInput)\n\treturn json_schema_val.ValidateJSONBufAgainstSchema(body, ioReaderObj, \"vnfdPostBody.json\")\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set of ARNs of the matched Image Builder Infrastructure Configurations. | func (o GetInfrastructureConfigurationsResultOutput) Arns() pulumi.StringArrayOutput {
return o.ApplyT(func(v GetInfrastructureConfigurationsResult) []string { return v.Arns }).(pulumi.StringArrayOutput)
} | [
"func DefaultARMImages() ImageSelector {\n\treturn defaultARMImages\n}",
"func (d *aciDriver) Config() map[string]string {\n\treturn map[string]string{\n\t\t\"CNAB_AZURE_VERBOSE\": \"Increase verbosity. true, false are supported values\",\n\t\t\"CNAB_AZURE_CLIENT_ID\": \"AAD Client ID for Azure account authentication - used to authenticate to Azure for ACI creation\",\n\t\t\"CNAB_AZURE_CLIENT_SECRET\": \"AAD Client Secret for Azure account authentication - used to authenticate to Azure for ACI creation\",\n\t\t\"CNAB_AZURE_TENANT_ID\": \"Azure AAD Tenant Id Azure account authentication - used to authenticate to Azure for ACI creation\",\n\t\t\"CNAB_AZURE_SUBSCRIPTION_ID\": \"Azure Subscription Id - this is the subscription to be used for ACI creation, if not specified the default subscription is used\",\n\t\t\"CNAB_AZURE_APP_ID\": \"Azure Application Id - this is the application to be used to authenticate to Azure\",\n\t\t\"CNAB_AZURE_RESOURCE_GROUP\": \"The name of the existing Resource Group to create the ACI instance in, if not specified a Resource Group will be created\",\n\t\t\"CNAB_AZURE_LOCATION\": \"The location to create the ACI Instance in\",\n\t\t\"CNAB_AZURE_NAME\": \"The name of the ACI instance to create - if not specified a name will be generated\",\n\t\t\"CNAB_AZURE_DELETE_RESOURCES\": \"Delete RG and ACI instance created - default is true useful to set to false for debugging - only deletes RG if it was created by the driver\",\n\t\t\"CNAB_AZURE_MSI_TYPE\": \"This can be set to user or system\",\n\t\t\"CNAB_AZURE_SYSTEM_MSI_ROLE\": \"The role to be asssigned to System MSI User - used if CNAB_AZURE_ACI_MSI_TYPE == system, if this is null or empty then the role defaults to contributor\",\n\t\t\"CNAB_AZURE_SYSTEM_MSI_SCOPE\": \"The scope to apply the role to System MSI User - will attempt to set scope to the Resource Group that the ACI Instance is being created in if not set\",\n\t\t\"CNAB_AZURE_USER_MSI_RESOURCE_ID\": \"The resource Id of the MSI User - required if CNAB_AZURE_ACI_MSI_TYPE == User 
\",\n\t\t\"CNAB_AZURE_PROPAGATE_CREDENTIALS\": \"If this is set to true the credentials used to Launch the Driver are propagated to the invocation image in an ENV variable, the CNAB_AZURE prefix will be relaced with AZURE_, default is false\",\n\t\t\"CNAB_AZURE_USE_CLIENT_CREDS_FOR_REGISTRY_AUTH\": \"If this is set to true the CNAB_AZURE_CLIENT_ID and CNAB_AZURE_CLIENT_SECRET are also used for authentication to ACR\",\n\t\t\"CNAB_AZURE_REGISTRY_USERNAME\": \"The username for authenticating to the container registry\",\n\t\t\"CNAB_AZURE_REGISTRY_PASSWORD\": \"The password for authenticating to the container registry\",\n\t\t\"CNAB_AZURE_STATE_FILESHARE\": \"The File Share for Azure State volume\",\n\t\t\"CNAB_AZURE_STATE_STORAGE_ACCOUNT_NAME\": \"The Storage Account for the Azure State File Share\",\n\t\t\"CNAB_AZURE_STATE_STORAGE_ACCOUNT_KEY\": \"The Storage Key for the Azure State File Share\",\n\t\t\"CNAB_AZURE_STATE_MOUNT_POINT\": \"The mount point location for state volume\",\n\t\t\"CNAB_AZURE_DELETE_OUTPUTS_FROM_FILESHARE\": \"Any Outputs Created in the fileshare are deleted on completion\",\n\t\t\"CNAB_AZURE_DEBUG_CONTAINER\": \"Replaces /cnab/app/run with tail -f /dev/null so that container can be connected to and debugged\",\n\t}\n}",
"func (m *Application) SetIdentifierUris(value []string)() {\n m.identifierUris = value\n}",
"func (ami *AMIs) ResourceIdentifiers() []string {\n\treturn ami.ImageIds\n}",
"func (o ImageOutputResourceContainerOutput) ImageUris() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v ImageOutputResourceContainer) []string { return v.ImageUris }).(pulumi.StringArrayOutput)\n}",
"func (o OceanFiltersOutput) Architectures() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v OceanFilters) []string { return v.Architectures }).(pulumi.StringArrayOutput)\n}",
"func getAmlAbisConfig() []archConfig {\n\treturn []archConfig{\n\t\t{\"arm\", \"armv7-a-neon\", \"\", []string{\"armeabi-v7a\"}},\n\t\t{\"arm64\", \"armv8-a\", \"\", []string{\"arm64-v8a\"}},\n\t\t{\"x86\", \"\", \"\", []string{\"x86\"}},\n\t\t{\"x86_64\", \"\", \"\", []string{\"x86_64\"}},\n\t}\n}",
"func (o GetImageOutputResourceContainerOutput) ImageUris() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GetImageOutputResourceContainer) []string { return v.ImageUris }).(pulumi.StringArrayOutput)\n}",
"func (b *ATNConfigSet) Alts() *BitSet {\n\talts := NewBitSet()\n\tfor _, it := range b.configs {\n\t\talts.add(it.GetAlt())\n\t}\n\treturn alts\n}",
"func (a *ImageSliceAttribute) Anchors() []*anchor.Anchor {\n\treturn a.anchors\n}",
"func getSupportedCrdbImages() map[string]string {\n\tcrdbSupportedImages := make(map[string]string)\n\tsupportedVersions := getSupportedCrdbVersions()\n\tfor _, v := range supportedVersions {\n\t\tenvVar := fmt.Sprintf(\"%s%s\", RELATED_IMAGE_PREFIX, strings.ReplaceAll(v, \".\", \"_\"))\n\t\tcrdbSupportedImages[v] = os.Getenv(envVar)\n\t}\n\treturn crdbSupportedImages\n}",
"func (o ApplicationSpecSourceKustomizeOutput) Images() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v ApplicationSpecSourceKustomize) []string { return v.Images }).(pulumi.StringArrayOutput)\n}",
"func awsMatchersFromConfig(flags configurators.BootstrapFlags, cfg *servicecfg.Config) []types.AWSMatcher {\n\tif flags.Service.UseDiscoveryServiceConfig() {\n\t\treturn cfg.Discovery.AWSMatchers\n\t}\n\treturn cfg.Databases.AWSMatchers\n}",
"func (m *AppleVpnConfiguration) SetAssociatedDomains(value []string)() {\n err := m.GetBackingStore().Set(\"associatedDomains\", value)\n if err != nil {\n panic(err)\n }\n}",
"func (m *AppleVpnConfiguration) GetAssociatedDomains()([]string) {\n val, err := m.GetBackingStore().Get(\"associatedDomains\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.([]string)\n }\n return nil\n}",
"func (m *Application) GetIdentifierUris()([]string) {\n return m.identifierUris\n}",
"func (o GetSecretsResultOutput) Arns() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v GetSecretsResult) []string { return v.Arns }).(pulumi.StringArrayOutput)\n}",
"func (a Analyzer) ImageConfigAnalyzerVersions() map[string]int {\n\tversions := map[string]int{}\n\tfor _, ca := range configAnalyzers {\n\t\tif isDisabled(ca.Type(), a.disabledAnalyzers) {\n\t\t\tversions[string(ca.Type())] = 0\n\t\t\tcontinue\n\t\t}\n\t\tversions[string(ca.Type())] = ca.Version()\n\t}\n\treturn versions\n}",
"func getNdkAbisConfig() []archConfig {\n\treturn []archConfig{\n\t\t{\"arm\", \"armv7-a\", \"\", []string{\"armeabi-v7a\"}},\n\t\t{\"arm64\", \"armv8-a\", \"\", []string{\"arm64-v8a\"}},\n\t\t{\"x86\", \"\", \"\", []string{\"x86\"}},\n\t\t{\"x86_64\", \"\", \"\", []string{\"x86_64\"}},\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Set of names of the matched Image Builder Infrastructure Configurations. | func (o GetInfrastructureConfigurationsResultOutput) Names() pulumi.StringArrayOutput {
return o.ApplyT(func(v GetInfrastructureConfigurationsResult) []string { return v.Names }).(pulumi.StringArrayOutput)
} | [
"func initImageNames() map[int]string {\n\tswitch getTestArch() {\n\tcase \"s390x\":\n\t\treturn map[int]string{\n\t\t\tbusyboxImage: \"busybox@sha256:4f47c01fa91355af2865ac10fef5bf6ec9c7f42ad2321377c21e844427972977\",\n\t\t\tregistryImage: \"ibmcom/registry:2.6.2.5\",\n\t\t\tkanikoImage: \"gcr.io/kaniko-project/executor:s390x-9ed158c1f63a059cde4fd5f8b95af51d452d9aa7\",\n\t\t\tdockerizeImage: \"ibmcom/dockerize-s390x\",\n\t\t}\n\tcase \"ppc64le\":\n\t\treturn map[int]string{\n\t\t\tbusyboxImage: \"busybox@sha256:4f47c01fa91355af2865ac10fef5bf6ec9c7f42ad2321377c21e844427972977\",\n\t\t\tregistryImage: \"ppc64le/registry:2\",\n\t\t\tkanikoImage: \"ibmcom/kaniko-project-executor-ppc64le:v0.17.1\",\n\t\t\tdockerizeImage: \"ibmcom/dockerize-ppc64le\",\n\t\t}\n\tdefault:\n\t\treturn map[int]string{\n\t\t\tbusyboxImage: \"busybox@sha256:895ab622e92e18d6b461d671081757af7dbaa3b00e3e28e12505af7817f73649\",\n\t\t\tregistryImage: \"registry\",\n\t\t\tkanikoImage: \"gcr.io/kaniko-project/executor:v1.3.0\",\n\t\t\tdockerizeImage: \"jwilder/dockerize\",\n\t\t}\n\t}\n}",
"func (d *aciDriver) Config() map[string]string {\n\treturn map[string]string{\n\t\t\"CNAB_AZURE_VERBOSE\": \"Increase verbosity. true, false are supported values\",\n\t\t\"CNAB_AZURE_CLIENT_ID\": \"AAD Client ID for Azure account authentication - used to authenticate to Azure for ACI creation\",\n\t\t\"CNAB_AZURE_CLIENT_SECRET\": \"AAD Client Secret for Azure account authentication - used to authenticate to Azure for ACI creation\",\n\t\t\"CNAB_AZURE_TENANT_ID\": \"Azure AAD Tenant Id Azure account authentication - used to authenticate to Azure for ACI creation\",\n\t\t\"CNAB_AZURE_SUBSCRIPTION_ID\": \"Azure Subscription Id - this is the subscription to be used for ACI creation, if not specified the default subscription is used\",\n\t\t\"CNAB_AZURE_APP_ID\": \"Azure Application Id - this is the application to be used to authenticate to Azure\",\n\t\t\"CNAB_AZURE_RESOURCE_GROUP\": \"The name of the existing Resource Group to create the ACI instance in, if not specified a Resource Group will be created\",\n\t\t\"CNAB_AZURE_LOCATION\": \"The location to create the ACI Instance in\",\n\t\t\"CNAB_AZURE_NAME\": \"The name of the ACI instance to create - if not specified a name will be generated\",\n\t\t\"CNAB_AZURE_DELETE_RESOURCES\": \"Delete RG and ACI instance created - default is true useful to set to false for debugging - only deletes RG if it was created by the driver\",\n\t\t\"CNAB_AZURE_MSI_TYPE\": \"This can be set to user or system\",\n\t\t\"CNAB_AZURE_SYSTEM_MSI_ROLE\": \"The role to be asssigned to System MSI User - used if CNAB_AZURE_ACI_MSI_TYPE == system, if this is null or empty then the role defaults to contributor\",\n\t\t\"CNAB_AZURE_SYSTEM_MSI_SCOPE\": \"The scope to apply the role to System MSI User - will attempt to set scope to the Resource Group that the ACI Instance is being created in if not set\",\n\t\t\"CNAB_AZURE_USER_MSI_RESOURCE_ID\": \"The resource Id of the MSI User - required if CNAB_AZURE_ACI_MSI_TYPE == User 
\",\n\t\t\"CNAB_AZURE_PROPAGATE_CREDENTIALS\": \"If this is set to true the credentials used to Launch the Driver are propagated to the invocation image in an ENV variable, the CNAB_AZURE prefix will be relaced with AZURE_, default is false\",\n\t\t\"CNAB_AZURE_USE_CLIENT_CREDS_FOR_REGISTRY_AUTH\": \"If this is set to true the CNAB_AZURE_CLIENT_ID and CNAB_AZURE_CLIENT_SECRET are also used for authentication to ACR\",\n\t\t\"CNAB_AZURE_REGISTRY_USERNAME\": \"The username for authenticating to the container registry\",\n\t\t\"CNAB_AZURE_REGISTRY_PASSWORD\": \"The password for authenticating to the container registry\",\n\t\t\"CNAB_AZURE_STATE_FILESHARE\": \"The File Share for Azure State volume\",\n\t\t\"CNAB_AZURE_STATE_STORAGE_ACCOUNT_NAME\": \"The Storage Account for the Azure State File Share\",\n\t\t\"CNAB_AZURE_STATE_STORAGE_ACCOUNT_KEY\": \"The Storage Key for the Azure State File Share\",\n\t\t\"CNAB_AZURE_STATE_MOUNT_POINT\": \"The mount point location for state volume\",\n\t\t\"CNAB_AZURE_DELETE_OUTPUTS_FROM_FILESHARE\": \"Any Outputs Created in the fileshare are deleted on completion\",\n\t\t\"CNAB_AZURE_DEBUG_CONTAINER\": \"Replaces /cnab/app/run with tail -f /dev/null so that container can be connected to and debugged\",\n\t}\n}",
"func (p *eksClusterProvider) ConfigurationItems() config.ConfigurationSet {\n\tcs := aws.SharedConfig()\n\n\tcs.String(\"region-filter\", \"\", \"A filter to apply to the AWS regions list, e.g. 'us-' will only show US regions\") //nolint: errcheck\n\tcs.String(\"role-arn\", \"\", \"ARN of the AWS role to be assumed\") //nolint: errcheck\n\tcs.String(\"role-filter\", \"\", \"A filter to apply to the roles list, e.g. 'EKS' will only show roles that contain EKS in the name\") //nolint: errcheck\n\n\treturn cs\n}",
"func imageNamesMapping() map[string]string {\n\n\tswitch getTestArch() {\n\tcase \"s390x\":\n\t\treturn map[string]string{\n\t\t\t\"registry\": getTestImage(registryImage),\n\t\t\t\"node\": \"node:alpine3.11\",\n\t\t\t\"gcr.io/cloud-builders/git\": \"alpine/git:latest\",\n\t\t\t\"docker:dind\": \"ibmcom/docker-s390x:dind\",\n\t\t\t\"docker\": \"docker:18.06.3\",\n\t\t\t\"mikefarah/yq:3\": \"danielxlee/yq:2.4.0\",\n\t\t\t\"stedolan/jq\": \"ibmcom/jq-s390x:latest\",\n\t\t\t\"gcr.io/kaniko-project/executor:v1.3.0\": getTestImage(kanikoImage),\n\t\t}\n\tcase \"ppc64le\":\n\t\treturn map[string]string{\n\t\t\t\"registry\": getTestImage(registryImage),\n\t\t\t\"node\": \"node:alpine3.11\",\n\t\t\t\"gcr.io/cloud-builders/git\": \"alpine/git:latest\",\n\t\t\t\"docker:dind\": \"ibmcom/docker-ppc64le:19.03-dind\",\n\t\t\t\"docker\": \"docker:18.06.3\",\n\t\t\t\"mikefarah/yq:3\": \"danielxlee/yq:2.4.0\",\n\t\t\t\"stedolan/jq\": \"ibmcom/jq-ppc64le:latest\",\n\t\t\t\"gcr.io/kaniko-project/executor:v1.3.0\": getTestImage(kanikoImage),\n\t\t}\n\n\t}\n\n\treturn make(map[string]string)\n}",
"func (c Config) ConfigNames() []string {\n\tnames := []string{}\n\tfor k := range c.Configs {\n\t\tnames = append(names, k)\n\t}\n\tsort.Strings(names)\n\treturn names\n}",
"func (s *Split) Configurations() map[string]string {\n\treturn s.splitData.Configurations\n}",
"func (m *cgmRegistry) Names() []string { return nil }",
"func getSupportedCrdbImages() map[string]string {\n\tcrdbSupportedImages := make(map[string]string)\n\tsupportedVersions := getSupportedCrdbVersions()\n\tfor _, v := range supportedVersions {\n\t\tenvVar := fmt.Sprintf(\"%s%s\", RELATED_IMAGE_PREFIX, strings.ReplaceAll(v, \".\", \"_\"))\n\t\tcrdbSupportedImages[v] = os.Getenv(envVar)\n\t}\n\treturn crdbSupportedImages\n}",
"func (o RegistryTaskDockerStepOutput) ImageNames() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v RegistryTaskDockerStep) []string { return v.ImageNames }).(pulumi.StringArrayOutput)\n}",
"func (r *MockAdapter) GetImageNames() ([]string, error) {\n\tlog.Debug(\"MockRegistry::LoadSpecs\")\n\n\tspecYaml, err := ioutil.ReadFile(MockFile)\n\tif err != nil {\n\t\tlog.Debugf(\"Failed to read registry data from %s\", MockFile)\n\t\treturn nil, err\n\t}\n\n\tvar parsedData struct {\n\t\tApps []*bundle.Spec `yaml:\"apps\"`\n\t}\n\n\terr = yaml.Unmarshal(specYaml, &parsedData)\n\tif err != nil {\n\t\tlog.Error(\"Failed to unmarshal yaml file\")\n\t\treturn nil, err\n\t}\n\n\tlog.Debugf(\"Loaded Specs: %v\", parsedData)\n\n\tlog.Infof(\"Loaded [ %d ] specs from %s registry\", len(parsedData.Apps), \"Mock\")\n\tvar names []string\n\tr.specs = map[string]*bundle.Spec{}\n\n\tfor _, spec := range parsedData.Apps {\n\t\tr.specs[spec.Image] = spec\n\t\tnames = append(names, spec.Image)\n\t}\n\treturn names, nil\n}",
"func (p *ProviderConfig) GetAllProviderNames() []string {\n\tproviderNames := make([]string, 0)\n\n\tfor name := range p.Alibaba {\n\t\tproviderNames = append(providerNames, name)\n\t}\n\tfor name := range p.Anexia {\n\t\tproviderNames = append(providerNames, name)\n\t}\n\tfor name := range p.Aws {\n\t\tproviderNames = append(providerNames, name)\n\t}\n\tfor name := range p.Azure {\n\t\tproviderNames = append(providerNames, name)\n\t}\n\tfor name := range p.Digitalocean {\n\t\tproviderNames = append(providerNames, name)\n\t}\n\tfor name := range p.Fake {\n\t\tproviderNames = append(providerNames, name)\n\t}\n\tfor name := range p.Gcp {\n\t\tproviderNames = append(providerNames, name)\n\t}\n\tfor name := range p.Hetzner {\n\t\tproviderNames = append(providerNames, name)\n\t}\n\tfor name := range p.Kubevirt {\n\t\tproviderNames = append(providerNames, name)\n\t}\n\tfor name := range p.Openstack {\n\t\tproviderNames = append(providerNames, name)\n\t}\n\tfor name := range p.Packet {\n\t\tproviderNames = append(providerNames, name)\n\t}\n\tfor name := range p.Vsphere {\n\t\tproviderNames = append(providerNames, name)\n\t}\n\n\treturn providerNames\n}",
"func (c *Controller) DockerFilters() map[string][]string {\n labelFilter := \"mapreduced.ctl=\\\"\" + c.LabelValue + \"\\\"\"\n\n return map[string][]string{\"label\": []string{labelFilter}}\n}",
"func GetConfigsByDI(dimensionInfo string) map[string]interface{} {\n\treturn DefaultConf.ConfigFactory.GetConfigurationsByDimensionInfo(dimensionInfo)\n}",
"func getConfigServices(d *schema.ResourceData) (services []string) {\n\tif v, ok := d.GetOk(\"services\"); ok {\n\t\tfor _, svc := range v.(*schema.Set).List() {\n\t\t\tservices = append(services, svc.(string))\n\t\t}\n\t}\n\treturn\n}",
"func GetImageOverrides() map[string]string {\n\timageOverrides := make(map[string]string)\n\n\t// First check for environment variables containing the 'OPERAND_IMAGE_' prefix\n\tfor _, e := range os.Environ() {\n\t\tkeyValuePair := strings.SplitN(e, \"=\", 2)\n\t\tif strings.HasPrefix(keyValuePair[0], OperandImagePrefix) {\n\t\t\tkey := strings.ToLower(strings.Replace(keyValuePair[0], OperandImagePrefix, \"\", -1))\n\t\t\timageOverrides[key] = keyValuePair[1]\n\t\t}\n\t}\n\n\t// If entries exist containing operand image prefix, return\n\tif len(imageOverrides) > 0 {\n\t\tlogf.Info(\"Found image overrides from environment variables set by operand image prefix\")\n\t\treturn imageOverrides\n\t}\n\n\t// If no image overrides found, check 'RELATED_IMAGE_' prefix\n\tfor _, e := range os.Environ() {\n\t\tkeyValuePair := strings.SplitN(e, \"=\", 2)\n\t\tif strings.HasPrefix(keyValuePair[0], OSBSImagePrefix) {\n\t\t\tkey := strings.ToLower(strings.Replace(keyValuePair[0], OSBSImagePrefix, \"\", -1))\n\t\t\timageOverrides[key] = keyValuePair[1]\n\t\t}\n\t}\n\n\t// If entries exist containing related image prefix, return\n\tif len(imageOverrides) > 0 {\n\t\tlogf.Info(\"Found image overrides from environment variables set by related image prefix\")\n\t}\n\n\treturn imageOverrides\n}",
"func AllConfigOptions() []string {\n\treturn allConfigOptions\n}",
"func GetNameList(listConf Config) (nameList []string) {\n\tfor k := range listConf.Server {\n\t\tnameList = append(nameList, k)\n\t}\n\treturn\n}",
"func GetAll() interface{} { return viper.AllKeys() }",
"func (r QuayAdapter) GetImageNames() ([]string, error) {\n\tlog.Debug(\"QuayAdapter::GetImages\")\n\tlog.Debug(\"BundleSpecLabel: %s\", BundleSpecLabel)\n\tlog.Debug(\"Loading image list for quay.io Org: [ %v ]\", r.config.Org)\n\n\tvar imageList []string\n\n\t// check if we're configured for specific images\n\tif len(r.config.Images) > 0 {\n\t\tlog.Debugf(\"Configured to use images: %v\", r.config.Images)\n\t\timageList = append(imageList, r.config.Images...)\n\t}\n\n\t// discover images\n\treq, err := http.NewRequest(\"GET\", fmt.Sprintf(quayCatalogURL, r.config.URL, r.config.Org), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"Accept\", \"application/json\")\n\treq.Header.Set(\"Authorization\", fmt.Sprintf(\"Bearer %v\", r.config.Token))\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to load catalog response at %s - %v\", fmt.Sprintf(quayCatalogURL, r.config.URL, r.config.Org), err)\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tcatalogResp := quayImageResponse{}\n\terr = json.NewDecoder(resp.Body).Decode(&catalogResp)\n\tif err != nil {\n\t\tlog.Errorf(\"Failed to decode Catalog response from '%s'\", fmt.Sprintf(quayCatalogURL, r.config.URL, r.config.Org))\n\t\treturn nil, err\n\t}\n\n\tfor _, repo := range catalogResp.Repositories {\n\t\timageList = append(imageList, repo.Name)\n\t}\n\n\tif len(imageList) == 0 {\n\t\tlog.Warn(\"image list is empty. No images were discovered\")\n\t\treturn imageList, nil\n\t}\n\n\tvar uniqueList []string\n\timageMap := make(map[string]struct{})\n\tfor _, image := range imageList {\n\t\timageMap[image] = struct{}{}\n\t}\n\n\t// create a unique image list\n\tfor key := range imageMap {\n\t\tuniqueList = append(uniqueList, key)\n\t}\n\treturn uniqueList, nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
wsMessage handles browser requests to /msg/ | func wsMessage(w http.ResponseWriter, r *http.Request) {
// Get session, continue only if authenticated
sok, vok := checkLogin(r, cfgMap["session name"], "user")
if !sok || !vok {
http.Error(w, http.StatusText(http.StatusInternalServerError),
http.StatusInternalServerError)
return
}
cc := make(chan bool)
// upgrade to websocket
c, err := upgrader.Upgrade(w, r, nil)
defer c.Close()
if err != nil {
log.Println("upgrade:", err)
return
}
// handle websocket incoming browser messages
go func(c *websocket.Conn) {
for {
_, message, err := c.ReadMessage()
if err != nil {
log.Println("read:", err)
cc <- true
return
}
log.Printf("recv: %s", message)
}
}(c)
// send websocket message to browser
for {
select {
case <-cc:
return
default:
response := []byte(<-wsChan)
// mesage type = 1
err = c.WriteMessage(1, response)
if err != nil {
log.Println("ws write err:", err)
break
}
time.Sleep(time.Second)
}
}
} | [
"func (w *BaseWebsocketClient) OnWsMessage(payload []byte, isBinary bool) {}",
"func (t *T) Message(uri string, data types.Dict) { t.Write(websocket.Transport(uri, data)) }",
"func sendMessage(webSocket *websocket.Conn, msg Message) (err error) {\n\terr = websocket.JSON.Send(webSocket, &msg)\n\treturn\n\n}",
"func (ctx *Context) MessageHandler(w http.ResponseWriter, r *http.Request) {\n\t//get the current user\n\tss := &SessionState{}\n\t_, err := sessions.GetState(r, ctx.SessionKey, ctx.SessionStore, ss)\n\tif err != nil {\n\t\thttp.Error(w, \"error getting current session : \"+err.Error(), http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tif r.Method == \"POST\" {\n\n\t\td := json.NewDecoder(r.Body)\n\t\tnm := &messages.NewMessage{}\n\t\tif err := d.Decode(nm); err != nil {\n\t\t\thttp.Error(w, \"invalid JSON\", http.StatusBadRequest)\n\t\t}\n\n\t\t//Validate the models.NewMessage\n\t\terr := nm.Validate()\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"invalid message: \"+err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tm, err := ctx.MessageStore.InsertMessage(ss.User.ID, nm)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"error inserting message: \"+err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\t//Notify client of new message through websocket\n\t\tn, err := m.ToNewMessageEvent()\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"error creating message event: \"+err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\tctx.Notifier.Notify(n)\n\n\t\tw.Header().Add(headerContentType, contentTypeJSONUTF8)\n\t\tencoder := json.NewEncoder(w)\n\t\tencoder.Encode(m)\n\n\t}\n}",
"func receiveMessage(webSocket *websocket.Conn) (msg Message, err error) {\n\terr = websocket.JSON.Receive(webSocket, &msg)\n\n\treturn\n}",
"func MessageHandler(w http.ResponseWriter, r *http.Request) {\n w.WriteHeader(http.StatusOK)\n\n if(r.FormValue(\"Update\") == \"\") { // Send messages to frontend\n \t// Send last messages, keep track of index of last one sent\n\t json := simplejson.New()\n\n\t nb_messages := len(gossiper.RumorMessages)\n\n\t if(gossiper.LastRumorSentIndex >= nb_messages-1) {\n\t \treturn\n\t }\n\n\t messageArray := []string{}\n\t var msg string\n\n\t for i := gossiper.LastRumorSentIndex + 1; i < nb_messages; i++ {\n\t \tmsg = gossiper.RumorMessages[i].Origin + \" : \" + gossiper.RumorMessages[i].Text\n\t \tmessageArray = append(messageArray, msg)\n\t }\n\n\n\t gossiper.LastRumorSentIndex = nb_messages - 1\n\n\t\tjson.Set(\"Message\", messageArray)\n\n\t\tpayload, err := json.MarshalJSON()\n\t\tisError(err)\n\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(payload)\n\t} else if(r.FormValue(\"Message\") != \"\"){ // Get message from frontend\n\t\t// Do as in \"listenUIPort\" \n\n\t\tmsg := r.FormValue(\"Message\")\n\n\t\tfmt.Println(\"CLIENT MESSAGE\", msg) \t\n\t\tfmt.Println(\"PEERS\", gossiper.Peers_as_single_string)\n\n\t\tgossiper.SafeNextClientMessageIDs.mux.Lock()\n\t\trumorMessage := RumorMessage{\n\t\t\tOrigin: gossiper.Name, \n\t ID: gossiper.SafeNextClientMessageIDs.NextClientMessageID,\n\t Text: msg,\n\t\t}\n\n\t gossiper.SafeNextClientMessageIDs.NextClientMessageID++\n\t gossiper.SafeNextClientMessageIDs.mux.Unlock()\n\n\t\tstateID := updateStatusAndRumorArray(rumorMessage, false)\n\t\t\n\t\tif(len(gossiper.Peers) > 0) {\n\t\t\tgo rumormongering(rumorMessage, false)\n\t\t\tif(stateID != \"present\") {\n \t\tfmt.Println(\"Error : client message\", rumorMessage, \"has state id :\", stateID)\n \t}\n\t\t}\n\t}\n}",
"func (tv *TV) MessageHandler() (err error) {\n\tdefer func() {\n\t\ttv.resMutex.Lock()\n\t\tfor _, ch := range tv.res {\n\t\t\tclose(ch)\n\t\t}\n\t\ttv.res = nil\n\t\ttv.resMutex.Unlock()\n\t}()\n\n\tfor {\n\t\tmt, p, err := tv.ws.ReadMessage()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif mt != websocket.TextMessage {\n\t\t\tcontinue\n\t\t}\n\n\t\tmsg := Message{}\n\n\t\terr = json.Unmarshal(p, &msg)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\ttv.resMutex.Lock()\n\t\tch := tv.res[msg.ID]\n\t\ttv.resMutex.Unlock()\n\n\t\tch <- msg\n\t}\n}",
"func MessageClient(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\tcase http.MethodGet:\n\t\tgetMessageClient(w, r)\n\tdefault:\n\t\thttp.Error(w, \"Not Found\", http.StatusNotFound)\n\t}\n}",
"func WebAPIMessageSend(w http.ResponseWriter, r *http.Request) {\n\tviewData := BaseViewData(w, r)\n\tresponse := webAPIMessageSendResponse{Successful: false}\n\n\tif len(r.FormValue(\"message\")) == 0 {\n\t\tresponse.HasError = true\n\t\tresponse.Error = \"No Message\"\n\t\tRenderJSON(w, response)\n\t\treturn\n\t}\n\n\tif viewData.Session == nil {\n\t\tresponse.HasError = true\n\t\tresponse.Error = \"Not Logged In\"\n\t\tRenderJSON(w, response)\n\t\treturn\n\t}\n\n\tif !viewData.ValidCsrf(r) {\n\t\tresponse.HasError = true\n\t\tresponse.Error = \"CSRF Error\"\n\t\tRenderJSON(w, response)\n\t\treturn\n\t}\n\n\targs := URIArgs(r)\n\tif len(args) != 1 {\n\t\tresponse.HasError = true\n\t\tresponse.Error = \"Invalid Arguments\"\n\t\tRenderJSON(w, response)\n\t\treturn\n\t}\n\n\tidStr := args[0]\n\tid, err := strconv.Atoi(idStr)\n\tif err != nil {\n\t\tresponse.HasError = true\n\t\tresponse.Error = \"Invalid conversation ID\"\n\t\tRenderJSON(w, response)\n\t\treturn\n\t}\n\n\toffer, err := models.GetOfferByID(Base.Db, id)\n\tif err != nil || offer == nil {\n\t\tresponse.HasError = true\n\t\tresponse.Error = \"Not Found\"\n\t\tRenderJSON(w, response)\n\t\treturn\n\t}\n\n\tif offer.Status != models.OfferAccepted {\n\t\tresponse.HasError = true\n\t\tresponse.Error = \"Not a Conversation\"\n\t\tRenderJSON(w, response)\n\t\treturn\n\t}\n\n\tmessage := models.Message{\n\t\tMessage: r.FormValue(\"message\"),\n\t\tOffer: *offer,\n\t\tSender: viewData.Session.User,\n\t}\n\tvar otherUser *models.User\n\tif offer.Buyer.ID == viewData.Session.User.ID {\n\t\tmessage.Recepient.ID = offer.Seller.ID\n\t\totherUser = &offer.Seller\n\t} else {\n\t\tmessage.Recepient.ID = offer.Buyer.ID\n\t\totherUser = &offer.Buyer\n\t}\n\n\tok, messageErr := message.Create(Base.Db)\n\tif err != nil {\n\t\tresponse.HasError = true\n\t\tresponse.MessageError = *messageErr\n\t\tRenderJSON(w, response)\n\t\treturn\n\t}\n\n\tBase.WebsockChannel <- 
wsock.UserJSONNotification(otherUser,\n\t\tnotifNewMessage,\n\t\tmessage, true)\n\tBase.WebsockChannel <- wsock.UserJSONNotification(&viewData.Session.User,\n\t\tnotifNewMessage,\n\t\tmessage, false)\n\n\tif ok {\n\t\tresponse.Successful = true\n\t}\n\tRenderJSON(w, response)\n}",
"func ws_SendMsg(ws *websocket.Conn, send_channel SendChannel) {\n\tfor {\n\t\tselect {\n\t\tcase send_msg := <-send_channel.containers:\n\t\t\tlog.Printf(\"[%s] containers sendMessage= \", __FILE__, send_msg)\n\t\t\twebsocket.JSON.Send(ws, send_msg)\n\t\tcase send_msg := <-send_channel.updateinfo:\n\t\t\tlog.Printf(\"[%s] update sendMessage=\", __FILE__, send_msg)\n\t\t}\n\t}\n}",
"func (api *WebSocketAPI) DispatchMessageEvent(data []byte) error {\n\tif err := api.Battle.DispatchMessageEvent(data); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (bot *Hitbot) MessageHandler() {\n\tfor {\n\t\t_, p, err := bot.conn.ReadMessage()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\t//log.Printf(\"Message: %v\", string(p)) //debug info\n\t\tif string(p[:3]) == \"2::\" {\n\t\t\tbot.conn.WriteMessage(websocket.TextMessage, []byte(\"2::\"))\n\t\t\t//log.Print(\"Ping!\")\n\t\t\tcontinue\n\t\t} else if string(p[:3]) == \"1::\" {\n\t\t\tlog.Print(\"Connection successful!\")\n\t\t\tfor _, channel := range bot.channels {\n\t\t\t\tbot.joinChannel(channel)\n\t\t\t}\n\t\t\tcontinue\n\t\t} else if string(p[:4]) == \"5:::\" {\n\t\t\tbot.parseMessage(p[4:])\n\t\t}\n\t}\n}",
"func (q *Q) WebNewMessage(response http.ResponseWriter, request *http.Request) {\n\tvars := mux.Vars(request)\n\tmsg := q.Message(vars[\"topic\"])\n\tif msg != nil {\n\t\t// woot woot! message found\n\t\tresponse.WriteHeader(http.StatusOK)\n\t\tfmt.Fprintf(response, msg.String())\n\t} else {\n\t\t// boo, couldn't find a message\n\t\tresponse.WriteHeader(http.StatusNotFound)\n\t\tfmt.Fprintf(response, \"{}\")\n\t}\n}",
"func (srv *Server) handleMessage(msg *Message) error {\n\tswitch msg.msgType {\n\tcase MsgSignalBinary:\n\t\tfallthrough\n\tcase MsgSignalUtf8:\n\t\tfallthrough\n\tcase MsgSignalUtf16:\n\t\tsrv.handleSignal(msg)\n\n\tcase MsgRequestBinary:\n\t\tfallthrough\n\tcase MsgRequestUtf8:\n\t\tfallthrough\n\tcase MsgRequestUtf16:\n\t\tsrv.handleRequest(msg)\n\n\tcase MsgRestoreSession:\n\t\treturn srv.handleSessionRestore(msg)\n\tcase MsgCloseSession:\n\t\treturn srv.handleSessionClosure(msg)\n\t}\n\treturn nil\n}",
"func (h *messageHandler) HandleMessage(m *nsq.Message) error {\n\t//Process the Message\n\tvar request Message\n\tif err := json.Unmarshal(m.Body, &request); err != nil {\n\t\tlog.Println(\"Error when Unmarshaling the message body, Err : \", err)\n\t\t// Returning a non-nil error will automatically send a REQ command to NSQ to re-queue the message.\n\t\treturn err\n\t}\n\t//Print the Message\n\tlog.Println(\"Message\")\n\tlog.Println(\"--------------------\")\n\tlog.Println(\"Name : \", request.Name)\n\tlog.Println(\"Content : \", request.Content)\n\tlog.Println(\"Timestamp : \", request.Timestamp)\n\tlog.Println(\"--------------------\")\n\tlog.Println(\"\")\n\n\t//sendMsg(request.Name, request.Content)\n\n\t// Will automatically set the message as finish\n\treturn nil\n}",
"func GetMessageWS(c *gin.Context) {\n\thandler := websocket.Handler(func(conn *websocket.Conn) {\n\n\t\tfor _, row := range database.LocalDB {\n\t\t\tconn.Write([]byte(row))\n\t\t}\n\n\t\tio.Copy(conn, conn)\n\n\t})\n\n\thandler.ServeHTTP(c.Writer, c.Request)\n}",
"func (s *SSHSession) receiveWsMsg(wsConn *websocket.Conn, exitCh chan bool, key string) {\n\t//tells other go routine quit\n\tdefer setQuit(exitCh)\n\tvar cmdStr string\n\tfor {\n\t\tselect {\n\t\tcase <-exitCh:\n\t\t\treturn\n\t\tdefault:\n\t\t\t//read websocket msg\n\t\t\t_, wsData, err := wsConn.ReadMessage()\n\t\t\tif err != nil {\n\t\t\t\tcode := err.(*websocket.CloseError).Code\n\t\t\t\tif code == 1000 || code == 1001 {\n\t\t\t\t\thub.delete(key)\n\t\t\t\t}\n\t\t\t\tcommon.Log.Debugf(\"reading webSocket message failed\\t%s\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t//unmashal bytes into struct\n\t\t\tvar msgObj wsMsg\n\t\t\terr = json.Unmarshal(wsData, &msgObj)\n\t\t\tswitch msgObj.Type {\n\t\t\tcase wsMsgResize:\n\t\t\t\tif msgObj.Cols > 0 && msgObj.Rows > 0 {\n\t\t\t\t\tif err := s.Session.WindowChange(msgObj.Rows, msgObj.Cols); err != nil {\n\t\t\t\t\t\tcommon.Log.Debugf(\"ssh pty change windows size failed:\\t\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase wsMsgCmd:\n\t\t\t\t// 写入cmd到shell stdin\n\t\t\t\tif _, err := s.StdinPipe.Write(utils.Str2Bytes(msgObj.Cmd)); err != nil {\n\t\t\t\t\tcommon.Log.Debugf(\"ws cmd bytes write to ssh.stdin pipe failed:\\t\", err)\n\t\t\t\t}\n\t\t\t\tif msgObj.Cmd == \"\\r\" || msgObj.Cmd == \"\\n\" {\n\t\t\t\t\tif cmdStr != \"\" {\n\t\t\t\t\t\tfmt.Println(compressStr(cmdStr))\n\t\t\t\t\t\tcmdStr = \"\"\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t//matched,_ :=regexp.MatchString(\"[\\\\u0001-\\\\u0003]\",msgObj.Cmd)\n\t\t\t\t\t//if matched{\n\t\t\t\t\t//\tcmdStr =cmdStr + msgObj.Cmd\n\t\t\t\t\t//} else{\n\t\t\t\t\t//\tfmt.Println(\"特殊符号\")\n\t\t\t\t\t//}\n\t\t\t\t\tswitch msgObj.Cmd {\n\t\t\t\t\t// ctrl + c\n\t\t\t\t\tcase \"\\u0003\":\n\t\t\t\t\t\tcmdStr = \"\"\n\t\t\t\t\t// 退格\n\t\t\t\t\tcase \"\\u007F\":\n\t\t\t\t\t\tlastStr := cmdStr[len(cmdStr)-1:]\n\t\t\t\t\t\tcmdStr = strings.TrimSuffix(cmdStr, lastStr)\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tcmdStr = cmdStr + msgObj.Cmd\n\t\t\t\t\t}\n\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}",
"func Message(w http.ResponseWriter, status bool, msg string) {\n\tRespond(w, map[string]interface{}{\"status\": status, \"message\": msg})\n}",
"func WebsocketMessengerHandler(w http.ResponseWriter, r *http.Request) {\n\tpagestate := r.FormValue(\"id\")\n\n\tif !page.HasPage(pagestate) {\n\t\t// The page manager has no record of the pagestate, so either it is expired or never existed\n\t\treturn // TODO: return error?\n\t}\n\n\t// Inject the pagestate as the client ID so the next handler down can read it\n\tctx := context.WithValue(r.Context(), goradd.WebSocketContext, pagestate)\n\tmessageServer.Messenger.(*ws.WsMessenger).WebSocketHandler().ServeHTTP(w, r.WithContext(ctx))\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
wsChanSend build and send messages to connected browsers, the channel is shared between connections | func wsChanSend() {
log.Println("wschan running...")
i := 1
for {
// send stuff to clients
// TODO: solve multiple clients connecting
wsChan <- "test: " + strconv.Itoa(i)
i++
}
} | [
"func ws_SendMsg(ws *websocket.Conn, send_channel SendChannel) {\n\tfor {\n\t\tselect {\n\t\tcase send_msg := <-send_channel.containers:\n\t\t\tlog.Printf(\"[%s] containers sendMessage= \", __FILE__, send_msg)\n\t\t\twebsocket.JSON.Send(ws, send_msg)\n\t\tcase send_msg := <-send_channel.updateinfo:\n\t\t\tlog.Printf(\"[%s] update sendMessage=\", __FILE__, send_msg)\n\t\t}\n\t}\n}",
"func wsMessage(w http.ResponseWriter, r *http.Request) {\n\n\t// Get session, continue only if authenticated\n\tsok, vok := checkLogin(r, cfgMap[\"session name\"], \"user\")\n\tif !sok || !vok {\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError),\n\t\t\thttp.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tcc := make(chan bool)\n\t// upgrade to websocket\n\tc, err := upgrader.Upgrade(w, r, nil)\n\tdefer c.Close()\n\tif err != nil {\n\t\tlog.Println(\"upgrade:\", err)\n\t\treturn\n\t}\n\n\t// handle websocket incoming browser messages\n\tgo func(c *websocket.Conn) {\n\t\tfor {\n\t\t\t_, message, err := c.ReadMessage()\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"read:\", err)\n\t\t\t\tcc <- true\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlog.Printf(\"recv: %s\", message)\n\t\t}\n\t}(c)\n\n\t// send websocket message to browser\n\tfor {\n\t\tselect {\n\t\tcase <-cc:\n\t\t\treturn\n\t\tdefault:\n\t\t\tresponse := []byte(<-wsChan)\n\t\t\t// mesage type = 1\n\t\t\terr = c.WriteMessage(1, response)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"ws write err:\", err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t}\n}",
"func (h *hub) update() {\n\t//send each channel its client state\n\tfor c := range h.connections {\n\t\tmsg := c.client.GetMessage()\n\t\tc.ws.WriteMessage(websocket.BinaryMessage, msg)\n\t}\n}",
"func sendLoop() {\n\tif currentWebsocket == nil {\n\t\tcolorLog(\"[INFO] BW: No connection, wait for it.\\n\")\n\t\tcmd := <-connectChannel\n\t\tif \"QUIT\" == cmd.Action {\n\t\t\treturn\n\t\t}\n\t}\n\n\tfor {\n\t\tnext, ok := <-toBrowserChannel\n\t\tif !ok {\n\t\t\tcolorLog(\"[WARN] BW: Send channel was closed.\\n\")\n\t\t\tbreak\n\t\t}\n\n\t\tif \"QUIT\" == next.Action {\n\t\t\tbreak\n\t\t}\n\n\t\tif currentWebsocket == nil {\n\t\t\tcolorLog(\"[INFO] BW: No connection, wait for it.\\n\")\n\t\t\tcmd := <-connectChannel\n\t\t\tif \"QUIT\" == cmd.Action {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\twebsocket.JSON.Send(currentWebsocket, &next)\n\t\tcolorLog(\"[SUCC] BW: Sent %v.\\n\", next)\n\t}\n\n\tcolorLog(\"[WARN] BW: Exit send loop.\\n\")\n}",
"func (w *websocketPeer) sendHandler() {\n\tdefer close(w.writerDone)\n\tfor msg := range w.wr {\n\t\tif msg == nil {\n\t\t\treturn\n\t\t}\n\t\tb, err := w.serializer.Serialize(msg.(wamp.Message))\n\t\tif err != nil {\n\t\t\tw.log.Print(err)\n\t\t}\n\n\t\tif err = w.conn.WriteMessage(w.payloadType, b); err != nil {\n\t\t\tif !wamp.IsGoodbyeAck(msg) {\n\t\t\t\tw.log.Print(err)\n\t\t\t}\n\t\t}\n\t}\n}",
"func (h *HitBTC) wsSend(data interface{}) error {\n\th.wsRequestMtx.Lock()\n\tdefer h.wsRequestMtx.Unlock()\n\tjson, err := common.JSONEncode(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif h.Verbose {\n\t\tlog.Debugf(\"%v sending message to websocket %v\", h.Name, data)\n\t}\n\treturn h.WebsocketConn.WriteMessage(websocket.TextMessage, json)\n}",
"func (session *session) webSocketSend(response configs.WsMessage) error {\n\tsession.writeMutex.Lock()\n\tdefer session.writeMutex.Unlock()\n\tresponse.Timestamp = time.Now().UnixNano() / 1000000\n\tresponse.SessionID = session.sessionID\n\n\treturn session.ws.WriteJSON(response)\n}",
"func serveWs(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"GET\" {\n\t\thttp.Error(w, \"Method not allowed\", 405)\n\t\treturn\n\t}\n\n\tchatroomId := r.FormValue(\"chatroomId\")\n\n\tvar upgrader = websocket.Upgrader{\n\t\tReadBufferSize: 4096,\n\t\tWriteBufferSize: 4096,\t\n\t\tCheckOrigin: func(r *http.Request) bool { return true },\n\t}\n\n\tws, err := upgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\th := chatIdToHub[chatroomId]\n\tif h == nil{\n\t\tfmt.Println(\"Couldn't find hub.\")\n\t}\n\n\tc := &connection{chatroomId: chatroomId, nicknameId: \"sadf\", hub: h, send: make(chan []byte, 256), ws: ws}\n\tc.hub.register <- c\n\tgo c.writePump()\n\tc.readPump()\n}",
"func (h *Hub) sendAll(msg Message) {\n\tfor _, client := range h.Clients {\n\t\tif client.Connection != nil {\n\t\t\twebsocket.JSON.Send(client.Connection, msg)\n\t\t} else {\n\t\t\tlog.Printf(\"Client %v not connected\", client.ID)\n\t\t}\n\t}\n}",
"func SendEventsToMixPanel(mixPanel *cfenv.Service, msgChan chan *events.Envelope) {\n\tmixPanelToken = mixPanel.Credentials[\"token\"].(string)\n\tlog.Println(\"Using Mixpanel Token \" + mixPanelToken)\n\tfor i := 0; i < 3; i++ {\n\t\tgo MixPanelWorker(strconv.Itoa(i),\n\t\t\tMixPanelSender{URL: mixPanel.Credentials[\"uri\"].(string)})\n\t}\n\n\tfor msg := range msgChan {\n\t\tmixPanelChanel <- EventToJSON(msg)\n\t}\n}",
"func sendMessage(webSocket *websocket.Conn, msg Message) (err error) {\n\terr = websocket.JSON.Send(webSocket, &msg)\n\treturn\n\n}",
"func (h *hub) run() {\n for {\n select{\n case s := <- h.register:\n // fmt.Println(\"wild client has appeared in the brush!\")\n clients := h.channels[s.channel]\n if clients == nil {\n clients = make(map[*client]bool)\n h.channels[s.channel] = clients\n }\n h.channels[s.channel][s.client] = true\n //send the latest data for room (empty string if new room)\n //s.client.send <- []byte(contents[s.channel])\n case s := <- h.unregister:\n clients := h.channels[s.channel]\n if clients != nil {\n if _, ok := clients[s.client]; ok{\n delete(clients, s.client)\n close(s.client.send)\n if len(clients) == 0 {\n delete(h.channels, s.channel)\n if len(contents[s.channel]) != 0 {\n //delete contents for channel if no more clients using it.\n delete(contents, s.channel)\n }\n }\n }\n }\n case m := <- h.broadcast:\n clients := h.channels[m.channel]\n // fmt.Println(\"broadcasting message to \", clients, \"data is: \", string(m.data))\n for c := range clients {\n fmt.Println(\"broadcasting message to \", c, \"data is: \", string(m.data))\n select {\n case c.send <- m.data:\n contents[m.channel] = string(m.data)\n default:\n close(c.send)\n delete(clients, c)\n if len(clients) == 0 {\n delete(h.channels, m.channel)\n if len(contents[m.channel]) != 0 {\n //delete contents for channel if no more clients using it.\n delete(contents, m.channel)\n }\n }\n }\n }\n }\n }\n}",
"func ListenToWsChannel() {\n\tresp := &WsJsonResponse{\n\t\tAction: \"\",\n\t\tMessage: \"\",\n\t\tMessageType: \"\",\n\t}\n\tfor {\n\t\te := <-WsChan\n\n\t\tswitch e.Action {\n\t\tcase \"usernames\":\n\t\t\tusers := getAllClients()\n\t\t\tresp.Action = \"UsersList\"\n\t\t\tresp.MessageType = \"JSON\"\n\t\t\tresp.Message = \"Get all usernames\"\n\t\t\tresp.UsersList = users\n\t\t\tgo broadCastToAll(resp)\n\t\tcase \"left\":\n\t\t\tdelete(Clients, e.UserConn)\n\t\t\tusers := getAllClients()\n\t\t\tresp.Action = \"LeftUser\"\n\t\t\tresp.MessageType = \"JSON\"\n\t\t\tresp.Message = e.Username + \" Left !\"\n\t\t\tresp.UsersList = users\n\t\t\tclose(e.UserConn.CloseChan)\n\t\t\tgo broadCastToAll(resp)\n\t\tcase \"broadcast\":\n\t\t\tresp.Action = \"BroadCast\"\n\t\t\tresp.MessageType = \"JSON\"\n\t\t\tresp.Message = e.Username + \" : \" + e.Message\n\t\t\tgo broadCastToAll(resp)\n\t\tcase \"private\":\n\t\t\tresp.Action = \"Private\"\n\t\t\tresp.Message = \"JSON\"\n\t\t\tresp.Message = e.Username + \" : \" + e.Message\n\t\t\tgo sendPrivateMsg(resp, e.Target)\n\t\tdefault:\n\t\t\tresp.Action = e.Action + \"; Action\"\n\t\t\tresp.Message = fmt.Sprintf(\"Some message you sent : %v\", e.Username)\n\t\t\tresp.MessageType = \"JSON\"\n\t\t\terr := e.UserConn.MyConn.WriteJSON(resp)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}",
"func (s *Server) SendWebsocketUpdate() {\n\tmsg := websockets.Message{\n\t\tData: UpdateStateMessage{\n\t\t\tTournaments: s.DB.Tournaments,\n\t\t},\n\t}\n\n\ts.ws.SendAll(&msg)\n}",
"func (conn *PeerConnection) send(payload string) {\n\tconn.ws.SetWriteDeadline(time.Now().Add(writeWait))\n\tconn.ws.WriteMessage(websocket.TextMessage, []byte(payload))\n}",
"func SendWorker(ch chan RemoteCommandMessage, broadlink broadlinkrm.Broadlink, wg *sync.WaitGroup) {\n\tfor msg := range ch {\n\t\tfor _, cmd := range msg.commands {\n\t\t\tswitch cmd.commandType {\n\t\t\tcase SendCommand:\n\t\t\t\terr := broadlink.Execute(cmd.target, cmd.data)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"Error executing command: %v\", err)\n\t\t\t\t}\n\t\t\tcase Pause:\n\t\t\t\tinterval, err := strconv.Atoi(cmd.data)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"Error processing pause interval (%v): %v\", cmd.data, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\ttime.Sleep(time.Duration(interval) * time.Millisecond)\n\t\t\tcase shutdown:\n\t\t\t\twg.Done()\n\t\t\t\tlog.Print(\"SendWorker terminated\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}",
"func ChMessageSend(textChannelID, message string) {\n\tfor i := 0; i < 10; i++ {\n\t\t_, err := dg.ChannelMessageSend(textChannelID, message)\n\t\tif err != nil {\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n}",
"func (handlers *Handlers) runWebsocket(client *websocket.Conn) (send chan<- []byte, weHaveQuit chan<- struct{}, receive <-chan []byte, remoteHasQuit <-chan struct{}) {\n\tconst maxMessageSize = 512\n\n\tweHaveQuitChan := make(chan struct{})\n\tremoteHasQuitChan := make(chan struct{})\n\tsendChan := make(chan []byte)\n\treceiveChan := make(chan []byte)\n\n\treadLoop := func() {\n\t\tdefer func() {\n\t\t\tclose(remoteHasQuitChan)\n\t\t\t_ = client.Close()\n\t\t}()\n\t\tclient.SetReadLimit(maxMessageSize)\n\t\tfor {\n\t\t\t_, msg, err := client.ReadMessage()\n\t\t\t// check if it is the message to request the pairing\n\t\t\tif string(msg) == \"v\" {\n\t\t\t\tmsg = handlers.noiseConfig.CheckVerification()\n\t\t\t\terr = client.WriteMessage(websocket.TextMessage, msg)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(\"Error, websocket failed to write channel hash verification message\")\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\tif websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway) {\n\t\t\t\t\tlog.Println(\"Error, websocket closed unexpectedly in the reading loop\")\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tmessageDecrypted, err := handlers.noiseConfig.Decrypt(msg)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Error, websocket could not decrypt incoming packages\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treceiveChan <- messageDecrypted\n\t\t}\n\t}\n\n\twriteLoop := func() {\n\t\tdefer func() {\n\t\t\t_ = client.Close()\n\t\t}()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase message, ok := <-sendChan:\n\t\t\t\tif !ok {\n\t\t\t\t\t_ = client.WriteMessage(websocket.CloseMessage, []byte{})\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\terr := client.WriteMessage(websocket.TextMessage, handlers.noiseConfig.Encrypt(message))\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(\"Error, websocket closed unexpectedly in the writing loop\")\n\t\t\t\t}\n\t\t\tcase <-weHaveQuitChan:\n\t\t\t\t_ = client.WriteMessage(websocket.CloseMessage, 
[]byte{})\n\t\t\t\tlog.Println(\"closing websocket connection\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\tgo readLoop()\n\tgo writeLoop()\n\n\treturn sendChan, weHaveQuitChan, receiveChan, remoteHasQuitChan\n}",
"func (fr *frame) chanSend(ch *govalue, elem *govalue) {\n\telemtyp := ch.Type().Underlying().(*types.Chan).Elem()\n\telem = fr.convert(elem, elemtyp)\n\telemptr := fr.allocaBuilder.CreateAlloca(elem.value.Type(), \"\")\n\tfr.builder.CreateStore(elem.value, elemptr)\n\telemptr = fr.builder.CreateBitCast(elemptr, llvm.PointerType(llvm.Int8Type(), 0), \"\")\n\tchantyp := fr.types.ToRuntime(ch.Type())\n\tfr.runtime.sendBig.call(fr, chantyp, ch.value, elemptr)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
NewQuotaRateLimit registers a new resource with the given unique name, arguments, and options. | func NewQuotaRateLimit(ctx *pulumi.Context,
name string, args *QuotaRateLimitArgs, opts ...pulumi.ResourceOption) (*QuotaRateLimit, error) {
if args == nil {
return nil, errors.New("missing one or more required arguments")
}
if args.Rate == nil {
return nil, errors.New("invalid value for required argument 'Rate'")
}
var resource QuotaRateLimit
err := ctx.RegisterResource("vault:index/quotaRateLimit:QuotaRateLimit", name, args, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
} | [
"func New(arguments framework.Arguments) framework.Plugin {\n\treturn &resourceQuotaPlugin{\n\t\tpluginArguments: arguments,\n\t}\n}",
"func NewRateLimit(limit int, deltat time.Duration) *RateLimit {\n\treturn &RateLimit{Rate{NewCounter(0), deltat}, limit, time.Now()}\n}",
"func NewQuota(m int) *Quota {\n\treturn (&Quota{}).Init(m)\n}",
"func NewRateLimit(storage store.Store, statsClient stats.Client) *RateLimit {\n\treturn &RateLimit{storage, statsClient}\n}",
"func newQuota() *catalogue.Quota {\n\treturn &catalogue.Quota{\n\t\tCores: 99999,\n\t\tFloatingIPs: 99999,\n\t\tInstances: 99999,\n\t\tKeyPairs: 99999,\n\t\tRAM: 99999,\n\t\tTenant: \"test-tenant\",\n\t}\n}",
"func CreateQuota(f cmdutil.Factory, cmdOut io.Writer, cmd *cobra.Command, args []string) error {\n\tname, err := NameFromCommandArgs(cmd, args)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar generator kubectl.StructuredGenerator\n\tswitch generatorName := cmdutil.GetFlagString(cmd, \"generator\"); generatorName {\n\tcase cmdutil.ResourceQuotaV1GeneratorName:\n\t\tgenerator = &kubectl.ResourceQuotaGeneratorV1{\n\t\t\tName: name,\n\t\t\tHard: cmdutil.GetFlagString(cmd, \"hard\"),\n\t\t\tScopes: cmdutil.GetFlagString(cmd, \"scopes\"),\n\t\t}\n\tdefault:\n\t\treturn cmdutil.UsageError(cmd, fmt.Sprintf(\"Generator: %s not supported.\", generatorName))\n\t}\n\treturn RunCreateSubcommand(f, cmd, cmdOut, &CreateSubcommandOptions{\n\t\tName: name,\n\t\tStructuredGenerator: generator,\n\t\tDryRun: cmdutil.GetFlagBool(cmd, \"dry-run\"),\n\t\tOutputFormat: cmdutil.GetFlagString(cmd, \"output\"),\n\t})\n}",
"func (client QuotaRequestClient) Create(ctx context.Context, subscriptionID string, providerID string, location string, resourceName string, createQuotaRequest CurrentQuotaLimitBase, ifMatch string) (result QuotaRequestCreateFuture, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/QuotaRequestClient.Create\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.Response() != nil {\n\t\t\t\tsc = result.Response().StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\treq, err := client.CreatePreparer(ctx, subscriptionID, providerID, location, resourceName, createQuotaRequest, ifMatch)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"reservations.QuotaRequestClient\", \"Create\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresult, err = client.CreateSender(req)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"reservations.QuotaRequestClient\", \"Create\", result.Response(), \"Failure sending request\")\n\t\treturn\n\t}\n\n\treturn\n}",
"func newQuotaPool(q int) *quotaPool {\n\tqp := "aPool{\n\t\tacquireChannel: make(chan int, 1),\n\t}\n\tif q > 0 {\n\t\tqp.acquireChannel <- q\n\t} else {\n\t\tqp.quota = q\n\t}\n\treturn qp\n}",
"func NewRateLimit(redisPool *redis.Pool, config *config.RateLimitConfig) *RateLimit {\n\treturn &RateLimit{\n\t\tredisPool: redisPool,\n\t\tconfig: config,\n\t}\n}",
"func NewRateLimit(maxEvents int, period time.Duration) *RateLimit {\n\tvar rl RateLimit\n\n\trl.start = make(chan struct{})\n\trl.finish = make(chan bool, maxEvents*2)\n\trl.close = make(chan chan error)\n\trl.countReq = make(chan chan int)\n\trl.outstandingReq = make(chan chan int)\n\n\trl.expires = make([]time.Time, maxEvents)\n\n\trl.maxEvents = maxEvents\n\trl.period = period\n\n\trl.activeStart = rl.start\n\n\tgo rl.run()\n\n\treturn &rl\n}",
"func NewLimitRate(name string, options LimitRateOptions) *LimitRate {\n\tthis := LimitRate{}\n\tthis.Name = name\n\tthis.Options = options\n\treturn &this\n}",
"func NewUsageLimit(ctx *pulumi.Context,\n\tname string, args *UsageLimitArgs, opts ...pulumi.ResourceOption) (*UsageLimit, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.Amount == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Amount'\")\n\t}\n\tif args.ClusterIdentifier == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ClusterIdentifier'\")\n\t}\n\tif args.FeatureType == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'FeatureType'\")\n\t}\n\tif args.LimitType == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'LimitType'\")\n\t}\n\tvar resource UsageLimit\n\terr := ctx.RegisterResource(\"aws:redshift/usageLimit:UsageLimit\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func New(config config.Config) (RateLimiter, error) {\n\n\tstorage, err := resolveBucketStore(config.Storage)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlimits := []limit.Limit{}\n\tfor name, config := range config.Limits {\n\t\tlimit, err := limit.New(name, config, storage)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tlimits = append(limits, limit)\n\t}\n\n\trateLimiter := &rateLimiter{limits: limits}\n\treturn rateLimiter, nil\n}",
"func New(c *aqm.Config) *Limiter {\n\tl := &Limiter{\n\t\trate: vegas.New(),\n\t\tqueue: aqm.New(c),\n\t}\n\tgo func() {\n\t\tticker := time.NewTicker(time.Second * 1)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\t<-ticker.C\n\t\t\tv := l.rate.Stat()\n\t\t\tq := l.queue.Stat()\n\t\t\tlog.Info(\"rate/limit: limit(%d) inFlight(%d) minRtt(%v) rtt(%v) codel packets(%d)\", v.Limit, v.InFlight, v.MinRTT, v.LastRTT, q.Packets)\n\t\t}\n\t}()\n\treturn l\n}",
"func NewQuotaDriver(name string) BaseQuota {\n\tvar quota BaseQuota\n\tswitch name {\n\tcase \"grpquota\":\n\t\tquota = &GrpQuotaDriver{\n\t\t\tquotaIDs: make(map[uint32]struct{}),\n\t\t\tmountPoints: make(map[uint64]string),\n\t\t}\n\tcase \"prjquota\":\n\t\tquota = &PrjQuotaDriver{\n\t\t\tquotaIDs: make(map[uint32]struct{}),\n\t\t\tmountPoints: make(map[uint64]string),\n\t\t\tdevLimits: make(map[uint64]uint64),\n\t\t}\n\tdefault:\n\t\tkernelVersion, err := kernel.GetKernelVersion()\n\t\tif err == nil && kernelVersion.Kernel >= 4 {\n\t\t\tquota = &PrjQuotaDriver{\n\t\t\t\tquotaIDs: make(map[uint32]struct{}),\n\t\t\t\tmountPoints: make(map[uint64]string),\n\t\t\t\tdevLimits: make(map[uint64]uint64),\n\t\t\t}\n\t\t} else {\n\t\t\tquota = &GrpQuotaDriver{\n\t\t\t\tquotaIDs: make(map[uint32]struct{}),\n\t\t\t\tmountPoints: make(map[uint64]string),\n\t\t\t}\n\t\t}\n\t}\n\n\treturn quota\n}",
"func CreateResourceQuota(parent *tenancyv1alpha1.TanzuNamespace) (metav1.Object, error) {\n\n\tfmap := template.FuncMap{\n\t\t\"defaultResourceQuotaCPURequests\": defaultResourceQuotaCPURequests,\n\t\t\"defaultResourceQuotaMemoryRequests\": defaultResourceQuotaMemoryRequests,\n\t\t\"defaultResourceQuotaCPULimits\": defaultResourceQuotaCPULimits,\n\t\t\"defaultResourceQuotaMemoryLimits\": defaultResourceQuotaMemoryLimits,\n\t}\n\n\tchildContent, err := runTemplate(\"tanzu-resource-quota\", resourceResourceQuota, parent, fmap)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdecode := scheme.Codecs.UniversalDeserializer().Decode\n\tobj, _, err := decode([]byte(childContent), nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresourceObj := obj.(*k8s_api.ResourceQuota)\n\tresourceObj.Namespace = defaultNamespace(parent.Name, &parent.Spec)\n\n\treturn resourceObj, nil\n}",
"func newRateLimiter() *rateLimiter {\n\trl := &rateLimiter{\n\t\tlimiters: make(map[internalpb.RateType]*ratelimitutil.Limiter),\n\t}\n\trl.registerLimiters()\n\treturn rl\n}",
"func newRatelimiter(qps int) workqueue.RateLimiter {\n\treturn workqueue.NewMaxOfRateLimiter(\n\t\tworkqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second),\n\t\t&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(qps), qps*5)},\n\t)\n}",
"func createRateLimiter(rate ratelimiter.Limit) *ratelimiter.Limiter {\n\tvar rateLimiter *ratelimiter.Limiter\n\tif rate > 0 {\n\t\t// Allow burst of 1/10\n\t\tburst := int(rate / 10)\n\t\tif burst == 0 {\n\t\t\tburst = 1\n\t\t}\n\t\trateLimiter = ratelimiter.NewLimiter(rate, burst)\n\t}\n\treturn rateLimiter\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
GetQuotaRateLimit gets an existing QuotaRateLimit resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required). | func GetQuotaRateLimit(ctx *pulumi.Context,
name string, id pulumi.IDInput, state *QuotaRateLimitState, opts ...pulumi.ResourceOption) (*QuotaRateLimit, error) {
var resource QuotaRateLimit
err := ctx.ReadResource("vault:index/quotaRateLimit:QuotaRateLimit", name, id, state, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
} | [
"func (t *Type) GetRateLimit(name string) (types.RateLimit, error) {\n\tif rl, exists := t.rateLimits[name]; exists {\n\t\treturn rl, nil\n\t}\n\treturn nil, types.ErrRateLimitNotFound\n}",
"func (c *Client) GetRateLimit() (*RateLimitResponse, error) {\n\treq, err := http.NewRequest(\"POST\", rateLimitURL, strings.NewReader(c.Payload))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"Content-Type\", \"application/json\")\n\tresp, err := c.HC.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbodyBytes, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tintf := RateLimitResponse{}\n\tif err = json.Unmarshal(bodyBytes, &intf); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &intf, nil\n}",
"func (p *WorkerPool) GetRateLimit(ctx context.Context, rlRequest *RateLimitReq) (retval *RateLimitResp, reterr error) {\n\t// Delegate request to assigned channel based on request key.\n\tworker := p.getWorker(rlRequest.UniqueKey)\n\thandlerRequest := &request{\n\t\tctx: ctx,\n\t\tresp: make(chan *response, 1),\n\t\trequest: rlRequest,\n\t}\n\n\t// Send request.\n\tselect {\n\tcase worker.getRateLimitRequest <- handlerRequest:\n\t\t// Successfully sent request.\n\tcase <-ctx.Done():\n\t\treturn nil, ctx.Err()\n\t}\n\n\tmetricWorkerQueueLength.WithLabelValues(\"GetRateLimit\", worker.name).Observe(float64(len(worker.getRateLimitRequest)))\n\n\t// Wait for response.\n\tselect {\n\tcase handlerResponse := <-handlerRequest.resp:\n\t\t// Successfully read response.\n\t\treturn handlerResponse.rl, handlerResponse.err\n\tcase <-ctx.Done():\n\t\treturn nil, ctx.Err()\n\t}\n}",
"func NewQuotaRateLimit(ctx *pulumi.Context,\n\tname string, args *QuotaRateLimitArgs, opts ...pulumi.ResourceOption) (*QuotaRateLimit, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.Rate == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Rate'\")\n\t}\n\tvar resource QuotaRateLimit\n\terr := ctx.RegisterResource(\"vault:index/quotaRateLimit:QuotaRateLimit\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (c *Client) GetRateLimits(ctx context.Context, req *request) (remaining, resetTime int64, err error) {\n\tresp, err := c.client.GetRateLimits(ctx, &gubernator.GetRateLimitsReq{\n\t\tRequests: []*gubernator.RateLimitReq{{\n\t\t\tName: req.name,\n\t\t\tUniqueKey: req.key,\n\t\t\tHits: 1,\n\t\t\tLimit: req.limit,\n\t\t\tDuration: req.duration,\n\t\t\tAlgorithm: gubernator.Algorithm_LEAKY_BUCKET,\n\t\t}},\n\t})\n\tif err != nil {\n\t\treturn 0, 0, err\n\t}\n\n\tif resp.Responses[0].Status == gubernator.Status_OVER_LIMIT {\n\t\treturn 0, 0, errOverLimit\n\t}\n\n\treturn resp.Responses[0].Remaining, resp.Responses[0].ResetTime, nil\n}",
"func GetQuota(id int64) (*models.Quota, error) {\n\tq := models.Quota{ID: id}\n\terr := GetOrmer().Read(&q, \"ID\")\n\tif err == orm.ErrNoRows {\n\t\treturn nil, nil\n\t}\n\treturn &q, err\n}",
"func (s *Strava) GetRateLimits() (requestTime time.Time, limitShort, limitLong, usageShort, usageLong int) {\n\trateLimiting := strava.RateLimiting\n\treturn rateLimiting.RequestTime, rateLimiting.LimitShort, rateLimiting.LimitLong, rateLimiting.UsageShort, rateLimiting.UsageLong\n}",
"func RateLimit(q Quota, vary *VaryBy, store Store) *Throttler {\n\t// Extract requests and window\n\treqs, win := q.Quota()\n\n\t// Create and return the throttler\n\treturn &Throttler{\n\t\tlimiter: &rateLimiter{\n\t\t\treqs: reqs,\n\t\t\twindow: win,\n\t\t\tvary: vary,\n\t\t\tstore: store,\n\t\t},\n\t}\n}",
"func (r *Registry) Get(s Settings) *Ratelimit {\n\tif s.Type == DisableRatelimit || s.Type == NoRatelimit {\n\t\treturn nil\n\t}\n\treturn r.get(s)\n}",
"func (in *RateLimit) DeepCopy() *RateLimit {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(RateLimit)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func SetRateLimit() *RateLimit {\n\treturn &RateLimit{\n\t\tAuth: request.NewRateLimit(biflyerRateInterval, bitflyerPrivateRequestRate),\n\t\tUnAuth: request.NewRateLimit(biflyerRateInterval, bitflyerPublicRequestRate),\n\t\tOrder: request.NewRateLimit(biflyerRateInterval, bitflyerPrivateSendOrderRequestRate),\n\t\tLowVolume: request.NewRateLimit(time.Minute, bitflyerPrivateLowVolumeRequestRate),\n\t}\n}",
"func (p *PilotPanel) GetRateLimiting(inv invocation.Invocation, serviceType string) control.RateLimitingConfig {\n\treturn control.RateLimitingConfig{}\n}",
"func SetRateLimit() *RateLimit {\n\treturn &RateLimit{\n\t\tSpot: request.NewRateLimit(huobiSpotRateInterval, huobiSpotRequestRate),\n\t\tFuturesAuth: request.NewRateLimit(huobiFuturesRateInterval, huobiFuturesAuthRequestRate),\n\t\tFuturesUnauth: request.NewRateLimit(huobiFuturesRateInterval, huobiFuturesUnAuthRequestRate),\n\t\tSwapAuth: request.NewRateLimit(huobiSwapRateInterval, huobiSwapAuthRequestRate),\n\t\tSwapUnauth: request.NewRateLimit(huobiSwapRateInterval, huobiSwapUnauthRequestRate),\n\t\tFuturesXfer: request.NewRateLimit(huobiFuturesTransferRateInterval, huobiFuturesTransferReqRate),\n\t}\n}",
"func (e *Engine) GetRateLimit() int {\n\treturn cap(e.ratelimit)\n}",
"func SetRateLimit() *RateLimit {\n\treturn &RateLimit{\n\t\tAuth: request.NewRateLimit(bitmexRateInterval, bitmexAuthRate),\n\t\tUnAuth: request.NewRateLimit(bitmexRateInterval, bitmexUnauthRate),\n\t}\n}",
"func GetServiceQuota(ctx *pulumi.Context,\n\tname string, id pulumi.ID, state *ServiceQuotaState, opts ...pulumi.ResourceOpt) (*ServiceQuota, error) {\n\tinputs := make(map[string]interface{})\n\tif state != nil {\n\t\tinputs[\"adjustable\"] = state.Adjustable\n\t\tinputs[\"arn\"] = state.Arn\n\t\tinputs[\"defaultValue\"] = state.DefaultValue\n\t\tinputs[\"quotaCode\"] = state.QuotaCode\n\t\tinputs[\"quotaName\"] = state.QuotaName\n\t\tinputs[\"requestId\"] = state.RequestId\n\t\tinputs[\"requestStatus\"] = state.RequestStatus\n\t\tinputs[\"serviceCode\"] = state.ServiceCode\n\t\tinputs[\"serviceName\"] = state.ServiceName\n\t\tinputs[\"value\"] = state.Value\n\t}\n\ts, err := ctx.ReadResource(\"aws:servicequotas/serviceQuota:ServiceQuota\", name, id, inputs, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ServiceQuota{s: s}, nil\n}",
"func (c *StorageGateway) DescribeBandwidthRateLimit(req *DescribeBandwidthRateLimitInput) (resp *DescribeBandwidthRateLimitOutput, err error) {\n\tresp = &DescribeBandwidthRateLimitOutput{}\n\terr = c.client.Do(\"DescribeBandwidthRateLimit\", \"POST\", \"/\", req, resp)\n\treturn\n}",
"func (c *Client) RateLimit() RateLimit {\n\tif c.limit != nil {\n\t\treturn *c.limit\n\t}\n\taccount, err := c.Account.Get()\n\tif err != nil {\n\t\treturn RateLimit{}\n\t}\n\tc.limit = &RateLimit{}\n\tfor _, metric := range account.Metrics {\n\t\tif metric.PlanLevel > 0 {\n\t\t\tc.limit.Limit = metric.PlanLevel\n\t\t\tc.limit.Remaining = metric.Remaining\n\t\t}\n\t}\n\treturn *c.limit\n}",
"func (q *XfsPrjQuota) GetQuota(file string) (*types.QuotaLimit, error) {\n\t// TODO: Not implemented\n\treturn nil, nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Path of the mount or namespace to apply the quota. A blank path configures a global rate limit quota. For example `namespace1/` adds a quota to a full namespace, `namespace1/auth/userpass` adds a `quota` to `userpass` in `namespace1`. Updating this field on an existing quota can have "moving" effects. For example, updating `auth/userpass` to `namespace1/auth/userpass` moves this quota from being a global mount quota to a namespace specific mount quota. Note, namespaces are supported in Enterprise only. | func (o QuotaRateLimitOutput) Path() pulumi.StringPtrOutput {
return o.ApplyT(func(v *QuotaRateLimit) pulumi.StringPtrOutput { return v.Path }).(pulumi.StringPtrOutput)
} | [
"func (m *Drive) SetQuota(value Quotaable)() {\n m.quota = value\n}",
"func Quota(path string, size ...string) string {\n\tif len(size) > 0 && len(size[0]) > 0 {\n\t\tout, err := exec.Command(\"btrfs\", \"qgroup\", \"limit\", size[0]+\"G\", config.Agent.LxcPrefix+path).CombinedOutput()\n\t\tlog.Check(log.ErrorLevel, \"Limiting BTRFS subvolume \"+config.Agent.LxcPrefix+path+\" \"+string(out), err)\n\t\texec.Command(\"btrfs\", \"quota\", \"rescan\", \"-w\", config.Agent.LxcPrefix).Run()\n\t}\n\treturn Stat(path, \"quota\", false)\n}",
"func (q *Control) SetQuota(targetPath string, quota Quota) error {\n\tq.RLock()\n\tprojectID, ok := q.quotas[targetPath]\n\tq.RUnlock()\n\tif !ok {\n\t\tstate := getPquotaState()\n\t\tstate.Lock()\n\t\tprojectID = state.nextProjectID\n\n\t\t//\n\t\t// assign project id to new container directory\n\t\t//\n\t\terr := setProjectID(targetPath, projectID)\n\t\tif err != nil {\n\t\t\tstate.Unlock()\n\t\t\treturn err\n\t\t}\n\n\t\tstate.nextProjectID++\n\t\tstate.Unlock()\n\n\t\tq.Lock()\n\t\tq.quotas[targetPath] = projectID\n\t\tq.Unlock()\n\t}\n\n\t//\n\t// set the quota limit for the container's project id\n\t//\n\tlog.G(context.TODO()).Debugf(\"SetQuota(%s, %d): projectID=%d\", targetPath, quota.Size, projectID)\n\treturn setProjectQuota(q.backingFsBlockDev, projectID, quota)\n}",
"func DiskQuota(path string, size ...string) string {\n\tparent := id(path)\n\texec.Command(\"btrfs\", \"qgroup\", \"create\", \"1/\"+parent, config.Agent.LxcPrefix+path).Run()\n\texec.Command(\"btrfs\", \"qgroup\", \"assign\", \"0/\"+id(path+\"/opt\"), \"1/\"+parent, config.Agent.LxcPrefix+path).Run()\n\texec.Command(\"btrfs\", \"qgroup\", \"assign\", \"0/\"+id(path+\"/var\"), \"1/\"+parent, config.Agent.LxcPrefix+path).Run()\n\texec.Command(\"btrfs\", \"qgroup\", \"assign\", \"0/\"+id(path+\"/home\"), \"1/\"+parent, config.Agent.LxcPrefix+path).Run()\n\texec.Command(\"btrfs\", \"qgroup\", \"assign\", \"0/\"+id(path+\"/rootfs\"), \"1/\"+parent, config.Agent.LxcPrefix+path).Run()\n\n\tif len(size) > 0 && len(size[0]) > 0 {\n\t\tout, err := exec.Command(\"btrfs\", \"qgroup\", \"limit\", size[0]+\"G\", \"1/\"+parent, config.Agent.LxcPrefix+path).CombinedOutput()\n\t\tlog.Check(log.ErrorLevel, \"Limiting BTRFS group 1/\"+parent+\" \"+string(out), err)\n\t\texec.Command(\"btrfs\", \"quota\", \"rescan\", \"-w\", config.Agent.LxcPrefix).Run()\n\t}\n\treturn Stat(path, \"quota\", false)\n}",
"func (q *Control) GetQuota(targetPath string, quota *Quota) error {\n\tq.RLock()\n\tprojectID, ok := q.quotas[targetPath]\n\tq.RUnlock()\n\tif !ok {\n\t\treturn errors.Errorf(\"quota not found for path: %s\", targetPath)\n\t}\n\n\t//\n\t// get the quota limit for the container's project id\n\t//\n\tvar d C.fs_disk_quota_t\n\n\tcs := C.CString(q.backingFsBlockDev)\n\tdefer C.free(unsafe.Pointer(cs))\n\n\t_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA,\n\t\tuintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)),\n\t\tuintptr(unsafe.Pointer(&d)), 0, 0)\n\tif errno != 0 {\n\t\treturn errors.Wrapf(errno, \"Failed to get quota limit for projid %d on %s\",\n\t\t\tprojectID, q.backingFsBlockDev)\n\t}\n\tquota.Size = uint64(d.d_blk_hardlimit) * 512\n\n\treturn nil\n}",
"func GetDefaultQuota(quotas map[string]string) string {\n\tif quotas == nil {\n\t\treturn \"\"\n\t}\n\n\t// \"/\" means the disk quota only takes effect on rootfs + 0 * volume\n\tquota, ok := quotas[\"/\"]\n\tif ok && quota != \"\" {\n\t\treturn quota\n\t}\n\n\t// \".*\" means the disk quota only takes effect on rootfs + n * volume\n\tquota, ok = quotas[\".*\"]\n\tif ok && quota != \"\" {\n\t\treturn quota\n\t}\n\n\treturn \"\"\n}",
"func (xfsq *XFSQuota) SetQuota(ctx context.Context, limit int64) error {\n\n\t_, err := xfsq.GetVolumeStats(ctx)\n\t// error getting quota value\n\tif err != nil && err != ErrProjNotFound {\n\t\treturn err\n\t}\n\t// this means quota has already been set\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\tlimitInStr := strconv.FormatInt(limit, 10)\n\tpid := getProjectIDHash(xfsq.ProjectID)\n\n\tglog.V(3).Infof(\"setting prjquota proj_id=%s path=%s\", pid, xfsq.Path)\n\n\tcmd := exec.CommandContext(ctx, \"xfs_quota\", \"-x\", \"-c\", fmt.Sprintf(\"project -d 0 -s -p %s %s\", xfsq.Path, pid))\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tglog.Errorf(\"could not set prjquota proj_id=%s path=%s err=%v\", pid, xfsq.Path, err)\n\t\treturn fmt.Errorf(\"SetQuota failed for %s with error: (%v), output: (%s)\", xfsq.ProjectID, err, out)\n\t}\n\n\tcmd = exec.CommandContext(ctx, \"xfs_quota\", \"-x\", \"-c\", fmt.Sprintf(\"limit -p bhard=%s %s\", limitInStr, pid), xfsq.Path)\n\tout, err = cmd.CombinedOutput()\n\tif err != nil {\n\t\tglog.Errorf(\"could not set prjquota proj_id=%s path=%s err=%v\", pid, xfsq.Path, err)\n\t\treturn fmt.Errorf(\"xfs_quota failed with error: %v, output: %s\", err, out)\n\t}\n\tglog.V(3).Infof(\"prjquota set successfully proj_id=%s path=%s\", pid, xfsq.Path)\n\n\treturn nil\n}",
"func (d *dir) setQuota(path string, volID int64, sizeBytes int64) error {\n\tif volID == volIDQuotaSkip {\n\t\t// Disabled on purpose, just ignore.\n\t\treturn nil\n\t}\n\n\tif volID == 0 {\n\t\treturn fmt.Errorf(\"Missing volume ID\")\n\t}\n\n\tok, err := quota.Supported(path)\n\tif err != nil || !ok {\n\t\tif sizeBytes > 0 {\n\t\t\t// Skipping quota as underlying filesystem doesn't suppport project quotas.\n\t\t\td.logger.Warn(\"The backing filesystem doesn't support quotas, skipping set quota\", logger.Ctx{\"path\": path, \"size\": sizeBytes, \"volID\": volID})\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tprojectID := d.quotaProjectID(volID)\n\tcurrentProjectID, err := quota.GetProject(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Remove current project if desired project ID is different.\n\tif currentProjectID != d.quotaProjectID(volID) {\n\t\terr = quota.DeleteProject(path, currentProjectID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Initialise the project.\n\terr = quota.SetProject(path, projectID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Set the project quota size.\n\treturn quota.SetProjectQuota(path, projectID, sizeBytes)\n}",
"func (p *projectQuota) ClearQuota(targetPath string) error {\n\tbackingFsBlockDev, err := p.findAvailableBackingDev(targetPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// no need to create new project id\n\tprjId, _, err := p.findOrCreateProjectId(targetPath, \"\", projIdNoCreate, persistToFile)\n\tif err != nil {\n\t\tklog.Errorf(\"find project id err: %v\", err)\n\t\treturn err\n\t}\n\n\tsize := &types.DiskQuotaSize{\n\t\tQuota: 0,\n\t\tInodes: 0,\n\t}\n\terr = setProjectQuota(backingFsBlockDev.device, prjId, size)\n\tif err != nil {\n\t\t// just warning\n\t\tklog.Errorf(\"set zero quota failed for path(%s) with id(%d): %v\", targetPath, prjId, err)\n\t}\n\n\t// save\n\tprojName, ok := p.idNames[prjId]\n\tdelete(p.pathIds, targetPath)\n\tdelete(p.pathMapBackingDev, targetPath)\n\tdelete(p.idPaths, prjId)\n\tdelete(p.idNames, prjId)\n\tif ok {\n\t\tdelete(p.nameIds, projName)\n\t}\n\n\t// save to file\n\tif p.prjFile != nil {\n\t\tp.prjFile.UpdateProjects(p.idPaths)\n\t\tp.prjFile.UpdateProjIds(p.idNames)\n\t}\n\n\treturn nil\n}",
"func (a *Client) SetQuota(params *SetQuotaParams) (*SetQuotaOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewSetQuotaParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"setQuota\",\n\t\tMethod: \"PATCH\",\n\t\tPathPattern: \"/api/v1/quotas\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &SetQuotaReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result.(*SetQuotaOK), nil\n\n}",
"func (h *handlerState) Quota(ctx context.Context, tracker attribute.Tracker, request *mixerpb.QuotaRequest, response *mixerpb.QuotaResponse) {\n\tresponse.RequestIndex = request.RequestIndex\n\tstatus := h.execute(ctx, tracker, request.AttributeUpdate, config.QuotaMethod)\n\n\tif status.Code == int32(code.Code_OK) {\n\t\tresponse.Amount = 1\n\t}\n}",
"func (m *Drive) GetQuota()(Quotaable) {\n return m.quota\n}",
"func (q *XfsPrjQuota) SetQuota(file string, id uint64, quota *types.QuotaLimit) error {\n\t// set quota id\n\tsetID := fmt.Sprintf(\"project -s -p %s %d\", file, id)\n\n\tret, err := cmd.Run(0, XfsQuota, \"-xc\", setID)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to run (%s -xc %s)\", XfsQuota, setID)\n\t}\n\tif ret.ExitCode != 0 {\n\t\treturn errors.Errorf(\"failed to run (%s -xc %s), result(%v)\",\n\t\t\tXfsQuota, setID, ret)\n\t}\n\n\t// set quota limit\n\tsetLimit := fmt.Sprintf(\"limit -p bhard=%dm bsoft=%dm ihard=%d isoft=%d %d %s\",\n\t\tquota.BlockHardLimit, quota.BlockSoftLimit,\n\t\tquota.InodeHardLimit, quota.InodeSoftLimit,\n\t\tid, q.Mount.MountPoint)\n\tret, err = cmd.Run(0, XfsQuota, \"-xc\", setLimit)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to run (%s -xc %s)\", XfsQuota, setLimit)\n\t}\n\tif ret.ExitCode != 0 {\n\t\treturn errors.Errorf(\"failed to run (%s -xc %s), result(%v)\",\n\t\t\tXfsQuota, setLimit, ret)\n\t}\n\n\treturn nil\n}",
"func TenantResourceQuota(tenant string) string {\n\treturn tenant\n}",
"func (p *projectQuota) GetAllQuotaPath() map[types.VolumeType]sets.String {\n\tpaths := make(map[types.VolumeType]sets.String)\n\n\tfor id, pathGroup := range p.idPaths {\n\t\tfor _, path := range pathGroup {\n\t\t\tidName := p.idNames[id]\n\t\t\tranges := strings.Split(idName, idNameSeprator)\n\t\t\tpathFlag := types.VolumeType(ranges[0])\n\t\t\tif _, ok := paths[pathFlag]; ok {\n\t\t\t\tpaths[pathFlag].Insert(path)\n\t\t\t} else {\n\t\t\t\tpaths[pathFlag] = sets.NewString(path)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn paths\n}",
"func (b *ClusterBuilder) StorageQuota(value *ValueBuilder) *ClusterBuilder {\n\tb.storageQuota = value\n\tif value != nil {\n\t\tb.bitmap_ |= 1099511627776\n\t} else {\n\t\tb.bitmap_ &^= 1099511627776\n\t}\n\treturn b\n}",
"func UpdateQuota(c context.Context, limit, usage float64, metric, project, region string) {\n\tquotaLimit.Set(c, limit, metric, project, region)\n\tquotaRemaining.Set(c, limit-usage, metric, project, region)\n\tquotaUsage.Set(c, usage, metric, project, region)\n}",
"func PathToMetric(p string) string {\n\t// XXX: What do we do with absolute paths that don't begin with Prefix?\n\tp = path.Clean(p)\n\tif strings.HasPrefix(p, Prefix) {\n\t\tp = p[len(Prefix):]\n\t}\n\tif strings.HasPrefix(p, \"/\") {\n\t\tp = p[1:]\n\t}\n\n\tp = strings.Replace(p, \".wsp\", \"\", 1)\n\treturn strings.Replace(p, \"/\", \".\", -1)\n}",
"func (p *projectQuota) GetQuota(targetPath string) (*types.DiskQuotaSize, error) {\n\tbackingFsBlockDev, err := p.findAvailableBackingDev(targetPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// no need to create new project id\n\tprjId, _, err := p.findOrCreateProjectId(targetPath, \"\", projIdNoCreate, !persistToFile)\n\tif err != nil {\n\t\tklog.Errorf(\"find project id err: %v\", err)\n\t\treturn nil, err\n\t}\n\n\treturn getProjectQuota(backingFsBlockDev.device, prjId)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
NewEC2RemoteClient creates and initialise a new EC2RemoteClient object, given an AWS Instance ID | func NewEC2RemoteClient(InstanceID *string, credentials *sshCmdClient.SSHCredentials) (*EC2RemoteClient, error) {
ins := new(EC2RemoteClient)
ins.InstanceID = *InstanceID
session, err := session.NewSession()
if err != nil {
return nil, err
}
ec2Client := ec2.New(session)
ins.session = session
ins.ec2Client = ec2Client
ins.sshCredentials = credentials
err = ins.makeReady()
return ins, err
} | [
"func NewEC2Instance(InstId string) *AWSEC2Instance {\n return &AWSEC2Instance{InstanceId: InstId}\n}",
"func NewEc2Client(t testing.TestingT, region string) *ec2.EC2 {\n\tclient, err := NewEc2ClientE(t, region)\n\trequire.NoError(t, err)\n\treturn client\n}",
"func CreateEC2Client(credentials *credentials.Credentials, region string) (*ec2.EC2, error) {\n\tsess, err := session.NewSession(&aws.Config{\n\t\tCredentials: credentials,\n\t\tRegion: ®ion,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ec2.New(sess), nil\n}",
"func NewClient(config Config) (Client, error) {\n\tuuid.SetNodeID([]byte(fmt.Sprintf(\"%s:%s\", config.AppID, config.InstanceID)))\n\tconns := connection.NewManager()\n\tsubConn, err := conns.Connect(fmt.Sprintf(\"%s:%d\", config.SubscriptionService.GetHost(), config.SubscriptionService.GetPort()))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\te2tConn, err := conns.Connect(fmt.Sprintf(\"%s:%d\", config.E2TService.GetHost(), config.E2TService.GetPort()))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &e2Client{\n\t\tconfig: config,\n\t\tepClient: endpoint.NewClient(subConn),\n\t\tsubClient: subscription.NewClient(subConn),\n\t\ttaskClient: subscriptiontask.NewClient(subConn),\n\t\tterminationClient: termination.NewClient(e2tConn),\n\t\tconns: conns,\n\t}, nil\n}",
"func GetEc2Client(credConfig *cred.Config) (*ec2.EC2, error) {\n\tconfig, err := GetAWSCredentialConfig(credConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tec2Session := session.Must(session.NewSession())\n\treturn ec2.New(ec2Session, config), nil\n}",
"func NewRemoteClient(config *Config) *Client {\n\tif config == nil {\n\t\tconfig = &Config{}\n\t}\n\n\tclient := api.NewShell(config.Host)\n\thost := config.Host\n\tif host == \"\" {\n\t\tvar err error\n\t\thost, err = getIpfsAPIURL()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\treturn &Client{\n\t\tclient: client,\n\t\tisRemote: true,\n\t\thost: host,\n\t\tgatewayURL: config.GatewayURL,\n\t}\n}",
"func NewAwsClient() (*AwsClient, error) {\n\n\ts, err := session.NewSession(&aws.Config{\n\t\tMaxRetries: aws.Int(0),\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to connect to AWS metadata service: %s\", err)\n\t}\n\n\tmd := ec2metadata.New(s)\n\tidDoc, err := md.GetInstanceIdentityDocument()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to GetInstanceIdentityDocument: %s\", err)\n\t}\n\tclient := ec2.New(s, aws.NewConfig().WithRegion(idDoc.Region))\n\n\tlogrus.Debug(\"NewAwsClient built\")\n\treturn &AwsClient{\n\t\taws: client,\n\t\tinstanceID: idDoc.InstanceID,\n\t\tprivateIP: idDoc.PrivateIP,\n\t\tnicIPtoID: make(map[string]string),\n\t}, nil\n}",
"func NewClient(accessKeyID, secretAccessKey, region string) ClientInterface {\n\tvar (\n\t\tawsConfig = &aws.Config{\n\t\t\tCredentials: credentials.NewStaticCredentials(accessKeyID, secretAccessKey, \"\"),\n\t\t}\n\t\tsess = session.Must(session.NewSession(awsConfig))\n\t\tconfig = &aws.Config{Region: aws.String(region)}\n\t)\n\n\treturn &Client{\n\t\tEC2: ec2.New(sess, config),\n\t\tELB: elb.New(sess, config),\n\t\tSTS: sts.New(sess, config),\n\t}\n}",
"func NewV2Client(opts *ClientOptions) *Clientv2 {\n\t// Ref: https://github.com/mediocregopher/radix/blob/master/radix.go#L107\n\tcustomConnFunc := func(network, addr string) (radix.Conn, error) {\n\t\treturn radix.Dial(network, addr,\n\t\t\tradix.DialTimeout(opts.WriteTimeout),\n\t\t\tradix.DialAuthPass(opts.Password),\n\t\t\tradix.DialSelectDB(opts.DB),\n\t\t)\n\t}\n\tpoolSize := opts.PoolSize\n\tif poolSize == 0 {\n\t\tpoolSize = 15\n\t}\n\n\trclient, _ := radix.NewPool(\"tcp\", opts.Host+\":\"+opts.Port, poolSize, radix.PoolConnFunc(customConnFunc))\n\tvar client = &Clientv2{pool: rclient}\n\treturn client\n}",
"func NewClient(accessKeyID, secretAccessKey, region string) (*Client, error) {\n\tvar (\n\t\tawsConfig = &aws.Config{\n\t\t\tCredentials: credentials.NewStaticCredentials(accessKeyID, secretAccessKey, \"\"),\n\t\t}\n\t\tconfig = &aws.Config{Region: aws.String(region)}\n\t)\n\n\ts, err := session.NewSession(awsConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Client{\n\t\tEC2: ec2.New(s, config),\n\t\tELB: elb.New(s, config),\n\t\tELBv2: elbv2.New(s, config),\n\t\tIAM: iam.New(s, config),\n\t\tSTS: sts.New(s, config),\n\t\tS3: s3.New(s, config),\n\t}, nil\n}",
"func (*SDKGetter) EC2(session *session.Session) EC2Interface {\n\treturn ec2svc.NewService(ec2.New(session))\n}",
"func NewEC2(awsSession *session.Session) {\n\tEC2svc = &EC2{\n\t\tec2.New(awsSession),\n\t\tccache.New(ccache.Configure()),\n\t}\n}",
"func NewRemote(acctId, appKey, bucket, url string) (*Remote, error) {\n\tb2, err := backblaze.NewB2(backblaze.Credentials{\n\t\tAccountID: acctId,\n\t\tApplicationKey: appKey,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Remote{\n\t\tb2: b2,\n\t\tbucket: bucket,\n\t\turl: url,\n\t}, nil\n}",
"func NewEc2Instance(ctx sdutils.AppContext, dd *awsDeploymentDescription) (*Ec2Instance, error) {\n\tvar err error\n\tcustomData := \"\"\n\tif dd.customPropFile != \"\" {\n\t\tdata, err := ioutil.ReadFile(dd.customPropFile)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Invalid custom properties file: %s\", err)\n\t\t}\n\t\tcustomData = string(data)\n\t}\n\n\tcustomLog4J := \"\"\n\tif dd.customLog4J != \"\" {\n\t\tlog4JData, err := ioutil.ReadFile(dd.customLog4J)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Invalid custom properties file: %s\", err)\n\t\t}\n\t\tcustomLog4J = base64.StdEncoding.EncodeToString(log4JData)\n\t}\n\n\tvar envBuffer bytes.Buffer\n\tfor _, env := range dd.environment {\n\t\tenvBuffer.WriteString(fmt.Sprintf(\"export %s\\n\", env))\n\t}\n\t// The custom script cannot be null in terraform so make a temp one\n\tscriptData := []byte(\"#!/bin/bash\\nexit 0\\n\")\n\tif dd.CustomScript != \"\" {\n\t\tscriptData, err = ioutil.ReadFile(dd.CustomScript)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Failed to read the script %s: %s\", dd.CustomScript, err.Error())\n\t\t}\n\t}\n\tbase64CustomScriptData := base64.StdEncoding.EncodeToString(scriptData)\n\tbase64CustomScriptPath := path.Join(dd.deployDir, \"custom-stardogscript.base64\")\n\terr = ioutil.WriteFile(base64CustomScriptPath, []byte(base64CustomScriptData), 0644)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to create the base 64 encoded custom script\")\n\t}\n\n\tscriptZkData := []byte(\"#!/bin/bash\\nexit 0\\n\")\n\tif dd.CustomZkScript != \"\" {\n\t\tscriptZkData, err = ioutil.ReadFile(dd.CustomZkScript)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Failed to read the script %s: %s\", dd.CustomZkScript, err.Error())\n\t\t}\n\t}\n\tbase64CustomZkScriptData := base64.StdEncoding.EncodeToString(scriptZkData)\n\tbase64CustomZkScriptPath := path.Join(dd.deployDir, \"custom-zk-stardogscript.base64\")\n\terr = ioutil.WriteFile(base64CustomZkScriptPath, 
[]byte(base64CustomZkScriptData), 0644)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Failed to create the base 64 encoded custom zk script\")\n\t}\n\n\tinstance := Ec2Instance{\n\t\tDeploymentName: dd.Name,\n\t\tRegion: dd.Region,\n\t\tKeyName: dd.AwsKeyName,\n\t\tVersion: dd.Version,\n\t\tZkInstanceType: dd.ZkInstanceType,\n\t\tSdInstanceType: dd.SdInstanceType,\n\t\tAmiID: dd.AmiID,\n\t\tPrivateKey: dd.PrivateKeyPath,\n\t\tDeployDir: dd.deployDir,\n\t\tCustomScript: base64CustomScriptPath,\n\t\tCustomZkScript: base64CustomZkScriptPath,\n\t\tCtx: ctx,\n\t\tCustomPropsData: customData,\n\t\tCustomLog4JData: customLog4J,\n\t\tEnvironment: envBuffer.String(),\n\t}\n\tif dd.disableSecurity {\n\t\tinstance.StartOpts = \"--disable-security\"\n\t}\n\treturn &instance, nil\n}",
"func New(awsConfig *aws.Config) *Client {\n\tctx := aws.BackgroundContext()\n\tsess := session.Must(session.NewSession(awsConfig))\n\tiam := iam.New(sess)\n\troute53 := route53.New(sess)\n\tec2 := ec2.New(sess)\n\treturn &Client{\n\t\tIAM: iam,\n\t\tRoute53: route53,\n\t\tEC2: ec2,\n\t\tContext: ctx,\n\t}\n}",
"func NewRemote(endpoint string) model.SecretService {\n\treturn &plugin{endpoint}\n}",
"func Ec2Client(region string) *ec2.EC2 {\n\treturn ec2.New(session.New(), aws.NewConfig().WithRegion(region))\n}",
"func newRemoteCNIServer(logger logging.Logger, vppTxnFactory func() linuxclient.DataChangeDSL, proxy kvdbproxy.Proxy,\n\tconfiguredContainers *containeridx.ConfigIndex, govppChan api.Channel, index ifaceidx.SwIfIndex, dhcpIndex ifaceidx.DhcpIndex, agentLabel string,\n\tconfig *Config, nodeConfig *NodeConfig, nodeID uint32, nodeExcludeIPs []net.IP, broker keyval.ProtoBroker, http rest.HTTPHandlers) (*remoteCNIserver, error) {\n\tipam, err := ipam.New(logger, nodeID, agentLabel, &config.IPAMConfig, nodeExcludeIPs, broker, http)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tserver := &remoteCNIserver{\n\t\tLogger: logger,\n\t\tvppTxnFactory: vppTxnFactory,\n\t\tproxy: proxy,\n\t\tconfiguredContainers: configuredContainers,\n\t\tgovppChan: govppChan,\n\t\tswIfIndex: index,\n\t\tdhcpIndex: dhcpIndex,\n\t\tagentLabel: agentLabel,\n\t\tnodeID: nodeID,\n\t\tipam: ipam,\n\t\tnodeConfig: nodeConfig,\n\t\tconfig: config,\n\t\ttcpChecksumOffloadDisabled: config.TCPChecksumOffloadDisabled,\n\t\tuseTAPInterfaces: config.UseTAPInterfaces,\n\t\ttapVersion: config.TAPInterfaceVersion,\n\t\ttapV2RxRingSize: config.TAPv2RxRingSize,\n\t\ttapV2TxRingSize: config.TAPv2TxRingSize,\n\t\tdisableTCPstack: config.TCPstackDisabled,\n\t\tuseL2Interconnect: config.UseL2Interconnect,\n\t\tconfiguredInThisRun: map[string]bool{},\n\t}\n\tserver.vswitchCond = sync.NewCond(&server.Mutex)\n\tserver.ctx, server.ctxCancelFunc = context.WithCancel(context.Background())\n\tif nodeConfig != nil && nodeConfig.Gateway != \"\" {\n\t\tserver.defaultGw = net.ParseIP(nodeConfig.Gateway)\n\t}\n\tserver.dhcpNotif = make(chan ifaceidx.DhcpIdxDto, 1)\n\treturn server, nil\n}",
"func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {\n\tif !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {\n\t\t// If the http client is unmodified and this feature is not disabled\n\t\t// set custom timeouts for EC2Metadata requests.\n\t\tcfg.HTTPClient = &http.Client{\n\t\t\t// use a shorter timeout than default because the metadata\n\t\t\t// service is local if it is running, and to fail faster\n\t\t\t// if not running on an ec2 instance.\n\t\t\tTimeout: 1 * time.Second,\n\t\t}\n\t\t// max number of retries on the client operation\n\t\tcfg.MaxRetries = aws.Int(2)\n\t}\n\n\tif u, err := url.Parse(endpoint); err == nil {\n\t\t// Remove path from the endpoint since it will be added by requests.\n\t\t// This is an artifact of the SDK adding `/latest` to the endpoint for\n\t\t// EC2 IMDS, but this is now moved to the operation definition.\n\t\tu.Path = \"\"\n\t\tu.RawPath = \"\"\n\t\tendpoint = u.String()\n\t}\n\n\tsvc := &EC2Metadata{\n\t\tClient: client.New(\n\t\t\tcfg,\n\t\t\tmetadata.ClientInfo{\n\t\t\t\tServiceName: ServiceName,\n\t\t\t\tServiceID: ServiceName,\n\t\t\t\tEndpoint: endpoint,\n\t\t\t\tAPIVersion: \"latest\",\n\t\t\t},\n\t\t\thandlers,\n\t\t),\n\t}\n\n\t// token provider instance\n\ttp := newTokenProvider(svc, defaultTTL)\n\n\t// NamedHandler for fetching token\n\tsvc.Handlers.Sign.PushBackNamed(request.NamedHandler{\n\t\tName: fetchTokenHandlerName,\n\t\tFn: tp.fetchTokenHandler,\n\t})\n\t// NamedHandler for enabling token provider\n\tsvc.Handlers.Complete.PushBackNamed(request.NamedHandler{\n\t\tName: enableTokenProviderHandlerName,\n\t\tFn: tp.enableTokenProviderHandler,\n\t})\n\n\tsvc.Handlers.Unmarshal.PushBackNamed(unmarshalHandler)\n\tsvc.Handlers.UnmarshalError.PushBack(unmarshalError)\n\tsvc.Handlers.Validate.Clear()\n\tsvc.Handlers.Validate.PushBack(validateEndpointHandler)\n\n\t// Disable the EC2 Metadata service if 
the environment variable is set.\n\t// This short-circuits the service's functionality to always fail to send\n\t// requests.\n\tif strings.ToLower(os.Getenv(disableServiceEnvVar)) == \"true\" {\n\t\tsvc.Handlers.Send.SwapNamed(request.NamedHandler{\n\t\t\tName: corehandlers.SendHandler.Name,\n\t\t\tFn: func(r *request.Request) {\n\t\t\t\tr.HTTPResponse = &http.Response{\n\t\t\t\t\tHeader: http.Header{},\n\t\t\t\t}\n\t\t\t\tr.Error = awserr.New(\n\t\t\t\t\trequest.CanceledErrorCode,\n\t\t\t\t\t\"EC2 IMDS access disabled via \"+disableServiceEnvVar+\" env var\",\n\t\t\t\t\tnil)\n\t\t\t},\n\t\t})\n\t}\n\n\t// Add additional options to the service config\n\tfor _, option := range opts {\n\t\toption(svc.Client)\n\t}\n\treturn svc\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
startInstance starts an EC2 instance, and waits for it to become ready | func (ins *EC2RemoteClient) startInstance() error {
log.Printf("Starting EC2 Instance %s", ins.InstanceID)
_, err := ins.ec2Client.StartInstances(&ec2.StartInstancesInput{InstanceIds: aws.StringSlice([]string{ins.InstanceID})})
if err != nil {
return fmt.Errorf("Error starting instance : %s", err)
}
log.Printf("Waiting for Instance %s to become ready (may take a few minutes)", ins.InstanceID)
err = ins.ec2Client.WaitUntilInstanceStatusOk(&ec2.DescribeInstanceStatusInput{InstanceIds: aws.StringSlice([]string{ins.InstanceID})})
if err != nil {
return fmt.Errorf("Error waiting for instance to become available : %s", err)
}
return err
} | [
"func startInstance(ec2Service *ec2.EC2, instance *ec2.Instance) error {\n\tinstanceState, err := getInstanceState(*instance)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif instanceState == \"shutting-down\" || instanceState == \"terminated\" || instanceState == \"stopping\" || instanceState == \"stopped\" {\n\t\tfmt.Errorf(\"instance stopped or terminated\")\n\t}\n\n\tinstanceId := *instance.InstanceId\n\tinput := &ec2.StartInstancesInput{\n\t\tInstanceIds: []*string {\n\t\t\taws.String(instanceId),\n\t\t},\n\t}\n\n\t_, err = ec2Service.StartInstances(input)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Println(\"starting...\")\n\treturn nil\n}",
"func (self *AWSEC2Instance) StartInstance() (*StartResult, error) {\n out, err := self.control_ec2(start_instance, no_query)\n if err != nil {\n return nil, err\n }\n\n var sr StartResult\n err = json.Unmarshal(out, &sr)\n if err != nil {\n return nil, err\n }\n\n return &sr, nil\n}",
"func (p *ProxMox) StartInstance(ctx *lepton.Context, instanceID string) error {\n\n\treq, err := http.NewRequest(\"POST\", p.apiURL+\"/api2/json/nodes/\"+p.nodeNAME+\"/qemu/\"+instanceID+\"/status/start\", nil)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn err\n\t}\n\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}\n\tclient := &http.Client{Transport: tr}\n\n\treq.Header.Add(\"Authorization\", \"PVEAPIToken=\"+p.tokenID+\"=\"+p.secret)\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\t_, err = io.ReadAll(resp.Body)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func runAndWaitForInstance(svc *ec2.EC2, name string, params *ec2.RunInstancesInput) (ec2.Instance, error) {\n\trunResult, err := svc.RunInstances(params)\n\tif err != nil {\n\t\treturn ec2.Instance{}, gruntworkerrors.WithStackTrace(err)\n\t}\n\n\tif len(runResult.Instances) == 0 {\n\t\terr := errors.New(\"Could not create test EC2 instance\")\n\t\treturn ec2.Instance{}, gruntworkerrors.WithStackTrace(err)\n\t}\n\n\terr = svc.WaitUntilInstanceExists(&ec2.DescribeInstancesInput{\n\t\tFilters: []*ec2.Filter{\n\t\t\t&ec2.Filter{\n\t\t\t\tName: awsgo.String(\"instance-id\"),\n\t\t\t\tValues: []*string{runResult.Instances[0].InstanceId},\n\t\t\t},\n\t\t},\n\t})\n\n\tif err != nil {\n\t\treturn ec2.Instance{}, gruntworkerrors.WithStackTrace(err)\n\t}\n\n\t// Add test tag to the created instance\n\t_, err = svc.CreateTags(&ec2.CreateTagsInput{\n\t\tResources: []*string{runResult.Instances[0].InstanceId},\n\t\tTags: []*ec2.Tag{\n\t\t\t{\n\t\t\t\tKey: awsgo.String(\"Name\"),\n\t\t\t\tValue: awsgo.String(name),\n\t\t\t},\n\t\t},\n\t})\n\n\tif err != nil {\n\t\treturn ec2.Instance{}, gruntworkerrors.WithStackTrace(err)\n\t}\n\n\t// EC2 Instance must be in a running before this function returns\n\terr = svc.WaitUntilInstanceRunning(&ec2.DescribeInstancesInput{\n\t\tFilters: []*ec2.Filter{\n\t\t\t&ec2.Filter{\n\t\t\t\tName: awsgo.String(\"instance-id\"),\n\t\t\t\tValues: []*string{runResult.Instances[0].InstanceId},\n\t\t\t},\n\t\t},\n\t})\n\n\tif err != nil {\n\t\treturn ec2.Instance{}, gruntworkerrors.WithStackTrace(err)\n\t}\n\n\treturn *runResult.Instances[0], nil\n\n}",
"func (c *Client) StartInstance(id string) error {\n\n\tactionRequest := core.InstanceActionRequest{}\n\tactionRequest.Action = core.InstanceActionActionStart\n\tactionRequest.InstanceId = &id\n\n\tstartResp, err := c.computeClient.InstanceAction(context.Background(), actionRequest)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// wait until lifecycle status is Running\n\tpollUntilRunning := func(r common.OCIOperationResponse) bool {\n\t\tif converted, ok := r.Response.(core.GetInstanceResponse); ok {\n\t\t\treturn converted.LifecycleState != core.InstanceLifecycleStateRunning\n\t\t}\n\t\treturn true\n\t}\n\n\tpollingGetRequest := core.GetInstanceRequest{\n\t\tInstanceId: startResp.Instance.Id,\n\t\tRequestMetadata: helpers.GetRequestMetadataWithCustomizedRetryPolicy(pollUntilRunning),\n\t}\n\n\t_, err = c.computeClient.GetInstance(context.Background(), pollingGetRequest)\n\n\treturn err\n}",
"func (p *OnPrem) StartInstance(ctx *Context, instancename string) error {\n\treturn fmt.Errorf(\"Operation not supported\")\n}",
"func (a *Agent) spawnInstance(ctx context.Context, c instance.Config) {\n\tinst, err := a.instanceFactory(a.cfg.Global, c, a.cfg.WALDir, a.logger)\n\tif err != nil {\n\t\tlevel.Error(a.logger).Log(\"msg\", \"failed to create instance\", \"err\", err)\n\t\treturn\n\t}\n\n\tfor {\n\t\terr = inst.Run(ctx)\n\t\tif err != nil && err != context.Canceled {\n\t\t\tinstanceAbnormalExits.WithLabelValues(c.Name).Inc()\n\t\t\tlevel.Error(a.logger).Log(\"msg\", \"instance stopped abnormally, restarting after backoff period\", \"err\", err, \"backoff\", a.cfg.InstanceRestartBackoff, \"instance\", c.Name)\n\t\t\ttime.Sleep(a.cfg.InstanceRestartBackoff)\n\t\t} else {\n\t\t\tlevel.Info(a.logger).Log(\"msg\", \"stopped instance\", \"instance\", c.Name)\n\t\t\tbreak\n\t\t}\n\t}\n}",
"func (c *Client) StartInstance(instanceId string) error {\n\treturn StartInstance(c, instanceId)\n}",
"func runInstance(projectURL string) (*ec2.Instance, error) {\n\tuserData := fmt.Sprintf(userDataTemplate, projectURL)\n\tencUserData := base64.StdEncoding.EncodeToString([]byte(userData))\n\n\trunInput := &ec2.RunInstancesInput{\n\t\tImageId: aws.String(imageID),\n\t\tInstanceType: aws.String(instanceType),\n\t\tMaxCount: aws.Int64(1),\n\t\tMinCount: aws.Int64(1),\n\t\tUserData: aws.String(encUserData),\n\t\tSecurityGroups: []*string{aws.String(securityGroupName)},\n\t\t// KeyName: aws.String(\"test-keypair.pem\"), //TODO remove this field, added to connect to instances\n\t}\n\tlog.Info(\"Making call to RunInstances with input: \", runInput)\n\n\tresult, err := svc.RunInstances(runInput)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed call to RunInstances\")\n\t}\n\tlog.Info(\"Got RunInstances result\", result)\n\n\tif len(result.Instances) == 0 {\n\t\treturn nil, errors.NewServer(\"RunInstances returned no instances\")\n\t}\n\tinstance := result.Instances[0]\n\tif instance.InstanceId == nil || *instance.InstanceId == \"\" {\n\t\treturn nil, errors.NewServer(\"RunInstances did not return InstanceId\")\n\t}\n\n\ttagInput := &ec2.CreateTagsInput{\n\t\tResources: []*string{instance.InstanceId},\n\t\tTags: []*ec2.Tag{\n\t\t\t{\n\t\t\t\tKey: aws.String(\"Name\"),\n\t\t\t\tValue: aws.String(\"CRUD Creator Server\"),\n\t\t\t},\n\t\t},\n\t}\n\tlog.Info(\"Making call to CreateTags with input: \", tagInput)\n\t_, err = svc.CreateTags(tagInput)\n\n\tlog.Info(\"Created instance: \", instance)\n\treturn instance, errors.Wrap(err, \"Failed call to CreateTags\")\n}",
"func (c *EC2) createAWSEC2Instance(input *RunInstancesInput) (r aws.Referencer, attr aws.Attrabuter, err error) {\n\n\truninstancesrequest := input\n\treservation, err := RunInstances(runinstancesrequest)\n\tif err == nil {\n\t\tdescribeinstancesrequest := &DescribeInstancesInput{}\n\t\tif err := awsutil.CopyValue(describeinstancesrequest, \"InstanceIds\", reservation, \"Instances[].InstanceId\"); err != nil {\n\t\t\treturn reservation, reservation, err\n\t\t}\n\t\tif err := WaitUntilInstanceRunning(describeinstancesrequest); err != nil {\n\t\t\treturn reservation, reservation, err\n\t\t}\n\n\t} else {\n\t\treturn nil, nil, err\n\t}\n\tstartinstancesrequest := &StartInstancesInput{}\n\tif err := awsutil.CopyValue(startinstancesrequest, \"InstanceIds\", reservation, \"Instances[].InstanceId\"); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tstartinstancesresult, err := StartInstances(startinstancesrequest)\n\tif err == nil {\n\t\tdescribeinstancesrequest := &DescribeInstancesInput{}\n\t\tif err := awsutil.CopyValue(describeinstancesrequest, \"InstanceIds\", reservation, \"Instances[].InstanceId\"); err != nil {\n\t\t\treturn reservation, reservation, err\n\t\t}\n\t\tif err := WaitUntilInstanceRunning(describeinstancesrequest); err != nil {\n\t\t\treturn reservation, reservation, err\n\t\t}\n\n\t} else {\n\t\treturn nil, nil, err\n\t}\n\treturn reservation, reservation, nil\n}",
"func (cc *ComputeClient) WaitInstance(ctx context.Context) error {\n\tgcmd := cc.gCloudComputeSSH([]string{\"date\"})\n\treturn neco.RetryWithSleep(ctx, retryCount, time.Second,\n\t\tfunc(ctx context.Context) error {\n\t\t\tc := well.CommandContext(ctx, gcmd[0], gcmd[1:]...)\n\t\t\tc.Stdin = os.Stdin\n\t\t\tc.Stdout = os.Stdout\n\t\t\tc.Stderr = os.Stderr\n\t\t\treturn c.Run()\n\t\t},\n\t\tfunc(err error) {\n\t\t\tlog.Error(\"failed to check online of the instance\", map[string]interface{}{\n\t\t\t\tlog.FnError: err,\n\t\t\t\t\"instance\": cc.instance,\n\t\t\t})\n\t\t},\n\t)\n}",
"func (s *AWSClient) StartInstances(n int64, iConfig *InstanceConfig) ([]string, error) {\n\tvar err error\n\n\tlogger.Debug(fmt.Sprintf(\"Starting %d instances of Synthea for task %s\", n, iConfig.TaskID))\n\n\t// The InstanceConfig must be validated before doing anything.\n\tif !ValidateConfig(iConfig, s.Config) {\n\t\treturn nil, errors.New(\"Invalid InstanceConfig\")\n\t}\n\n\t// Serialie the InstanceConfig to pass it as UserData.\n\trawUserData, err := json.Marshal(iConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// The raw user data must be base64 encoded. It will be automatically\n\t// decoded when Synthea requests it from the EC2 instance user data.\n\tencodedUserData := b64.StdEncoding.EncodeToString(rawUserData)\n\n\t// Make a RunInstances request for n Synthea instances\n\trunParams := &ec2.RunInstancesInput{\n\t\tImageId: aws.String(s.Config.SyntheaImageID),\n\t\tInstanceType: aws.String(s.Config.SyntheaInstanceType),\n\t\tMinCount: aws.Int64(n),\n\t\tMaxCount: aws.Int64(n),\n\t\tSecurityGroupIds: []*string{aws.String(s.Config.SyntheaSecurityGroupID)},\n\t\tIamInstanceProfile: &ec2.IamInstanceProfileSpecification{\n\t\t\t// This is an ARN to an EC2 Role with one or more associated policies\n\t\t\tArn: aws.String(s.Config.SyntheaRoleArn),\n\t\t},\n\t\tSubnetId: aws.String(s.Config.SyntheaSubnetID),\n\t\tUserData: aws.String(encodedUserData),\n\t}\n\treservation, err := s.EC2.RunInstances(runParams)\n\tif err != nil {\n\t\tlogger.Error(\"Failed to start instances for task \" + iConfig.TaskID)\n\t\treturn nil, err\n\t}\n\n\t// Parse the instance IDs out of the reservation\n\tinstanceIDs := make([]*string, len(reservation.Instances))\n\tfor i, instance := range reservation.Instances {\n\t\tinstanceIDs[i] = instance.InstanceId\n\t}\n\tstrInstanceIDs := toStrings(instanceIDs)\n\n\tlogger.Debug(fmt.Sprintf(\"Started %d instances: %v\", n, strInstanceIDs))\n\n\t// Tag these instance with \"stork-synthea\" and the taskID\n\t// so we know who they 
belong to\n\tlogger.Debug(fmt.Sprintf(\"Tagging instances %v\", strInstanceIDs))\n\n\ttagParams := &ec2.CreateTagsInput{\n\t\tResources: instanceIDs,\n\t\tTags: []*ec2.Tag{\n\t\t\t&ec2.Tag{\n\t\t\t\tKey: aws.String(\"role\"),\n\t\t\t\tValue: aws.String(\"stork-synthea\"),\n\t\t\t},\n\t\t\t&ec2.Tag{\n\t\t\t\tKey: aws.String(\"task\"),\n\t\t\t\tValue: aws.String(iConfig.TaskID),\n\t\t\t},\n\t\t},\n\t}\n\t_, err = s.EC2.CreateTags(tagParams)\n\tif err != nil {\n\t\tlogger.Error(fmt.Sprintf(\"Failed to tag instances %v\", strInstanceIDs))\n\t\treturn nil, err\n\t}\n\tlogger.Debug(fmt.Sprintf(\"Tagged instances %v\", strInstanceIDs))\n\n\treturn strInstanceIDs, nil\n}",
"func Start_stop_instances(ec2Instance *ec2.EC2, instance_id string, op_name string) {\n switch op_name {\n case \"stop\":\n input := &ec2.StopInstancesInput {\n InstanceIds : []*string{\n aws.String(instance_id),\n },\n }\n result, err := ec2Instance.StopInstances(input)\n if err != nil {\n fmt.Println(\"Error in stopping the instance\", err)\n } else {\n\t\t fmt.Println(\"Success:\", result.StoppingInstances)\n }\n case \"start\":\n input := &ec2.StartInstancesInput {\n InstanceIds : []*string{\n aws.String(instance_id),\n },\n }\n result, err := ec2Instance.StartInstances(input)\n if err != nil {\n fmt.Println(\"Error in starting the instance\", err)\n } else {\n\t\t fmt.Println(\"Success:\", result.StartingInstances)\n }\n default:\n fmt.Println(\"No input provided\")\n }\n}",
"func (instanceApi *InstanceApi) Start(id string) (bool, error) {\n\t_, err := instanceApi.entityService.Execute(id, INSTANCE_START_OPERATION, []byte{}, map[string]string{})\n\treturn err == nil, err\n}",
"func (a *AliyunInstanceAttribute) startBastionHostInstance() {\n\tattemptCnt := 0\n\tfor attemptCnt < 60 {\n\t\tres, err := ExecCmdReturnOutput(\"bash\", \"-c\", \"aliyun ecs DescribeInstanceAttribute --InstanceId=\"+a.BastionInstanceID)\n\t\tcheckError(err)\n\t\tdecodedQuery := decodeAndQueryFromJSONString(res)\n\t\tstatus, err := decodedQuery.String(\"Status\")\n\t\tcheckError(err)\n\t\tif status == \"Running\" {\n\t\t\ttime.Sleep(time.Second * 30)\n\t\t\tfmt.Println(\"Bastion host started.\")\n\t\t\tbreak\n\t\t} else if status == \"Stopped\" {\n\t\t\tfmt.Println(\"Starting bastion host...\")\n\t\t\t_, err = ExecCmdReturnOutput(\"bash\", \"-c\", \"aliyun ecs StartInstance --InstanceId=\"+a.BastionInstanceID)\n\t\t\tcheckError(err)\n\t\t} else if status == \"Starting\" {\n\t\t\tfmt.Println(\"Waiting for bastion host to start...\")\n\t\t} else if status == \"Stopping\" {\n\t\t\tfmt.Println(\"Bastion host is currently stopping...\")\n\t\t}\n\t\ttime.Sleep(time.Second * 2)\n\t\tattemptCnt++\n\t}\n\tif attemptCnt == 60 {\n\t\tfmt.Println(\"Bastion host starting time out. Please try again.\")\n\t\tos.Exit(2)\n\t}\n\tfmt.Println(\"Allocating bastion host IP address...\")\n\tres, err := ExecCmdReturnOutput(\"bash\", \"-c\", \"aliyun ecs AllocatePublicIpAddress --InstanceId=\"+a.BastionInstanceID)\n\tcheckError(err)\n\tdecodedQuery := decodeAndQueryFromJSONString(res)\n\ta.BastionIP, err = decodedQuery.String(\"IpAddress\")\n\tcheckError(err)\n\ttime.Sleep(time.Second * 10)\n\tfmt.Println(\"Bastion host IP address allocated.\")\n}",
"func (p *ProcessDefinition) StartInstance(by QueryProcessDefinitionBy, req ReqStartInstance) (processDefinition *ResStartedProcessDefinition, err error) {\n\tprocessDefinition = &ResStartedProcessDefinition{}\n\tres, err := p.client.doPostJson(\"/process-definition/\"+by.String()+\"/start\", map[string]string{}, &req)\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = p.client.readJsonResponse(res, processDefinition)\n\treturn\n}",
"func waitForInstanceState(\n\tdesiredState string, instanceID string, client *civogo.Client, timeout time.Duration) error {\n\tdone := make(chan struct{})\n\tdefer close(done)\n\n\tresult := make(chan error, 1)\n\tgo func() {\n\t\tattempts := 0\n\t\tfor {\n\t\t\tattempts++\n\n\t\t\tlog.Printf(\"Checking instance status... (attempt: %d)\", attempts)\n\t\t\tinstance, err := client.GetInstance(instanceID)\n\t\t\tif err != nil {\n\t\t\t\tresult <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif instance.Status == desiredState {\n\t\t\t\tresult <- nil\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Wait 3 seconds in between\n\t\t\ttime.Sleep(3 * time.Second)\n\n\t\t\t// Verify we shouldn't exit\n\t\t\tselect {\n\t\t\tcase <-done:\n\t\t\t\t// We finished, so just exit the goroutine\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\t// Keep going\n\t\t\t}\n\t\t}\n\t}()\n\n\tlog.Printf(\"Waiting for up to %d seconds for instance to become %s\", timeout/time.Second, desiredState)\n\tselect {\n\tcase err := <-result:\n\t\treturn err\n\tcase <-time.After(timeout):\n\t\terr := fmt.Errorf(\"Timeout while waiting to for instance to become '%s'\", desiredState)\n\t\treturn err\n\t}\n}",
"func (adapter *HttpAdapter) CreateInstance(params *CreateInstanceParams) (*CreateInstanceResult, error) {\n\tinstanceURL := fmt.Sprintf(\"%s/v2/service_instances/%s\", params.Server, params.InstanceID)\n\n\tputBody := &osb.ProvisionRequestBody{\n\t\tServiceID: params.ServiceID,\n\t\tPlanID: params.PlanID,\n\t\tContext: params.Context,\n\t\tOrganizationGUID: params.OrganizationGUID,\n\t\tSpaceGUID: params.SpaceGUID,\n\t\tParameters: params.Parameters,\n\t}\n\n\tputParams := url.Values{}\n\tputParams.Set(acceptsIncompleteKey, strconv.FormatBool(params.AcceptsIncomplete))\n\n\trespCode, respBody, err := adapter.doOSBRequest(instanceURL, http.MethodPut, putBody, putParams)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch respCode {\n\tcase http.StatusOK, http.StatusCreated:\n\t\t// Identical service instance already exists, or service instance is provisioned synchronously.\n\t\trb := &osb.ProvisionResponseBody{}\n\t\terr = json.Unmarshal(respBody, rb)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error unmarshalling response body: %s\\nerror: %v\", string(respBody), err)\n\t\t}\n\t\treturn &CreateInstanceResult{Async: false, DashboardURL: rb.DashboardURL, OperationID: rb.Operation}, nil\n\tcase http.StatusAccepted:\n\t\t// Service instance is being provisioned asynchronously.\n\t\tif !params.AcceptsIncomplete {\n\t\t\treturn nil, fmt.Errorf(\"request shouldn't be handled asynchronously: %s\", string(respBody))\n\t\t}\n\n\t\trb := &osb.ProvisionResponseBody{}\n\t\terr = json.Unmarshal(respBody, rb)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error unmarshalling response body: %s\\nerror: %v\", string(respBody), err)\n\t\t}\n\t\treturn &CreateInstanceResult{Async: true, DashboardURL: rb.DashboardURL, OperationID: rb.Operation}, nil\n\tcase http.StatusBadRequest:\n\t\treturn nil, fmt.Errorf(\"request was malformed or missing mandatory data: %s\", string(respBody))\n\tcase http.StatusConflict:\n\t\treturn nil, fmt.Errorf(\"service instance with the 
same id and different attributes already exists: %s\", string(respBody))\n\tcase http.StatusUnprocessableEntity:\n\t\treturn nil, fmt.Errorf(\"the broker only supports asynchronous requests: %s\", string(respBody))\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"request was not successful: %v\", string(respBody))\n\t}\n}",
"func (objInstance *Instance) StartInstance() {\n\tobjInstance.App = iris.New()\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
getIPAddress retrieves the public IP address from AWS. Returns error if no address found | func (ins *EC2RemoteClient) getIPAddress() error {
result, err := ins.ec2Client.DescribeInstances(&ec2.DescribeInstancesInput{InstanceIds: aws.StringSlice([]string{ins.InstanceID})})
if err != nil {
return fmt.Errorf("Error getting instance details : %s", err)
}
ins.instanceIP = net.ParseIP(*result.Reservations[0].Instances[0].PublicIpAddress)
if ins.instanceIP == nil {
return fmt.Errorf("Error parsing IP address")
}
return err
} | [
"func (c *Client) GetIPAddress(id, compartmentID string) (string, error) {\n\tvnics, err := c.computeClient.ListVnicAttachments(context.Background(), core.ListVnicAttachmentsRequest{\n\t\tInstanceId: &id,\n\t\tCompartmentId: &compartmentID,\n\t})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(vnics.Items) == 0 {\n\t\treturn \"\", errors.New(\"instance does not have any configured VNICs\")\n\t}\n\n\tvnic, err := c.virtualNetworkClient.GetVnic(context.Background(), core.GetVnicRequest{VnicId: vnics.Items[0].VnicId})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif vnic.PublicIp == nil {\n\t\treturn *vnic.PrivateIp, nil\n\t}\n\n\treturn *vnic.PublicIp, nil\n}",
"func (e *ec2_t) getIpAddr() {\n\tsvc := ec2.New(session.New(&aws.Config{Region: aws.String(e.Region)}))\n\tparams := &ec2.DescribeInstancesInput{\n\t\tFilters: []*ec2.Filter{\n\t\t\t/*\n\t\t\t\t&ec2.Filter{\n\t\t\t\t\tName: aws.String(\"instance-state-name\"),\n\t\t\t\t\tValues: []*string{\n\t\t\t\t\t\taws.String(\"running\"),\n\t\t\t\t\t\taws.String(\"pending\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t*/\n\t\t\t&ec2.Filter{\n\t\t\t\tName: aws.String(\"instance-id\"),\n\t\t\t\tValues: []*string{\n\t\t\t\t\taws.String(e.InstanceId),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tresp, err := svc.DescribeInstances(params)\n\tif err != nil {\n\t\tlog.Println(\"fail to get public and private IP address of ec2 instance. error: \", err)\n\t\treturn\n\t}\n\n\tinstance := resp.Reservations[0].Instances[0]\n\te.PrivateIp = *instance.PrivateIpAddress\n\te.PublicIp = *instance.PublicIpAddress\n\tlog.Println(\"successfully get ec2 instance's IP address. public ip: \", e.PublicIp, \" private ip: \", e.PrivateIp)\n\treturn\n\n}",
"func (s *Service) GetPublicIPAddress(resourceGroup string, IPName string) (armada.PublicIPAddress, error) {\n\t//JEB return s.scope.AirshipClients.PublicIPAddresses.Get(s.scope.Context, resourceGroup, IPName, \"\")\n\treturn armada.PublicIPAddress{}, nil\n}",
"func (c *Client) GetIPAddress(ctx context.Context, id string) (*InstanceIP, error) {\n\te, err := c.IPAddresses.Endpoint()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\te = fmt.Sprintf(\"%s/%s\", e, id)\n\tr, err := coupleAPIErrors(c.R(ctx).SetResult(&InstanceIP{}).Get(e))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn r.Result().(*InstanceIP), nil\n}",
"func (s Service) GetIPAddress() string {\n\treturn s.container.NetworkSettings.IPAddress\n}",
"func GetPublicIP() (*net.IPAddr, error) {\n\n\t// make a request to IPGetURL to get the public ip address\n\tresp, err := http.Get(IPGetURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// process the response\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// convert the response into the ip address\n\treturn net.ResolveIPAddr(\"\", string(body))\n}",
"func GetIPAddress(r *http.Request) (string, error) {\r\n\tip, _, err := netHttp.SplitHostPort(r.RemoteAddr)\r\n\tif err != nil {\r\n\t\treturn \"\", err\r\n\t}\r\n\r\n\treturn ip, nil\r\n}",
"func (irkit *Irkit) GetIPAddress() string {\n\treturn irkit.Address\n}",
"func (ip *IPAddress)GetIpAddress() (ipAddress string, err error){\n\tnetInterfaces, err := net.Interfaces()\n\tif err != nil{\n\t\tlog4go.Error(err)\n\t\treturn\n\t}\n\tLoop:\n\tfor i := 0; i < len(netInterfaces); i++{\n\t\tif(netInterfaces[i].Flags & net.FlagUp) != 0{\n\t\t\taddrs, _ := netInterfaces[i].Addrs()\n\t\t\tfor _, address := range addrs{\n\t\t\t\tif ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback(){\n\t\t\t\t\tif ipnet.IP.To4()!=nil{\n\t\t\t\t\t\tipAddress = (ipnet.IP.String())\n\t\t\t\t\t\tip.IpAddress = ipAddress\n\t\t\t\t\t\tip.IpValid = true\n\t\t\t\t\t\tbreak Loop\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}",
"func GetPublicIPAddress(client Clients, ctx context.Context,\n\tresourceGroup string, PublicIPname string, expand string) (PublicIPAddress string, err error) {\n\tvmPublicIP := client.VMPublicIP\n\tVMIP, err := vmPublicIP.Get(ctx, resourceGroup, PublicIPname, expand)\n\tif err != nil {\n\t\t\treturn\n\t}\n\tif VMIP.PublicIPAddressPropertiesFormat != nil && VMIP.PublicIPAddressPropertiesFormat.IPAddress != nil {\n\t\t\tPublicIPAddress = *VMIP.PublicIPAddressPropertiesFormat.IPAddress\n\n\t} else {\n\t\t\terr = errors.New(\"Vm has no publicIPAddress\")\n\t}\n\treturn\n\n}",
"func getIP(ec2inst ec2Instance,c *ecs.Container) string{\n\tif len(c.NetworkInterfaces) > 0 {\n\t\treturn ptr.StringValue(c.NetworkInterfaces[0].PrivateIpv4Address)\n\t}\n\treturn ptr.StringValue(ec2inst.PrivateIpAddress)\n}",
"func publicIP() (net.IP, error) {\n\tresp, err := http.Get(publicEcho)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tout, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tn := net.ParseIP(string(out))\n\tif n == nil {\n\t\treturn nil, fmt.Errorf(\"cannot parse ip %s\", string(out))\n\t}\n\n\treturn n, nil\n}",
"func (i EC2Instance) PublicIP() string {\n\tif i.publicIP == \"\" {\n\t\tlog.Printf(\"ERROR: Attempting to get public IP of %s, which is not know\\n\", i.name)\n\t}\n\treturn i.publicIP\n}",
"func GetIPAddress(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tplainAddress := vars[\"domainName\"]\n\tif reply, ok := servers[plainAddress]; ok {\n\t\tw.WriteHeader(http.StatusOK)\n\t\tif enc, err := json.Marshal(reply); err == nil {\n\t\t\tw.Write([]byte(enc))\n\t\t} else {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t}\n\t} else {\n\t\treply := r.RemoteAddr\n\t\tif enc, err := json.Marshal(reply); err == nil {\n\t\t\tw.Write([]byte(enc))\n\t\t} else {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t}\n\t}\n}",
"func GetPublicAddress() (PublicAddress, error) {\n\tvar ip PublicAddress\n\tres, err := http.Get(\"http://ipinfo.io/json\")\n\tif err != nil {\n\t\treturn ip, err\n\t}\n\n\tdefer res.Body.Close()\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn ip, err\n\t}\n\n\terr = json.Unmarshal(body, &ip)\n\tif err != nil {\n\t\treturn ip, nil\n\t}\n\n\treturn ip, nil\n}",
"func GetPublicIP(t *testing.T, region string, enis []string) *string {\n\tpublicIP, err := GetPublicIPE(t, region, enis)\n\trequire.NoError(t, err)\n\treturn publicIP\n}",
"func GetIPAddress(ipAddresses []string, privateRanges []IPRange) (string, bool) {\n\t// march from right to left until we get a public address\n\t// that will be the address right before our proxy.\n\tfor i := len(ipAddresses) - 1; i >= 0; i-- {\n\t\tip := strings.TrimSpace(ipAddresses[i])\n\t\trealIP := net.ParseIP(ip)\n\t\tif !realIP.IsGlobalUnicast() || IPIsPrivateSubnet(realIP, privateRanges) {\n\t\t\t// bad address, go to next\n\t\t\tcontinue\n\t\t}\n\t\treturn ip, true\n\n\t}\n\n\treturn \"\", false\n}",
"func GetPublicIPOfInstanceE(t *testing.T, projectID string, zone string, instanceID string) (string, error) {\n\tlogger.Logf(t, \"Getting Public IP for Compute Instance %s\", instanceID)\n\n\tctx := context.Background()\n\n\tservice, err := NewComputeServiceE(t)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tinstance, err := service.Instances.Get(projectID, zone, instanceID).Context(ctx).Do()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Instances.Get(%s) got error: %v\", instanceID, err)\n\t}\n\n\t// If there are no accessConfigs specified, then this instance will have no external internet access:\n\t// https://cloud.google.com/compute/docs/reference/rest/v1/instances.\n\tif len(instance.NetworkInterfaces[0].AccessConfigs) == 0 {\n\t\treturn \"\", fmt.Errorf(\"Attempted to get public IP of Compute Instance %s, but that Compute Instance does not have a public IP address\", instanceID)\n\t}\n\n\tip := instance.NetworkInterfaces[0].AccessConfigs[0].NatIP\n\n\treturn ip, nil\n}",
"func (m *ServicePrincipalRiskDetection) GetIpAddress()(*string) {\n val, err := m.GetBackingStore().Get(\"ipAddress\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
makeReady prepares an EC2 instance for running remote SSH commands | func (ins *EC2RemoteClient) makeReady() error {
// Check Instance is running - will error if instance doesn't exist
result, err := ins.ec2Client.DescribeInstanceStatus(&ec2.DescribeInstanceStatusInput{InstanceIds: aws.StringSlice([]string{ins.InstanceID})})
if err != nil {
return fmt.Errorf("Error getting instance status : %s", err)
}
// Start instance if needed
if len(result.InstanceStatuses) == 0 || *result.InstanceStatuses[0].InstanceState.Name != "running" {
err = ins.startInstance()
if err != nil {
return fmt.Errorf("Error starting instance : %s", err)
}
}
// Get Public IP address from ec2
err = ins.getIPAddress()
if err != nil {
return fmt.Errorf("Error getting IP address : %s", err)
}
// Set up SSH connection
ins.cmdClient, err = sshCmdClient.NewSSHCmdClient(ins.instanceIP, ins.sshCredentials)
if err != nil {
return err
}
// Check we can at least run a trivial command
exitStatus, err := ins.RunCommand("true")
if err != nil || exitStatus != 0 {
return fmt.Errorf("Error running commands on instance : %s", err)
}
return err
} | [
"func setupServer(c *cli.Context) {\n started := time.Now()\n\n config := deployConfig()\n logger := &logger{Prefix: config.Host}\n ssh := newSSHClient(config.Host, config.User)\n\n ssh.execute(fmt.Sprintf(\"mkdir -p %s\", config.DeployTo))\n\n for _, t := range config.Setup {\n ssh.execute(t)\n }\n\n ssh.close()\n\n logger.infof(\"Done in %f.\", time.Now().Sub(started).Seconds())\n}",
"func (instance *Host) WaitSSHReady(ctx context.Context, timeout time.Duration) (_ string, ferr fail.Error) {\n\tdefer fail.OnPanic(&ferr)\n\n\tif valid.IsNil(instance) {\n\t\treturn \"\", fail.InvalidInstanceError()\n\t}\n\tif ctx == nil {\n\t\treturn \"\", fail.InvalidParameterCannotBeNilError(\"ctx\")\n\t}\n\n\treturn instance.waitInstallPhase(ctx, userdata.PHASE5_FINAL, timeout)\n}",
"func (ins *EC2RemoteClient) startInstance() error {\n\tlog.Printf(\"Starting EC2 Instance %s\", ins.InstanceID)\n\t_, err := ins.ec2Client.StartInstances(&ec2.StartInstancesInput{InstanceIds: aws.StringSlice([]string{ins.InstanceID})})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error starting instance : %s\", err)\n\t}\n\tlog.Printf(\"Waiting for Instance %s to become ready (may take a few minutes)\", ins.InstanceID)\n\terr = ins.ec2Client.WaitUntilInstanceStatusOk(&ec2.DescribeInstanceStatusInput{InstanceIds: aws.StringSlice([]string{ins.InstanceID})})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error waiting for instance to become available : %s\", err)\n\t}\n\treturn err\n}",
"func (i EC2Instance) Setup() error {\n\tlog.Printf(\"DEBUG:aws: Setting up a compute instance\\n\")\n\t// must be able to deal with the exit status of script is 255 b/c of reboot command\n\tconf := utils.Config(\"partitions\").Lookup(i.partition).Contains()\n\tsetup, ok := conf[\"setup\"]\n\tif !ok {\n\t\tlog.Printf(\"DEBUG:aws: No setup required\\n\")\n\t\treturn nil\n\t}\n\tdirSplit := strings.SplitAfter(setup.Self(), \"/\")\n\tdir := strings.Join(dirSplit[:len(dirSplit)-1], \"\")\n\terr := exec.Command(setup.Self(), i.Name(), dir, conf[\"router\"].Self(), conf[\"salt\"].Self()).Run()\n\tif err != nil && !strings.Contains(err.Error(), \"255\") {\n\t\treturn fmt.Errorf(\"WARNING:aws: could not setup instance %s due to %v\", i.Name(), err)\n\t}\n\treturn nil\n}",
"func ConnectEC2(sshEntries lib.SSHEntries, sshConfigPath string, args []string) {\n\t// get the pub key from the ssh agent first\n\tsshAgent, err := net.Dial(\"unix\", os.Getenv(\"SSH_AUTH_SOCK\"))\n\tif err != nil {\n\t\tlog.WithError(err).Fatal(\"can't connect to ssh agent, maybe SSH_AUTH_SOCK is unset?\")\n\t}\n\n\tkeys, err := agent.NewClient(sshAgent).List()\n\tif err != nil || len(keys) < 1 {\n\t\tlog.Fatal(\"Can't get public keys from ssh agent. Please ensure you have the ssh-agent running and have at least one identity added (with ssh-add)\")\n\t}\n\tpubkey := keys[0].String()\n\n\t// push the pub key to those instances one after each other\n\t// TODO: maybe make it parallel\n\tfor _, sshEntry := range sshEntries {\n\t\tvar instanceName = sshEntry.InstanceID\n\t\tif len(sshEntry.Names) > 0 {\n\t\t\tinstanceName = sshEntry.Names[0]\n\t\t}\n\t\tlog.WithField(\"instance\", instanceName).Info(\"trying to do ec2 connect...\")\n\t\tinstanceIPAddress, instanceUser, err := pushEC2Connect(sshEntry.ProfileConfig.Name, sshEntry.InstanceID, sshEntry.User, pubkey)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Fatal(\"can't push ssh key to the instance\")\n\t\t}\n\t\t// if the address is empty we set to the value we got from ec2 connect push\n\t\tif sshEntry.Address == \"\" {\n\t\t\tsshEntry.Address = instanceIPAddress\n\t\t}\n\t\tif sshEntry.User == \"\" {\n\t\t\tsshEntry.User = instanceUser\n\t\t}\n\t}\n\n\t// then generate ssh config for all instances in sshEntries\n\t// save the dynamic ssh config first\n\tif err := sshEntries.SaveConfig(sshConfigPath); err != nil {\n\t\tlog.WithError(err).Fatal(\"can't save ssh config for ec2 connect\")\n\t}\n\n\tvar instanceName = sshEntries[0].InstanceID\n\tif len(sshEntries[0].Names) > 0 {\n\t\tinstanceName = sshEntries[0].Names[0]\n\t}\n\t// connect to the first instance in sshEntry, as the other will be bastion(s)\n\tif len(args) == 0 {\n\t\t// construct default args\n\t\targs = 
[]string{\n\t\t\t\"ssh\",\n\t\t\t\"-tt\",\n\t\t\tinstanceName,\n\t\t}\n\t}\n\n\tcommand, err := exec.LookPath(args[0])\n\tif err != nil {\n\t\tlog.WithError(err).Fatal(\"Can't find the binary in the PATH\")\n\t}\n\n\tvar replacer = strings.NewReplacer(\n\t\t\"{host}\", instanceName,\n\t\t\"{user}\", sshEntries[0].User,\n\t)\n\tvar newArgs []string\n\tfor _, arg := range args {\n\t\tnewArgs = append(newArgs, replacer.Replace(arg))\n\t}\n\tlog.WithField(\"instance_id\", sshEntries[0].InstanceID).Infof(\"Connecting to the instance using '%s'\", strings.Join(newArgs, \" \"))\n\n\tif err := syscall.Exec(command, newArgs, os.Environ()); err != nil {\n\t\tlog.WithFields(log.Fields{\"command\": command}).WithError(err).Fatal(\"can't run the command\")\n\t}\n}",
"func sshToAWSNode(nodeName, path, user, pathSSKeypair string, sshPublicKey []byte) {\n\ta := &AwsInstanceAttribute{}\n\ta.SSHPublicKey = sshPublicKey\n\tfmt.Println(\"\")\n\n\tfmt.Println(\"(1/4) Fetching data from target shoot cluster\")\n\ta.fetchAwsAttributes(nodeName, path)\n\tfmt.Println(\"Data fetched from target shoot cluster.\")\n\tfmt.Println(\"\")\n\n\tfmt.Println(\"(2/4) Setting up bastion host security group\")\n\ta.createBastionHostSecurityGroup()\n\tfmt.Println(\"\")\n\n\tdefer a.cleanupAwsBastionHost()\n\n\tfmt.Println(\"(3/4) Creating bastion host\")\n\ta.createBastionHostInstance()\n\n\tbastionNode := user + \"@\" + a.BastionIP\n\tnode := user + \"@\" + nodeName\n\tfmt.Println(\"Waiting 60 seconds until ports are open.\")\n\ttime.Sleep(60 * time.Second)\n\n\tkey := filepath.Join(pathSSKeypair, \"key\")\n\tsshCmd := fmt.Sprintf(\"ssh -i \" + key + \" -o \\\"ProxyCommand ssh -W %%h:%%p -i \" + key + \" -o IdentitiesOnly=yes -o StrictHostKeyChecking=no \" + bastionNode + \"\\\" \" + node + \" -o IdentitiesOnly=yes -o StrictHostKeyChecking=no\")\n\tcmd := exec.Command(\"bash\", \"-c\", sshCmd)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stdin = os.Stdin\n\tcmd.Stderr = os.Stderr\n\tif err := cmd.Run(); err != nil {\n\t\tfmt.Println(err)\n\t}\n}",
"func AWSCreate() {\n\tSetClusterName()\n\tif _, err := os.Stat(\"./inventory/\" + common.Name + \"/provisioner/.terraform\"); err == nil {\n\t\tfmt.Println(\"Configuration folder already exists\")\n\t} else {\n\t\tsshUser, osLabel := distSelect()\n\t\tfmt.Printf(\"Prepairing Setup for user %s on %s\\n\", sshUser, osLabel)\n\t\tos.MkdirAll(\"./inventory/\"+common.Name+\"/provisioner\", 0755)\n\t\terr := exec.Command(\"cp\", \"-rfp\", \"./kubespray/contrib/terraform/aws/.\", \"./inventory/\"+common.Name+\"/provisioner\").Run()\n\t\tcommon.ErrorCheck(\"provisioner could not provided: %v\", err)\n\t\tprepareConfigFiles(osLabel)\n\t\tprovisioner.ExecuteTerraform(\"init\", \"./inventory/\"+common.Name+\"/provisioner/\")\n\t}\n\n\tprovisioner.ExecuteTerraform(\"apply\", \"./inventory/\"+common.Name+\"/provisioner/\")\n\n\t// waiting for Loadbalancer and other not completed stuff\n\tfmt.Println(\"Infrastructure is upcoming.\")\n\ttime.Sleep(15 * time.Second)\n\treturn\n\n}",
"func (e *SSHExecutor) Initialize(config SSHConfig) {\n\t// set default values\n\tif config.Port <= 0 {\n\t\tconfig.Port = 22\n\t}\n\n\tif config.Timeout == 0 {\n\t\tconfig.Timeout = time.Second * 5 // default timeout is 5 sec\n\t}\n\n\t// build easyssh config\n\te.Config = &easyssh.MakeConfig{\n\t\tServer: config.Host,\n\t\tPort: strconv.Itoa(config.Port),\n\t\tUser: config.User,\n\t\tTimeout: config.Timeout, // timeout when connecting to remote\n\t}\n\n\t// prefer private key authentication\n\tif len(config.KeyFile) > 0 {\n\t\te.Config.KeyPath = config.KeyFile\n\t\te.Config.Passphrase = config.Passphrase\n\t} else if len(config.Password) > 0 {\n\t\te.Config.Password = config.Password\n\t}\n}",
"func (p *EC2Provisioner) CreateInstance(opts EC2CreateInstanceOptions) (*cfg.Remote, error) {\n\t// Set requested region\n\tp.WithRegion(opts.Region)\n\n\t// set highlighter\n\tvar highlight = out.NewColorer(out.CY)\n\n\t// Generate authentication\n\tvar keyName = fmt.Sprintf(\"%s_%s_inertia_key_%d\", opts.Name, p.user, time.Now().UnixNano())\n\tout.Fprintf(p.out, highlight.Sf(\":key: Generating key pair '%s'...\\n\", keyName))\n\tkeyResp, err := p.client.CreateKeyPair(&ec2.CreateKeyPairInput{\n\t\tKeyName: aws.String(keyName),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thomeDir, err := os.UserHomeDir()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Save key\n\tvar keyPath = filepath.Join(homeDir, \".ssh\", *keyResp.KeyName)\n\tout.Fprintf(p.out, highlight.Sf(\":inbox_tray: Saving key to '%s'...\\n\", keyPath))\n\tif err = local.SaveKey(*keyResp.KeyMaterial, keyPath); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create security group for network configuration\n\tvar secGroup = fmt.Sprintf(\"%s-%d\", opts.Name, time.Now().UnixNano())\n\tout.Fprintf(p.out, highlight.Sf(\":circus_tent: Creating security group '%s'...\\n\", secGroup))\n\tgroup, err := p.client.CreateSecurityGroup(&ec2.CreateSecurityGroupInput{\n\t\tGroupName: aws.String(secGroup),\n\t\tDescription: aws.String(\n\t\t\tfmt.Sprintf(\"Rules for project %s on %s\", opts.ProjectName, opts.Name),\n\t\t),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Set rules for ports\n\tout.Fprintf(p.out, highlight.Sf(\":electric_plug: Exposing ports '%s'...\\n\", secGroup))\n\tif err = p.exposePorts(*group.GroupId, opts.DaemonPort, opts.Ports); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Start up instance\n\tout.Fprintf(p.out, highlight.Sf(\":boat: Requesting instance '%s'...\\n\", secGroup))\n\trunResp, err := p.client.RunInstances(&ec2.RunInstancesInput{\n\t\tImageId: aws.String(opts.ImageID),\n\t\tInstanceType: aws.String(opts.InstanceType),\n\t\tMinCount: 
aws.Int64(1),\n\t\tMaxCount: aws.Int64(1),\n\n\t\t// Security options\n\t\tKeyName: keyResp.KeyName,\n\t\tSecurityGroupIds: []*string{group.GroupId},\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Check response validity\n\tif runResp.Instances == nil || len(runResp.Instances) == 0 {\n\t\treturn nil, errors.New(\"Unable to start instances: \" + runResp.String())\n\t}\n\tout.Fprintf(p.out, highlight.Sf(\"A %s instance has been provisioned\", opts.InstanceType))\n\n\t// Loop until intance is running\n\tvar instance ec2.Instance\n\tfor {\n\t\t// Wait briefly between checks\n\t\ttime.Sleep(3 * time.Second)\n\n\t\t// Request instance status\n\t\tout.Fprintf(p.out, \"Checking status of the requested instance...\\n\")\n\t\tresult, err := p.client.DescribeInstances(&ec2.DescribeInstancesInput{\n\t\t\tInstanceIds: []*string{runResp.Instances[0].InstanceId},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Check if reservations are present\n\t\tif result.Reservations == nil || len(result.Reservations) == 0 ||\n\t\t\tlen(result.Reservations[0].Instances) == 0 {\n\t\t\t// A reservation corresponds to a command to start instances\n\t\t\t// If nothing is here... 
we gotta keep waiting\n\t\t\tfmt.Fprintln(p.out, \"No reservations found yet.\")\n\t\t\tcontinue\n\t\t}\n\n\t\t// Get status\n\t\ts := result.Reservations[0].Instances[0].State\n\t\tif s == nil {\n\t\t\tfmt.Println(p.out, \"Status unknown.\")\n\t\t\tcontinue\n\t\t}\n\n\t\t// Code 16 means instance has started, and we can continue!\n\t\tif s.Code != nil && *s.Code == codeEC2InstanceStarted {\n\t\t\tfmt.Fprintln(p.out, \"Instance is running!\")\n\t\t\tinstance = *result.Reservations[0].Instances[0]\n\t\t\tbreak\n\t\t}\n\n\t\t// Otherwise, keep polling\n\t\tif s.Name != nil {\n\t\t\tfmt.Fprintln(p.out, \"Instance status: \"+*s.Name)\n\t\t} else {\n\t\t\tfmt.Fprintln(p.out, \"Instance status: \"+s.String())\n\t\t}\n\t\tcontinue\n\t}\n\n\t// Check instance validity\n\tif instance.PublicDnsName == nil {\n\t\treturn nil, errors.New(\"Unable to find public IP address for instance: \" + instance.String())\n\t}\n\n\t// Set tags\n\tout.Fprintf(p.out, \"Setting tags on instance...\\n\")\n\tif _, err = p.client.CreateTags(&ec2.CreateTagsInput{\n\t\tResources: []*string{instance.InstanceId},\n\t\tTags: []*ec2.Tag{\n\t\t\t{\n\t\t\t\tKey: aws.String(\"Name\"),\n\t\t\t\tValue: aws.String(opts.Name),\n\t\t\t},\n\t\t\t{\n\t\t\t\tKey: aws.String(\"Purpose\"),\n\t\t\t\tValue: aws.String(\"Inertia Continuous Deployment\"),\n\t\t\t},\n\t\t},\n\t}); err != nil {\n\t\tfmt.Fprintln(p.out, \"Failed to set tags: \"+err.Error())\n\t}\n\n\t// Poll for SSH port to open\n\tfmt.Fprintln(p.out, \"Waiting for ports to open...\")\n\tfor {\n\t\ttime.Sleep(3 * time.Second)\n\t\tfmt.Fprintln(p.out, \"Checking ports...\")\n\t\tif conn, err := net.Dial(\"tcp\", *instance.PublicDnsName+\":22\"); err == nil {\n\t\t\tfmt.Fprintln(p.out, \"Connection established!\")\n\t\t\tconn.Close()\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Generate webhook secret\n\tout.Fprintf(p.out, \"Generating a webhook secret...\\n\")\n\twebhookSecret, err := common.GenerateRandomString()\n\tif err != nil {\n\t\tfmt.Fprintln(p.out, 
err.Error())\n\t\tfmt.Fprintln(p.out, \"Using default secret 'inertia'\")\n\t\twebhookSecret = \"interia\"\n\t} else {\n\t\tfmt.Fprintf(p.out, \"Generated webhook secret: '%s'\\n\", webhookSecret)\n\t}\n\n\t// Return remote configuration\n\treturn &cfg.Remote{\n\t\tName: opts.Name,\n\t\tIP: *instance.PublicDnsName,\n\t\tSSH: &cfg.SSH{\n\t\t\tUser: p.user,\n\t\t\tIdentityFile: keyPath,\n\t\t\tSSHPort: \"22\",\n\t\t},\n\t\tDaemon: &cfg.Daemon{\n\t\t\tPort: strconv.FormatInt(opts.DaemonPort, 10),\n\t\t\tWebHookSecret: webhookSecret,\n\t\t},\n\t\tProfiles: make(map[string]string),\n\t}, nil\n}",
"func waitForHosts(path string) {\n\toldPwd := sh.Pwd()\n\tsh.Cd(path)\n\tlog.Debug(\"Ensuring ansible-playbook can be executed properly\")\n\tsh.SetE(exec.Command(\"ansible-playbook\", \"--version\"))\n\tpathToPlaybook := \"./playbooks/wait-for-hosts.yml\"\n\tansibleCommand := []string{\n\t\t\"-i\", \"plugins/inventory/terraform.py\",\n\t\t\"-e\", \"ansible_python_interpreter=\" + strings.TrimSpace(pythonBinary),\n\t\t\"-e\", \"@security.yml\",\n\t\tpathToPlaybook,\n\t}\n\tcmd := exec.Command(\"ansible-playbook\", ansibleCommand...)\n\tlog.Info(\"Waiting for SSH access to hosts...\")\n\toutStr, err := ExecuteWithOutput(cmd)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"command\": cmd.Args,\n\t\t\t\"output\": outStr,\n\t\t\t\"error\": err.Error(),\n\t\t}).Fatalf(\"Couldn't execute playbook %s\", pathToPlaybook)\n\t}\n\tsh.Cd(oldPwd)\n}",
"func runAndWaitForInstance(svc *ec2.EC2, name string, params *ec2.RunInstancesInput) (ec2.Instance, error) {\n\trunResult, err := svc.RunInstances(params)\n\tif err != nil {\n\t\treturn ec2.Instance{}, gruntworkerrors.WithStackTrace(err)\n\t}\n\n\tif len(runResult.Instances) == 0 {\n\t\terr := errors.New(\"Could not create test EC2 instance\")\n\t\treturn ec2.Instance{}, gruntworkerrors.WithStackTrace(err)\n\t}\n\n\terr = svc.WaitUntilInstanceExists(&ec2.DescribeInstancesInput{\n\t\tFilters: []*ec2.Filter{\n\t\t\t&ec2.Filter{\n\t\t\t\tName: awsgo.String(\"instance-id\"),\n\t\t\t\tValues: []*string{runResult.Instances[0].InstanceId},\n\t\t\t},\n\t\t},\n\t})\n\n\tif err != nil {\n\t\treturn ec2.Instance{}, gruntworkerrors.WithStackTrace(err)\n\t}\n\n\t// Add test tag to the created instance\n\t_, err = svc.CreateTags(&ec2.CreateTagsInput{\n\t\tResources: []*string{runResult.Instances[0].InstanceId},\n\t\tTags: []*ec2.Tag{\n\t\t\t{\n\t\t\t\tKey: awsgo.String(\"Name\"),\n\t\t\t\tValue: awsgo.String(name),\n\t\t\t},\n\t\t},\n\t})\n\n\tif err != nil {\n\t\treturn ec2.Instance{}, gruntworkerrors.WithStackTrace(err)\n\t}\n\n\t// EC2 Instance must be in a running before this function returns\n\terr = svc.WaitUntilInstanceRunning(&ec2.DescribeInstancesInput{\n\t\tFilters: []*ec2.Filter{\n\t\t\t&ec2.Filter{\n\t\t\t\tName: awsgo.String(\"instance-id\"),\n\t\t\t\tValues: []*string{runResult.Instances[0].InstanceId},\n\t\t\t},\n\t\t},\n\t})\n\n\tif err != nil {\n\t\treturn ec2.Instance{}, gruntworkerrors.WithStackTrace(err)\n\t}\n\n\treturn *runResult.Instances[0], nil\n\n}",
"func (s *ssh) WaitReady(hostName string, timeout time.Duration) error {\n\tif timeout < utils.TimeoutCtxHost {\n\t\ttimeout = utils.TimeoutCtxHost\n\t}\n\thost := &host{session: s.session}\n\tcfg, err := host.SSHConfig(hostName)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn conv.ToSystemSshConfig(cfg).WaitServerReady(timeout)\n}",
"func (svc *SSHService) WaitServerReady(hostParam interface{}, timeout time.Duration) error {\n\tvar err error\n\tsshSvc := NewSSHService(svc.provider)\n\tssh, err := sshSvc.GetConfig(hostParam)\n\tif err != nil {\n\t\treturn logicErrf(err, \"Failed to read SSH config\")\n\t}\n\twaitErr := ssh.WaitServerReady(timeout)\n\treturn infraErr(waitErr)\n}",
"func cmdSSH() {\n\tswitch state := status(B2D.VM); state {\n\tcase vmUnregistered:\n\t\tlog.Fatalf(\"%s is not registered.\", B2D.VM)\n\tcase vmRunning:\n\t\tcmdParts := append(strings.Fields(B2D.SSHPrefix), fmt.Sprintf(\"%d\", B2D.SSHPort), \"docker@localhost\")\n\t\tif err := cmd(cmdParts[0], cmdParts[1:]...); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\tdefault:\n\t\tlog.Fatalf(\"%s is not running.\", B2D.VM)\n\t}\n}",
"func pushEC2Connect(profile, instanceID, instanceUser, pubKey string) (string, string, error) {\n\tctx := log.WithField(\"instance_id\", instanceID)\n\tcfg, err := config.LoadDefaultConfig(context.TODO(),\n\t\tconfig.WithSharedConfigProfile(profile))\n\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"can't get aws session: %s\", err)\n\t}\n\tec2Svc := ec2.NewFromConfig(cfg)\n\tec2Result, err := ec2Svc.DescribeInstances(context.TODO(), &ec2.DescribeInstancesInput{\n\t\tInstanceIds: []string{instanceID},\n\t})\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"can't get ec2 instance: %s\", err)\n\t}\n\n\tif len(ec2Result.Reservations) == 0 || len(ec2Result.Reservations[0].Instances) == 0 {\n\t\treturn \"\", \"\", fmt.Errorf(\"Couldn't find the instance %s\", instanceID)\n\t}\n\n\tec2Instance := ec2Result.Reservations[0].Instances[0]\n\tec2ICSvc := ec2instanceconnect.NewFromConfig(cfg)\n\n\t// no username has been provided, so we try to get it fom the instance tag first\n\tif instanceUser == \"\" {\n\t\tctx.Debug(\"no user has been set provided, trying to get it from the tags\")\n\t\t// next try to get username from the instance tags\n\t\tif instanceUser = lib.GetUserFromTags(ec2Instance.Tags); instanceUser == \"\" {\n\t\t\t// otherwise fallback to default\n\t\t\tctx.WithField(\"user\", defaultUser).Debugf(\"got no user from the instance tags, setting to default\")\n\t\t\tinstanceUser = defaultUser\n\t\t} else {\n\t\t\tctx.WithField(\"user\", instanceUser).Debugf(\"got username from tags\")\n\t\t}\n\t}\n\n\tctx.WithField(\"user\", instanceUser).Info(\"pushing SSH key...\")\n\n\tif _, err := ec2ICSvc.SendSSHPublicKey(context.TODO(), &ec2instanceconnect.SendSSHPublicKeyInput{\n\t\tInstanceId: ec2Instance.InstanceId,\n\t\tInstanceOSUser: aws.String(instanceUser),\n\t\tAvailabilityZone: ec2Instance.Placement.AvailabilityZone,\n\t\tSSHPublicKey: aws.String(pubKey),\n\t}); err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"can't push ssh key: %s\", 
err)\n\t}\n\tvar address = aws.ToString(ec2Instance.PrivateIpAddress)\n\tif aws.ToString(ec2Instance.PublicIpAddress) != \"\" {\n\t\taddress = aws.ToString(ec2Instance.PublicIpAddress)\n\t}\n\treturn address, instanceUser, nil\n}",
"func testSSHToPublicHost(t *testing.T, terraformOptions *terraform.Options) {\n\t//get the GCP instance\n\tproject := terraformOptions.Vars[\"project\"].(string)\n\tinstanceName := terraformOptions.Vars[\"name\"].(string)\n\tinstance := gcp.FetchInstance(t, project, instanceName)\n\n\t// generate a ssh key pair\n\tkeyPair := ssh.GenerateRSAKeyPair(t, 2048)\n\n\t// add the public ssh key to the compute engine metadata\n\tsshUsername := \"terratest\"\n\tpublicKey := keyPair.PublicKey\n\tinstance.AddSshKey(t, sshUsername, publicKey)\n\n\t// Run `terraform output` to get the value of an output variable\n\tpublicInstanceIP := terraform.Output(t, terraformOptions, \"compute_engine_public_ip\")\n\n\t// We're going to try to SSH to the instance IP, using the Key Pair we created earlier, and the user \"terratest\",\n\t// as we know the Instance is running an Ubuntu AMI that has such a user\n\tpublicHost := ssh.Host{\n\t\tHostname: publicInstanceIP,\n\t\tSshKeyPair: keyPair,\n\t\tSshUserName: sshUsername,\n\t}\n\n\t// It can take a minute or so for the Instance to boot up, so retry a few times\n\tmaxRetries := 10\n\ttimeBetweenRetries := 5 * time.Second\n\tdescription := fmt.Sprintf(\"SSH to public host %s\", publicInstanceIP)\n\n\t// Run a simple echo command on the server\n\texpectedText := \"Hello, World\"\n\tcommand := fmt.Sprintf(\"echo -n '%s'\", expectedText)\n\n\t// Verify that we can SSH to the Instance and run commands\n\tretry.DoWithRetry(t, description, maxRetries, timeBetweenRetries, func() (string, error) {\n\t\tactualText, err := ssh.CheckSshCommandE(t, publicHost, command)\n\n\t\tif err != nil {\n\t\t\tlogger.Log(t, emoji.Sprint(\"--- :cross_mark: FAIL: testSSHToPublicHost\"))\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif strings.TrimSpace(actualText) != expectedText {\n\t\t\tlogger.Log(t, emoji.Sprint(\"--- :cross_mark: FAIL: testSSHToPublicHost\"))\n\t\t\treturn \"\", fmt.Errorf(\"Expected SSH command to return '%s' but got '%s'\", expectedText, 
actualText)\n\t\t}\n\n\t\tlogger.Log(t, emoji.Sprint(\"--- :check_mark_button: PASS: testSSHToPublicHost\"))\n\t\treturn \"\", nil\n\t})\n}",
"func SpawnSSH(c *ConfSSH) error {\n\n\tif c.PrivateKey == \"\" {\n\t\treturn fmt.Errorf(\"no host key has been set\")\n\t}\n\n\t// Configure the ssh server\n\tpk, err := ssh.ParsePrivateKey([]byte(c.PrivateKey))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to parse private key\")\n\t}\n\tc.ServerConfig.AddHostKey(pk)\n\tc.ServerConfig.PasswordCallback = getSSHHandlePassword(c)\n\tc.ServerConfig.PublicKeyCallback = getSSHHandlePublic(c)\n\n\t// Create the TCP listener\n\tlistener, err := net.Listen(\"tcp\", fmt.Sprintf(\"%s:%d\", c.BindHost, c.BindPort))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to listen on %s:%d (%s)\", c.BindHost, c.BindPort, err)\n\t}\n\tc.listener = listener\n\n\t// Start the ssh handler\n\tgo sshStart(c)\n\n\treturn nil\n}",
"func (a *AliyunInstanceAttribute) createBastionHostInstance(sshPublicKey []byte) {\n\tres, err := ExecCmdReturnOutput(\"bash\", \"-c\", \"aliyun ecs DescribeInstances --VpcId=\"+a.VpcID)\n\tcheckError(err)\n\tdecodedQuery := decodeAndQueryFromJSONString(res)\n\n\tinstances, err := decodedQuery.Array(\"Instances\", \"Instance\")\n\tcheckError(err)\n\tbastionServerExists := false\n\tfor _, iter := range instances {\n\t\tinstance := jsonq.NewQuery(iter)\n\t\tinstanceName, err := instance.String(\"InstanceName\")\n\t\tcheckError(err)\n\t\tif instanceName == a.BastionInstanceName {\n\t\t\tbastionServerExists = true\n\t\t\ta.BastionInstanceID, err = instance.String(\"InstanceId\")\n\t\t\tcheckError(err)\n\t\t\tif checkIsThereGardenerUser(a.BastionInstanceID) {\n\t\t\t\ta.BastionSSHUser = \"gardener\"\n\t\t\t} else {\n\t\t\t\t// The bastion is created before `gardener-user` change\n\t\t\t\ta.BastionSSHUser = \"root\"\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !bastionServerExists {\n\t\tuserData := getBastionUserData(sshPublicKey)\n\t\tencodedUserData := base64.StdEncoding.EncodeToString(userData)\n\n\t\targuments := \"aliyun ecs CreateInstance --ImageId=\" + a.ImageID + \" --InstanceType=\" + a.InstanceType + \" --RegionId=\" + a.RegionID + \" --ZoneId=\" + a.ZoneID + \" --VSwitchId=\" + a.VSwitchID + \" --InstanceChargeType=\" + a.InstanceChargeType + \" --InternetChargeType=\" + a.InternetChargeType + \" --InternetMaxBandwidthIn=\" + a.InternetMaxBandwidthIn + \" --InternetMaxBandwidthOut=\" + a.InternetMaxBandwidthOut + \" --IoOptimized=\" + a.IoOptimized + \" --InstanceName=\" + a.BastionInstanceName + \" --SecurityGroupId=\" + a.BastionSecurityGroupID + \" --UserData=\" + encodedUserData\n\t\tres, err = ExecCmdReturnOutput(\"bash\", \"-c\", arguments)\n\t\tcheckError(err)\n\t\tdecodedQuery = decodeAndQueryFromJSONString(res)\n\t\ta.BastionInstanceID, err = decodedQuery.String(\"InstanceId\")\n\t\ta.BastionSSHUser = \"gardener\"\n\t\tcheckError(err)\n\t\tattemptCnt 
:= 0\n\t\tfor attemptCnt < 60 {\n\t\t\tres, err = ExecCmdReturnOutput(\"bash\", \"-c\", \"aliyun ecs DescribeInstances --InstanceIds=\\\"['\"+a.BastionInstanceID+\"']\\\"\")\n\t\t\tcheckError(err)\n\t\t\tdecodedQuery = decodeAndQueryFromJSONString(res)\n\t\t\ttotalCount, err := decodedQuery.Int(\"TotalCount\")\n\t\t\tcheckError(err)\n\t\t\tif totalCount == 1 {\n\t\t\t\ttime.Sleep(time.Second * 30)\n\t\t\t\tfmt.Println(\"Bastion host created.\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfmt.Println(\"Creating bastion host...\")\n\t\t\ttime.Sleep(time.Second * 2)\n\t\t\tattemptCnt++\n\t\t}\n\t\tif attemptCnt == 60 {\n\t\t\tfmt.Println(\"Bastion host creation time out. Please try again.\")\n\t\t\tos.Exit(2)\n\t\t}\n\t}\n}",
"func (a *AwsInstanceAttribute) createBastionHostInstance() {\n\n\t// check if bastion host exists\n\ta.getBastionHostInstance()\n\tif a.BastionInstanceID != \"\" {\n\t\tfmt.Println(\"Bastion Host exists, skipping creation.\")\n\t\treturn\n\t}\n\n\ttmpfile, err := ioutil.TempFile(os.TempDir(), \"gardener-user.sh\")\n\tcheckError(err)\n\tdefer os.Remove(tmpfile.Name())\n\t_, err = tmpfile.Write(a.UserData)\n\tcheckError(err)\n\n\tinstanceType := \"\"\n\targuments := fmt.Sprintf(\"aws ec2 describe-instance-type-offerings --query %s\", \"InstanceTypeOfferings[].InstanceType\")\n\tcaptured := capture()\n\toperate(\"aws\", arguments)\n\tcapturedOutput, err := captured()\n\tcheckError(err)\n\twords := strings.Fields(capturedOutput)\n\tfor _, value := range words {\n\t\tif value == \"t2.nano\" {\n\t\t\tinstanceType = \"t2.nano\"\n\t\t}\n\t}\n\tif instanceType == \"\" {\n\t\tfor _, value := range words {\n\t\t\tif strings.HasPrefix(value, \"t\") {\n\t\t\t\tinstanceType = value\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\t// create bastion host\n\targuments = \"aws \" + fmt.Sprintf(\"ec2 run-instances --iam-instance-profile Name=%s --image-id %s --count 1 --instance-type %s --key-name %s --security-group-ids %s --subnet-id %s --associate-public-ip-address --user-data file://%s --tag-specifications ResourceType=instance,Tags=[{Key=Name,Value=%s},{Key=component,Value=gardenctl}] ResourceType=volume,Tags=[{Key=component,Value=gardenctl}]\", a.BastionInstanceName, a.ImageID, instanceType, a.KeyName, a.BastionSecurityGroupID, a.SubnetID, tmpfile.Name(), a.BastionInstanceName)\n\tcaptured = capture()\n\toperate(\"aws\", arguments)\n\tcapturedOutput, err = captured()\n\tcheckError(err)\n\twords = strings.Fields(capturedOutput)\n\tfor _, value := range words {\n\t\tif strings.HasPrefix(value, \"i-\") {\n\t\t\ta.BastionInstanceID = value\n\t\t}\n\t}\n\tfmt.Println(\"Bastion host instance created.\")\n\tfmt.Println(\"\")\n\n\t// check if bastion host is up and running, timeout after 3 
minutes\n\tattemptCnt := 0\n\tfor attemptCnt < 60 {\n\t\targuments = \"aws ec2 describe-instances --instance-id=\" + a.BastionInstanceID + \" --query Reservations[*].Instances[].[State.Name] --output text\"\n\t\tcaptured = capture()\n\t\toperate(\"aws\", arguments)\n\t\tcapturedOutput, err = captured()\n\t\tcheckError(err)\n\t\tfmt.Println(\"Instance State: \" + capturedOutput)\n\t\tif strings.Trim(capturedOutput, \"\\n\") == \"running\" {\n\t\t\targuments := \"aws ec2 describe-instances --instance-id \" + a.BastionInstanceID + \" --query Reservations[*].Instances[*].PublicIpAddress\"\n\t\t\tcaptured := capture()\n\t\t\toperate(\"aws\", arguments)\n\t\t\tcapturedOutput, err := captured()\n\t\t\tcheckError(err)\n\t\t\ta.BastionIP = capturedOutput\n\t\t\treturn\n\t\t}\n\t\ttime.Sleep(time.Second * 2)\n\t\tattemptCnt++\n\t}\n\tif attemptCnt == 90 {\n\t\tfmt.Println(\"Bastion server instance timeout. Please try again.\")\n\t\tos.Exit(2)\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
RunCommand is a wrapper around the SSH client to run a command abstracts the SSH connection details from the EC2 client interface RunCommandWithOutput discards the stdout and stderr from the command | func (ins *EC2RemoteClient) RunCommand(cmd string) (exitStatus int, err error) {
exitStatus, err = ins.cmdClient.RunCommand(cmd)
return exitStatus, err
} | [
"func (ins *EC2RemoteClient) RunCommandWithOutput(cmd string) (exitStatus int, stdoutBuf bytes.Buffer, stderrBuf bytes.Buffer, err error) {\n\texitStatus, stdoutBuf, stderrBuf, err = ins.cmdClient.RunCommandWithOutput(cmd)\n\treturn exitStatus, stdoutBuf, stderrBuf, err\n}",
"func (client *Client) Run(command string, silent bool) (output string, err error) {\n\tkeys := ssh.Auth{\n\t\tKeys: []string{client.PrivateKeyFile},\n\t}\n\tsshterm, err := ssh.NewNativeClient(client.User, client.Host, \"SSH-2.0-MyCustomClient-1.0\", client.Port, &keys, nil)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to request shell - %s\", err)\n\t}\n\tif !silent {\n\t\tlog.Printf(\"Running: ssh -i \\\"%s\\\" %s@%s %s\", client.PrivateKeyFile, client.User, client.Host, command)\n\t}\n\tr, _, err := sshterm.Start(command)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to start command - %s\", err)\n\t}\n\tsshterm.Wait()\n\tresponse, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to read response - %s\", err)\n\t}\n\treturn string(response), nil\n}",
"func RunCommandWithOutput(session *ssh.Session, log logrus.FieldLogger, command string, w io.Writer) (err error) {\n\tdefer func() {\n\t\tif err != nil && session != nil {\n\t\t\terrClose := session.Close()\n\t\t\tif errClose != nil {\n\t\t\t\tlog.WithError(err).Error(\"failed to close SSH session\")\n\t\t\t}\n\t\t}\n\t}()\n\tvar stderr io.Reader\n\tstderr, err = session.StderrPipe()\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\n\tvar stdout io.Reader\n\tstdout, err = session.StdoutPipe()\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\n\tvar wg sync.WaitGroup\n\twg.Add(2)\n\terrCh := make(chan error, 2)\n\tsink := make(chan string)\n\tdone := make(chan struct{})\n\n\tgo func() {\n\t\terrCh <- stream(stdout, sink)\n\t\twg.Done()\n\t}()\n\tgo func() {\n\t\terrCh <- stream(stderr, sink)\n\t\twg.Done()\n\t}()\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(errCh)\n\t}()\n\tgo func() {\n\t\tw := bufio.NewWriter(w)\n\t\tfor line := range sink {\n\t\t\t_, err := w.Write([]byte(line))\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"failed to write to w: %v\", err)\n\t\t\t}\n\t\t}\n\t\tw.Flush()\n\t\tclose(done)\n\t}()\n\n\terr = session.Start(command)\n\tif err != nil {\n\t\treturn trace.Wrap(err, \"failed to start %q\", command)\n\t}\n\n\terr = session.Wait()\n\tsession.Close()\n\tsession = nil // Avoid second close\n\tfor err := range errCh {\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to stream: %v\", err)\n\t\t}\n\t}\n\tclose(sink)\n\t<-done\n\n\treturn trace.Wrap(err)\n}",
"func SSHRunCommand(client *ssh.Client, cmd string) (string, error) {\n\tsession, err := client.NewSession()\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer session.Close()\n\n\tvar stdout bytes.Buffer\n\tvar stderr bytes.Buffer\n\tsession.Stdout = &stdout\n\tsession.Stderr = &stderr\n\n\terr = session.Run(cmd)\n\n\toutput := stdout.String() + stderr.String()\n\n\tif err != nil {\n\t\treturn output, fmt.Errorf(\"failed to execute command \\\"%s\\\" via SSH client: %s\", cmd, err)\n\t}\n\n\treturn output, nil\n}",
"func RunSSHCommand(SSHHandle *ssh.Client, cmd string, sudo bool, bg bool, logger *log.Logger) (retCode int, stdout, stderr []string) {\n\tlogger.Println(\"Running cmd \" + cmd)\n\tvar stdoutBuf, stderrBuf bytes.Buffer\n\tsshSession, err := SSHHandle.NewSession()\n\tif err != nil {\n\t\tlogger.Printf(\"SSH session creation failed! %s\", err)\n\t\treturn -1, nil, nil\n\t}\n\tdefer sshSession.Close()\n\n\tmodes := ssh.TerminalModes{\n\t\tssh.ECHO: 0, // disable echoing\n\t\tssh.TTY_OP_ISPEED: 14400, // input speed = 14.4kbaud\n\t\tssh.TTY_OP_OSPEED: 14400, // output speed = 14.4kbaud\n\t}\n\n\tif err = sshSession.RequestPty(\"xterm\", 80, 40, modes); err != nil {\n\t\tlogger.Println(\"SSH session Pty creation failed!\")\n\t\treturn -1, nil, nil\n\t}\n\n\tsshOut, err := sshSession.StdoutPipe()\n\tif err != nil {\n\t\tlogger.Println(\"SSH session StdoutPipe creation failed!\")\n\t\treturn -1, nil, nil\n\t}\n\tsshErr, err := sshSession.StderrPipe()\n\tif err != nil {\n\t\tlogger.Println(\"SSH session StderrPipe creation failed!\")\n\t\treturn -1, nil, nil\n\t}\n\n\tshout := io.MultiWriter(&stdoutBuf, (*LogWriter)(logger))\n\tssherr := io.MultiWriter(&stderrBuf, (*LogWriter)(logger))\n\n\tgo func() {\n\t\tio.Copy(shout, sshOut)\n\t}()\n\tgo func() {\n\n\t\tio.Copy(ssherr, sshErr)\n\t}()\n\n\tif bg {\n\t\tcmd = \"nohup sh -c \\\"\" + cmd + \" 2>&1 >/dev/null </dev/null & \\\"\"\n\t} else {\n\t\tcmd = \"sh -c \\\"\" + cmd + \"\\\"\"\n\t}\n\n\tif sudo {\n\t\tcmd = SudoCmd(cmd)\n\t}\n\n\tlogger.Println(\"Running command : \" + cmd)\n\tif err = sshSession.Run(cmd); err != nil {\n\t\tlogger.Println(\"failed command : \" + cmd)\n\t\tswitch v := err.(type) {\n\t\tcase *ssh.ExitError:\n\t\t\tretCode = v.Waitmsg.ExitStatus()\n\t\tdefault:\n\t\t\tretCode = -1\n\t\t}\n\t} else {\n\t\tlogger.Println(\"sucess command : \" + cmd)\n\t\tretCode = 0\n\t}\n\n\tstdout = strings.Split(stdoutBuf.String(), \"\\n\")\n\tstderr = strings.Split(stderrBuf.String(), 
\"\\n\")\n\tlogger.Println(stdout)\n\tlogger.Println(stderr)\n\tlogger.Println(\"Return code : \" + strconv.Itoa(retCode))\n\n\treturn retCode, stdout, stderr\n\n}",
"func (vtctldclient *VtctldClientProcess) ExecuteCommandWithOutput(args ...string) (string, error) {\n\tvar resultByte []byte\n\tvar resultStr string\n\tvar err error\n\tretries := 10\n\tretryDelay := 1 * time.Second\n\tpArgs := []string{\"--server\", vtctldclient.Server}\n\tif *isCoverage {\n\t\tpArgs = append(pArgs, \"--test.coverprofile=\"+getCoveragePath(\"vtctldclient-\"+args[0]+\".out\"), \"--test.v\")\n\t}\n\tpArgs = append(pArgs, args...)\n\tfor i := 1; i <= retries; i++ {\n\t\ttmpProcess := exec.Command(\n\t\t\tvtctldclient.Binary,\n\t\t\tfilterDoubleDashArgs(pArgs, vtctldclient.VtctldClientMajorVersion)...,\n\t\t)\n\t\tlog.Infof(\"Executing vtctldclient with command: %v (attempt %d of %d)\", strings.Join(tmpProcess.Args, \" \"), i, retries)\n\t\tresultByte, err = tmpProcess.CombinedOutput()\n\t\tresultStr = string(resultByte)\n\t\tif err == nil || !shouldRetry(resultStr) {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(retryDelay)\n\t}\n\treturn filterResultWhenRunsForCoverage(resultStr), err\n}",
"func SSHRunCommand(ctx context.Context, user string, host string, command string) error {\n\tclient, err := sshClient(ctx, user, host)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Error creating client\")\n\t}\n\tdefer func() { _ = client.Close() }()\n\n\tsession, err := client.NewSession()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Error creating session\")\n\t}\n\tdefer func() { _ = session.Close() }()\n\n\toutput, err := session.CombinedOutput(command)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Error running %s on %s: %s\", command, host, output)\n\t}\n\treturn nil\n}",
"func (client *SSHClient) RunCommand(cmd *SSHCommand) error {\n\tsession, err := client.newSession()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer session.Close()\n\n\t_, err = runCommand(session, cmd)\n\treturn err\n}",
"func (sc *sshclient) RunWithResults(address string, command string) (string, error) {\n\tclient, err := ssh.Dial(\"tcp\", address, sc.config)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Could not connect to address %s:%v \", address, err)\n\t}\n\n\t// Each ClientConn can support multiple interactive sessions,\n\t// represented by a Session.\n\tsession, err := client.NewSession()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Could not create session at address %s:%v \", address, err)\n\t}\n\tdefer session.Close()\n\n\tresultdata, err := session.Output(command)\n\tif err != nil {\n\t\treturn string(resultdata), fmt.Errorf(\"Command '%s' at address %s produced an error:%v \", command, address, err)\n\t}\n\n\treturn string(resultdata), nil\n}",
"func (ins *Instance) RunCommand(cmd string) ssh.Result {\n\tif ins.ExternalIP == \"\" {\n\t\tins.populateExternalIP()\n\t}\n\treturn ssh.Run(cmd, ins.ExternalIP, ins.SshUser, ins.SshKey)\n}",
"func (s *MockSSHClient) RunCommand(cmd string) (string, error) {\n\tif s.MockRunCommand != nil {\n\t\treturn s.MockRunCommand(cmd)\n\t}\n\treturn \"\", nil\n}",
"func (sdk SDKProperties) RunSSHCommand(targetAddress string, customSSHConfig string, privateKey string, verbose bool, args []string) (string, error) {\n\n\tcmdArgs, err := buildSSHArgs(sdk, targetAddress, customSSHConfig, privateKey, verbose, args)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn runSSH(cmdArgs, false)\n}",
"func (e *SSHExecutor) Execute(cmd string, sudo bool, timeout ...time.Duration) ([]byte, []byte, error) {\n\t// try to acquire root permission\n\tif e.Sudo || sudo {\n\t\tcmd = fmt.Sprintf(\"sudo -H -u root bash -c \\\"%s\\\"\", cmd)\n\t}\n\n\t// set a basic PATH in case it's empty on login\n\tcmd = fmt.Sprintf(\"PATH=$PATH:/usr/bin:/usr/sbin %s\", cmd)\n\n\tif e.Locale != \"\" {\n\t\tcmd = fmt.Sprintf(\"export LANG=%s; %s\", e.Locale, cmd)\n\t}\n\n\t// run command on remote host\n\t// default timeout is 60s in easyssh-proxy\n\tif len(timeout) == 0 {\n\t\ttimeout = append(timeout, executeDefaultTimeout)\n\t}\n\n\tstdout, stderr, done, err := e.Config.Run(cmd, timeout...)\n\n\tzap.L().Info(\"SSHCommand\",\n\t\tzap.String(\"host\", e.Config.Server),\n\t\tzap.String(\"port\", e.Config.Port),\n\t\tzap.String(\"cmd\", cmd),\n\t\tzap.Error(err),\n\t\tzap.String(\"stdout\", stdout),\n\t\tzap.String(\"stderr\", stderr))\n\n\tif err != nil {\n\t\tbaseErr := ErrSSHExecuteFailed.\n\t\t\tWrap(err, \"Failed to execute command over SSH for '%s@%s:%s'\", e.Config.User, e.Config.Server, e.Config.Port).\n\t\t\tWithProperty(ErrPropSSHCommand, cmd).\n\t\t\tWithProperty(ErrPropSSHStdout, stdout).\n\t\t\tWithProperty(ErrPropSSHStderr, stderr)\n\t\tif len(stdout) > 0 || len(stderr) > 0 {\n\t\t\toutput := strings.TrimSpace(strings.Join([]string{stdout, stderr}, \"\\n\"))\n\t\t\tbaseErr = baseErr.\n\t\t\t\tWithProperty(cliutil.SuggestionFromFormat(\"Command output on remote host %s:\\n%s\\n\",\n\t\t\t\t\te.Config.Server,\n\t\t\t\t\tcolor.YellowString(output)))\n\t\t}\n\t\treturn []byte(stdout), []byte(stderr), baseErr\n\t}\n\n\tif !done { // timeout case,\n\t\treturn []byte(stdout), []byte(stderr), ErrSSHExecuteTimedout.\n\t\t\tWrap(err, \"Execute command over SSH timedout for '%s@%s:%s'\", e.Config.User, e.Config.Server, e.Config.Port).\n\t\t\tWithProperty(ErrPropSSHCommand, cmd).\n\t\t\tWithProperty(ErrPropSSHStdout, stdout).\n\t\t\tWithProperty(ErrPropSSHStderr, 
stderr)\n\t}\n\n\treturn []byte(stdout), []byte(stderr), nil\n}",
"func RunCommandAndStoreOutput(vmi *v1.VirtualMachineInstance, command string, timeout time.Duration) (string, error) {\n\tvirtClient := kubevirt.Client()\n\n\topts := &kubecli.SerialConsoleOptions{ConnectionTimeout: timeout}\n\tstream, err := virtClient.VirtualMachineInstance(vmi.Namespace).SerialConsole(vmi.Name, opts)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tconn := stream.AsConn()\n\tdefer conn.Close()\n\n\t_, err = fmt.Fprintf(conn, \"%s\\n\", command)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tscanner := bufio.NewScanner(conn)\n\tif !skipInput(scanner) {\n\t\treturn \"\", fmt.Errorf(\"failed to run [%s] at VMI %s (skip input)\", command, vmi.Name)\n\t}\n\tif !scanner.Scan() {\n\t\treturn \"\", fmt.Errorf(\"failed to run [%s] at VMI %s\", command, vmi.Name)\n\t}\n\treturn scanner.Text(), nil\n}",
"func runCommand(command string, commandPath string) (stdOut string, stdErr string, exitCode int, err error) {\n\treturn runCommandWithTimeout(command, commandPath, \"1h\")\n}",
"func (h *Host) ExecWithOutput(cmd string) (string, error) {\n\tsession, err := h.sshClient.NewSession()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer session.Close()\n\n\toutput, err := session.CombinedOutput(cmd)\n\tif err != nil {\n\t\treturn \"\", nil\n\t}\n\n\treturn strings.TrimSpace(string(output)), nil\n}",
"func (ssh *SSH) RunCmd(body string, expect *regexp.Regexp) (result string, err error) {\n\tif err = ssh.initializeSession(); err != nil {\n\t\treturn\n\t}\n\n\tssh.stdinBuf.Write([]byte(body + \"\\r\"))\n\n\tif expect != nil {\n\t\tresult, err = waitForExpected(ssh.stdoutBuf, expect)\n\t} else {\n\t\tresult, err = waitForExpected(ssh.stdoutBuf, ssh.prompt)\n\t}\n\tresult = ssh.prompt.ReplaceAllString(result, \"\")\n\tresult = ssh.utf8ArtefactRemover.ReplaceAllString(result, \"\")\n\tresult = ssh.nonASCIIremover.ReplaceAllString(result, \"\")\n\n\treturn result, err\n}",
"func RunCommand(vmi *v1.VirtualMachineInstance, command string, timeout time.Duration) error {\n\terr := ExpectBatch(vmi, []expect.Batcher{\n\t\t&expect.BSnd{S: \"\\n\"},\n\t\t&expect.BExp{R: PromptExpression},\n\t\t&expect.BSnd{S: command + \"\\n\"},\n\t\t&expect.BExp{R: PromptExpression},\n\t\t&expect.BSnd{S: \"echo $?\\n\"},\n\t\t&expect.BCas{C: []expect.Caser{\n\t\t\t&expect.Case{\n\t\t\t\tR: ShellSuccessRegexp,\n\t\t\t\tT: expect.OK(),\n\t\t\t},\n\t\t\t&expect.Case{\n\t\t\t\tR: ShellFailRegexp,\n\t\t\t\tT: expect.Fail(expect.NewStatus(codes.Unavailable, command+\" failed\")),\n\t\t\t},\n\t\t}},\n\t}, timeout)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to run [%s] at VMI %s, error: %v\", command, vmi.Name, err)\n\t}\n\treturn nil\n}",
"func rcRunCommand(ctx context.Context, in Params) (out Params, err error) {\n\tcommand, err := in.GetString(\"command\")\n\tif err != nil {\n\t\tcommand = \"\"\n\t}\n\n\tvar opt = map[string]string{}\n\terr = in.GetStructMissingOK(\"opt\", &opt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar arg = []string{}\n\terr = in.GetStructMissingOK(\"arg\", &arg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturnType, err := in.GetString(\"returnType\")\n\tif err != nil {\n\t\treturnType = \"COMBINED_OUTPUT\"\n\t}\n\n\tvar httpResponse http.ResponseWriter\n\thttpResponse, err = in.GetHTTPResponseWriter()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"response object is required\\n\" + err.Error())\n\t}\n\n\tvar allArgs = []string{}\n\tif command != \"\" {\n\t\t// Add the command e.g.: ls to the args\n\t\tallArgs = append(allArgs, command)\n\t}\n\t// Add all from arg\n\tallArgs = append(allArgs, arg...)\n\n\t// Add flags to args for e.g. --max-depth 1 comes in as { max-depth 1 }.\n\t// Convert it to [ max-depth, 1 ] and append to args list\n\tfor key, value := range opt {\n\t\tif len(key) == 1 {\n\t\t\tallArgs = append(allArgs, \"-\"+key)\n\t\t} else {\n\t\t\tallArgs = append(allArgs, \"--\"+key)\n\t\t}\n\t\tallArgs = append(allArgs, value)\n\t}\n\n\t// Get the path for the current executable which was used to run rclone.\n\tex, err := os.Executable()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcmd := exec.CommandContext(ctx, ex, allArgs...)\n\n\tif returnType == \"COMBINED_OUTPUT\" {\n\t\t// Run the command and get the output for error and stdout combined.\n\n\t\tout, err := cmd.CombinedOutput()\n\n\t\tif err != nil {\n\t\t\treturn Params{\n\t\t\t\t\"result\": string(out),\n\t\t\t\t\"error\": true,\n\t\t\t}, nil\n\t\t}\n\t\treturn Params{\n\t\t\t\"result\": string(out),\n\t\t\t\"error\": false,\n\t\t}, nil\n\t} else if returnType == \"STREAM_ONLY_STDOUT\" {\n\t\tcmd.Stdout = httpResponse\n\t} else if returnType == \"STREAM_ONLY_STDERR\" 
{\n\t\tcmd.Stderr = httpResponse\n\t} else if returnType == \"STREAM\" {\n\t\tcmd.Stdout = httpResponse\n\t\tcmd.Stderr = httpResponse\n\t} else {\n\t\treturn nil, fmt.Errorf(\"unknown returnType %q\", returnType)\n\t}\n\n\terr = cmd.Run()\n\treturn nil, err\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
RunCommandWithOutput is a wrapper around the SSH client to run a command abstracts the SSH connection details from the EC2 client interface RunCommandWithOutput provides the stdout and stderr from the command | func (ins *EC2RemoteClient) RunCommandWithOutput(cmd string) (exitStatus int, stdoutBuf bytes.Buffer, stderrBuf bytes.Buffer, err error) {
exitStatus, stdoutBuf, stderrBuf, err = ins.cmdClient.RunCommandWithOutput(cmd)
return exitStatus, stdoutBuf, stderrBuf, err
} | [
"func RunCommandWithOutput(session *ssh.Session, log logrus.FieldLogger, command string, w io.Writer) (err error) {\n\tdefer func() {\n\t\tif err != nil && session != nil {\n\t\t\terrClose := session.Close()\n\t\t\tif errClose != nil {\n\t\t\t\tlog.WithError(err).Error(\"failed to close SSH session\")\n\t\t\t}\n\t\t}\n\t}()\n\tvar stderr io.Reader\n\tstderr, err = session.StderrPipe()\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\n\tvar stdout io.Reader\n\tstdout, err = session.StdoutPipe()\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\n\tvar wg sync.WaitGroup\n\twg.Add(2)\n\terrCh := make(chan error, 2)\n\tsink := make(chan string)\n\tdone := make(chan struct{})\n\n\tgo func() {\n\t\terrCh <- stream(stdout, sink)\n\t\twg.Done()\n\t}()\n\tgo func() {\n\t\terrCh <- stream(stderr, sink)\n\t\twg.Done()\n\t}()\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(errCh)\n\t}()\n\tgo func() {\n\t\tw := bufio.NewWriter(w)\n\t\tfor line := range sink {\n\t\t\t_, err := w.Write([]byte(line))\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"failed to write to w: %v\", err)\n\t\t\t}\n\t\t}\n\t\tw.Flush()\n\t\tclose(done)\n\t}()\n\n\terr = session.Start(command)\n\tif err != nil {\n\t\treturn trace.Wrap(err, \"failed to start %q\", command)\n\t}\n\n\terr = session.Wait()\n\tsession.Close()\n\tsession = nil // Avoid second close\n\tfor err := range errCh {\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to stream: %v\", err)\n\t\t}\n\t}\n\tclose(sink)\n\t<-done\n\n\treturn trace.Wrap(err)\n}",
"func (h *Host) ExecWithOutput(cmd string) (string, error) {\n\tsession, err := h.sshClient.NewSession()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer session.Close()\n\n\toutput, err := session.CombinedOutput(cmd)\n\tif err != nil {\n\t\treturn \"\", nil\n\t}\n\n\treturn strings.TrimSpace(string(output)), nil\n}",
"func (sc *sshclient) RunWithResults(address string, command string) (string, error) {\n\tclient, err := ssh.Dial(\"tcp\", address, sc.config)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Could not connect to address %s:%v \", address, err)\n\t}\n\n\t// Each ClientConn can support multiple interactive sessions,\n\t// represented by a Session.\n\tsession, err := client.NewSession()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Could not create session at address %s:%v \", address, err)\n\t}\n\tdefer session.Close()\n\n\tresultdata, err := session.Output(command)\n\tif err != nil {\n\t\treturn string(resultdata), fmt.Errorf(\"Command '%s' at address %s produced an error:%v \", command, address, err)\n\t}\n\n\treturn string(resultdata), nil\n}",
"func (vtctldclient *VtctldClientProcess) ExecuteCommandWithOutput(args ...string) (string, error) {\n\tvar resultByte []byte\n\tvar resultStr string\n\tvar err error\n\tretries := 10\n\tretryDelay := 1 * time.Second\n\tpArgs := []string{\"--server\", vtctldclient.Server}\n\tif *isCoverage {\n\t\tpArgs = append(pArgs, \"--test.coverprofile=\"+getCoveragePath(\"vtctldclient-\"+args[0]+\".out\"), \"--test.v\")\n\t}\n\tpArgs = append(pArgs, args...)\n\tfor i := 1; i <= retries; i++ {\n\t\ttmpProcess := exec.Command(\n\t\t\tvtctldclient.Binary,\n\t\t\tfilterDoubleDashArgs(pArgs, vtctldclient.VtctldClientMajorVersion)...,\n\t\t)\n\t\tlog.Infof(\"Executing vtctldclient with command: %v (attempt %d of %d)\", strings.Join(tmpProcess.Args, \" \"), i, retries)\n\t\tresultByte, err = tmpProcess.CombinedOutput()\n\t\tresultStr = string(resultByte)\n\t\tif err == nil || !shouldRetry(resultStr) {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(retryDelay)\n\t}\n\treturn filterResultWhenRunsForCoverage(resultStr), err\n}",
"func RunCommandAndStoreOutput(vmi *v1.VirtualMachineInstance, command string, timeout time.Duration) (string, error) {\n\tvirtClient := kubevirt.Client()\n\n\topts := &kubecli.SerialConsoleOptions{ConnectionTimeout: timeout}\n\tstream, err := virtClient.VirtualMachineInstance(vmi.Namespace).SerialConsole(vmi.Name, opts)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tconn := stream.AsConn()\n\tdefer conn.Close()\n\n\t_, err = fmt.Fprintf(conn, \"%s\\n\", command)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tscanner := bufio.NewScanner(conn)\n\tif !skipInput(scanner) {\n\t\treturn \"\", fmt.Errorf(\"failed to run [%s] at VMI %s (skip input)\", command, vmi.Name)\n\t}\n\tif !scanner.Scan() {\n\t\treturn \"\", fmt.Errorf(\"failed to run [%s] at VMI %s\", command, vmi.Name)\n\t}\n\treturn scanner.Text(), nil\n}",
"func ExecuteWithOutput(cmd *exec.Cmd) (outStr string, err error) {\n\t// connect to stdout and stderr for filtering purposes\n\terrPipe, err := cmd.StderrPipe()\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"cmd\": cmd.Args,\n\t\t}).Fatal(\"Couldn't connect to command's stderr\")\n\t}\n\toutPipe, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"cmd\": cmd.Args,\n\t\t}).Fatal(\"Couldn't connect to command's stdout\")\n\t}\n\t_ = bufio.NewReader(errPipe)\n\toutReader := bufio.NewReader(outPipe)\n\n\t// start the command and filter the output\n\tif err = cmd.Start(); err != nil {\n\t\treturn \"\", err\n\t}\n\toutScanner := bufio.NewScanner(outReader)\n\tfor outScanner.Scan() {\n\t\toutStr += outScanner.Text() + \"\\n\"\n\t\tif log.GetLevel() == log.DebugLevel {\n\t\t\tfmt.Println(outScanner.Text())\n\t\t}\n\t}\n\terr = cmd.Wait()\n\treturn outStr, err\n}",
"func RunSSHCommand(SSHHandle *ssh.Client, cmd string, sudo bool, bg bool, logger *log.Logger) (retCode int, stdout, stderr []string) {\n\tlogger.Println(\"Running cmd \" + cmd)\n\tvar stdoutBuf, stderrBuf bytes.Buffer\n\tsshSession, err := SSHHandle.NewSession()\n\tif err != nil {\n\t\tlogger.Printf(\"SSH session creation failed! %s\", err)\n\t\treturn -1, nil, nil\n\t}\n\tdefer sshSession.Close()\n\n\tmodes := ssh.TerminalModes{\n\t\tssh.ECHO: 0, // disable echoing\n\t\tssh.TTY_OP_ISPEED: 14400, // input speed = 14.4kbaud\n\t\tssh.TTY_OP_OSPEED: 14400, // output speed = 14.4kbaud\n\t}\n\n\tif err = sshSession.RequestPty(\"xterm\", 80, 40, modes); err != nil {\n\t\tlogger.Println(\"SSH session Pty creation failed!\")\n\t\treturn -1, nil, nil\n\t}\n\n\tsshOut, err := sshSession.StdoutPipe()\n\tif err != nil {\n\t\tlogger.Println(\"SSH session StdoutPipe creation failed!\")\n\t\treturn -1, nil, nil\n\t}\n\tsshErr, err := sshSession.StderrPipe()\n\tif err != nil {\n\t\tlogger.Println(\"SSH session StderrPipe creation failed!\")\n\t\treturn -1, nil, nil\n\t}\n\n\tshout := io.MultiWriter(&stdoutBuf, (*LogWriter)(logger))\n\tssherr := io.MultiWriter(&stderrBuf, (*LogWriter)(logger))\n\n\tgo func() {\n\t\tio.Copy(shout, sshOut)\n\t}()\n\tgo func() {\n\n\t\tio.Copy(ssherr, sshErr)\n\t}()\n\n\tif bg {\n\t\tcmd = \"nohup sh -c \\\"\" + cmd + \" 2>&1 >/dev/null </dev/null & \\\"\"\n\t} else {\n\t\tcmd = \"sh -c \\\"\" + cmd + \"\\\"\"\n\t}\n\n\tif sudo {\n\t\tcmd = SudoCmd(cmd)\n\t}\n\n\tlogger.Println(\"Running command : \" + cmd)\n\tif err = sshSession.Run(cmd); err != nil {\n\t\tlogger.Println(\"failed command : \" + cmd)\n\t\tswitch v := err.(type) {\n\t\tcase *ssh.ExitError:\n\t\t\tretCode = v.Waitmsg.ExitStatus()\n\t\tdefault:\n\t\t\tretCode = -1\n\t\t}\n\t} else {\n\t\tlogger.Println(\"sucess command : \" + cmd)\n\t\tretCode = 0\n\t}\n\n\tstdout = strings.Split(stdoutBuf.String(), \"\\n\")\n\tstderr = strings.Split(stderrBuf.String(), 
\"\\n\")\n\tlogger.Println(stdout)\n\tlogger.Println(stderr)\n\tlogger.Println(\"Return code : \" + strconv.Itoa(retCode))\n\n\treturn retCode, stdout, stderr\n\n}",
"func (client *Client) Run(command string, silent bool) (output string, err error) {\n\tkeys := ssh.Auth{\n\t\tKeys: []string{client.PrivateKeyFile},\n\t}\n\tsshterm, err := ssh.NewNativeClient(client.User, client.Host, \"SSH-2.0-MyCustomClient-1.0\", client.Port, &keys, nil)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to request shell - %s\", err)\n\t}\n\tif !silent {\n\t\tlog.Printf(\"Running: ssh -i \\\"%s\\\" %s@%s %s\", client.PrivateKeyFile, client.User, client.Host, command)\n\t}\n\tr, _, err := sshterm.Start(command)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to start command - %s\", err)\n\t}\n\tsshterm.Wait()\n\tresponse, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to read response - %s\", err)\n\t}\n\treturn string(response), nil\n}",
"func (e *SSHExecutor) Execute(cmd string, sudo bool, timeout ...time.Duration) ([]byte, []byte, error) {\n\t// try to acquire root permission\n\tif e.Sudo || sudo {\n\t\tcmd = fmt.Sprintf(\"sudo -H -u root bash -c \\\"%s\\\"\", cmd)\n\t}\n\n\t// set a basic PATH in case it's empty on login\n\tcmd = fmt.Sprintf(\"PATH=$PATH:/usr/bin:/usr/sbin %s\", cmd)\n\n\tif e.Locale != \"\" {\n\t\tcmd = fmt.Sprintf(\"export LANG=%s; %s\", e.Locale, cmd)\n\t}\n\n\t// run command on remote host\n\t// default timeout is 60s in easyssh-proxy\n\tif len(timeout) == 0 {\n\t\ttimeout = append(timeout, executeDefaultTimeout)\n\t}\n\n\tstdout, stderr, done, err := e.Config.Run(cmd, timeout...)\n\n\tzap.L().Info(\"SSHCommand\",\n\t\tzap.String(\"host\", e.Config.Server),\n\t\tzap.String(\"port\", e.Config.Port),\n\t\tzap.String(\"cmd\", cmd),\n\t\tzap.Error(err),\n\t\tzap.String(\"stdout\", stdout),\n\t\tzap.String(\"stderr\", stderr))\n\n\tif err != nil {\n\t\tbaseErr := ErrSSHExecuteFailed.\n\t\t\tWrap(err, \"Failed to execute command over SSH for '%s@%s:%s'\", e.Config.User, e.Config.Server, e.Config.Port).\n\t\t\tWithProperty(ErrPropSSHCommand, cmd).\n\t\t\tWithProperty(ErrPropSSHStdout, stdout).\n\t\t\tWithProperty(ErrPropSSHStderr, stderr)\n\t\tif len(stdout) > 0 || len(stderr) > 0 {\n\t\t\toutput := strings.TrimSpace(strings.Join([]string{stdout, stderr}, \"\\n\"))\n\t\t\tbaseErr = baseErr.\n\t\t\t\tWithProperty(cliutil.SuggestionFromFormat(\"Command output on remote host %s:\\n%s\\n\",\n\t\t\t\t\te.Config.Server,\n\t\t\t\t\tcolor.YellowString(output)))\n\t\t}\n\t\treturn []byte(stdout), []byte(stderr), baseErr\n\t}\n\n\tif !done { // timeout case,\n\t\treturn []byte(stdout), []byte(stderr), ErrSSHExecuteTimedout.\n\t\t\tWrap(err, \"Execute command over SSH timedout for '%s@%s:%s'\", e.Config.User, e.Config.Server, e.Config.Port).\n\t\t\tWithProperty(ErrPropSSHCommand, cmd).\n\t\t\tWithProperty(ErrPropSSHStdout, stdout).\n\t\t\tWithProperty(ErrPropSSHStderr, 
stderr)\n\t}\n\n\treturn []byte(stdout), []byte(stderr), nil\n}",
"func (v *vcsCmd) runOutput(dir string, cmd string, keyval ...string) ([]byte, error) {\n\treturn v.run1(dir, cmd, keyval, true)\n}",
"func SSHRunCommand(client *ssh.Client, cmd string) (string, error) {\n\tsession, err := client.NewSession()\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer session.Close()\n\n\tvar stdout bytes.Buffer\n\tvar stderr bytes.Buffer\n\tsession.Stdout = &stdout\n\tsession.Stderr = &stderr\n\n\terr = session.Run(cmd)\n\n\toutput := stdout.String() + stderr.String()\n\n\tif err != nil {\n\t\treturn output, fmt.Errorf(\"failed to execute command \\\"%s\\\" via SSH client: %s\", cmd, err)\n\t}\n\n\treturn output, nil\n}",
"func (ipt *IPTablesExecution) runWithOutput(args []string, stdout io.Writer) error {\n\tsh := \"/bin/sh\"\n\targs = append([]string{sh, \"-c\"}, args...)\n\t//\targs = append([]string{ipt.path}, args...)\n\t//\tif ipt.hasWait {\n\t//\t\targs = append(args, \"--wait\")\n\t//\t} else {\n\t//\t\tfmu, err := newXtablesFileLock()\n\t//\t\tif err != nil {\n\t//\t\t\treturn err\n\t//\t\t}\n\t//\t\tul, err := fmu.tryLock()\n\t//\t\tif err != nil {\n\t//\t\t\tsyscall.Close(fmu.fd)\n\t//\t\t\treturn err\n\t//\t\t}\n\t//\t\tdefer ul.Unlock()\n\t//\t}\n\n\tvar stderr bytes.Buffer\n\tcmd := exec.Cmd{\n\t\tPath:/* ipt.path */ sh,\n\t\tArgs: args,\n\t\tStdout: stdout,\n\t\tStderr: &stderr,\n\t}\n\n\tif err := cmd.Run(); err != nil {\n\t\tswitch e := err.(type) {\n\t\tcase *exec.ExitError:\n\t\t\t//\t\t\treturn &Error{*e, cmd, stderr.String(), nil}\n\t\t\tstatus := (*e).Sys().(syscall.WaitStatus).ExitStatus()\n\t\t\treturn fmt.Errorf(\"running %v: exit status %v: %v\", cmd.Args, status, stderr.String())\n\t\tdefault:\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func RunWithOutput(cmd *exec.Cmd, name string, settings ...SettingsFunc) string {\n\tr := DefaultRunner()\n\treturn r.RunWithOutput(cmd, name, settings...)\n}",
"func (ins *EC2RemoteClient) RunCommand(cmd string) (exitStatus int, err error) {\n\texitStatus, err = ins.cmdClient.RunCommand(cmd)\n\treturn exitStatus, err\n}",
"func ExecCommandOutput(cmd string, args []string) (string, int, error) {\n\tLogDebug.Print(\"ExecCommandOutput called with \", cmd, args)\n\tc := exec.Command(cmd, args...)\n\tvar b bytes.Buffer\n\tc.Stdout = &b\n\tc.Stderr = &b\n\n\tif err := c.Start(); err != nil {\n\t\treturn \"\", 999, err\n\t}\n\n\t//TODO we could set a timeout here if needed\n\n\terr := c.Wait()\n\tout := string(b.Bytes())\n\n\tfor _, line := range strings.Split(out, \"\\n\") {\n\t\tLogDebug.Print(\"out :\", line)\n\t}\n\n\tif err != nil {\n\t\t//check the rc of the exec\n\t\tif badnews, ok := err.(*exec.ExitError); ok {\n\t\t\tif status, ok := badnews.Sys().(syscall.WaitStatus); ok {\n\t\t\t\treturn out, status.ExitStatus(), fmt.Errorf(\"rc=%d\", status.ExitStatus())\n\t\t\t}\n\t\t} else {\n\t\t\treturn out, 888, fmt.Errorf(\"unknown error\")\n\t\t}\n\t}\n\n\treturn out, 0, nil\n}",
"func runCommandWithOutput(prog string, args ...string) (string, error) {\n\tcmd := exec.Command(prog, args...)\n\tvar outb, errb bytes.Buffer\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = &outb\n\tcmd.Stderr = &errb\n\tif err := cmd.Run(); err != nil {\n\t\ttext := prog + \" \" + strings.Join(args, \" \")\n\t\treturn \"\", fmt.Errorf(\"Failed to run command %s due to error %v\", text, err)\n\t}\n\tanswer := outb.String()\n\tif len(answer) == 0 {\n\t\tanswer = errb.String()\n\t}\n\treturn answer, nil\n}",
"func RunCommandWithOutputLiveWrapper(c *OSCommand, command string, output func(string) string) error {\n\treturn c.RunCommand(command)\n}",
"func (commandRunner CommandRunner) RunReturnOutput(cmdLine []string, workdir string, env map[string]string) (string, error) {\n\n\tcommand, err := commandRunner.createCommand(cmdLine, workdir, env)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tcommandRunner.logCommand(cmdLine, env)\n\n\toutput, err := command.CombinedOutput()\n\tif err != nil && log.DebugMode {\n\t\tlog.Error(err)\n\t}\n\n\treturn strings.TrimSpace(string(output)), err\n\n}",
"func RunCommandWithOutputLiveWrapper(c *OSCommand, command string, output func(string) string) error {\n\tcmd := c.ExecutableFromString(command)\n\tcmd.Env = append(cmd.Env, \"LANG=en_US.UTF-8\", \"LC_ALL=en_US.UTF-8\")\n\n\tvar stderr bytes.Buffer\n\tcmd.Stderr = &stderr\n\n\tptmx, err := pty.Start(cmd)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgo func() {\n\t\tscanner := bufio.NewScanner(ptmx)\n\t\tscanner.Split(scanWordsWithNewLines)\n\t\tfor scanner.Scan() {\n\t\t\ttoOutput := strings.Trim(scanner.Text(), \" \")\n\t\t\t_, _ = ptmx.WriteString(output(toOutput))\n\t\t}\n\t}()\n\n\terr = cmd.Wait()\n\tptmx.Close()\n\tif err != nil {\n\t\treturn errors.New(stderr.String())\n\t}\n\n\treturn nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
BackgroundCommand is a wrapper around the SSH client to run a command abstracts the SSH connection details from the EC2 client interface | func (ins *EC2RemoteClient) BackgroundCommand(cmd string, discardOutput bool) (int, error) {
exitStatus, err := ins.cmdClient.BackgroundCommand(cmd, discardOutput)
return exitStatus, err
} | [
"func (client *SSHClient) RunCommandInBackground(ctx context.Context, cmd *SSHCommand) error {\n\tif ctx == nil {\n\t\tpanic(\"nil context provided to RunCommandInBackground()\")\n\t}\n\n\tsession, err := client.newSession()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer session.Close()\n\n\tmodes := ssh.TerminalModes{\n\t\tssh.ECHO: 1, // enable echoing\n\t\tssh.TTY_OP_ISPEED: 14400, // input speed = 14.4kbaud\n\t\tssh.TTY_OP_OSPEED: 14400, // output speed = 14.4kbaud\n\t}\n\tsession.RequestPty(\"xterm-256color\", 80, 80, modes)\n\n\tstdin, err := session.StdinPipe()\n\tif err != nil {\n\t\tlog.Errorf(\"Could not get stdin: %s\", err)\n\t}\n\n\tgo func() {\n\t\t<-ctx.Done()\n\t\t_, err := stdin.Write([]byte{3})\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"write ^C error: %s\", err)\n\t\t}\n\t\terr = session.Wait()\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"wait error: %s\", err)\n\t\t}\n\t\tif err = session.Signal(ssh.SIGHUP); err != nil {\n\t\t\tlog.Errorf(\"failed to kill command: %s\", err)\n\t\t}\n\t\tif err = session.Close(); err != nil {\n\t\t\tlog.Errorf(\"failed to close session: %s\", err)\n\t\t}\n\t}()\n\t_, err = runCommand(session, cmd)\n\treturn err\n}",
"func ExternalCommandBackground(cmdString string) (*exec.Cmd, error) {\n\tvar errbuf bytes.Buffer\n\tcmd := splitString(cmdString)\n\tcmd.Stderr = &errbuf\n\n\tif err := cmd.Start(); err != nil {\n\t\treturn cmd, MyError{cmdString, \"\", errbuf.String(), err}\n\t}\n\treturn cmd, nil\n}",
"func cmdSSH() {\n\tswitch state := status(B2D.VM); state {\n\tcase vmUnregistered:\n\t\tlog.Fatalf(\"%s is not registered.\", B2D.VM)\n\tcase vmRunning:\n\t\tcmdParts := append(strings.Fields(B2D.SSHPrefix), fmt.Sprintf(\"%d\", B2D.SSHPort), \"docker@localhost\")\n\t\tif err := cmd(cmdParts[0], cmdParts[1:]...); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\tdefault:\n\t\tlog.Fatalf(\"%s is not running.\", B2D.VM)\n\t}\n}",
"func externalSSHCommand(o commandOptions) (*exec.Cmd, error) {\n\tvar execArgs []string\n\n\t// Don't check the host certificate as part of the testing an external SSH\n\t// client, this is done elsewhere.\n\texecArgs = append(execArgs, \"-oStrictHostKeyChecking=no\")\n\texecArgs = append(execArgs, \"-oUserKnownHostsFile=/dev/null\")\n\n\t// ControlMaster is often used by applications like Ansible.\n\tif o.controlPath != \"\" {\n\t\texecArgs = append(execArgs, \"-oControlMaster=auto\")\n\t\texecArgs = append(execArgs, \"-oControlPersist=1s\")\n\t\texecArgs = append(execArgs, \"-oConnectTimeout=2\")\n\t\texecArgs = append(execArgs, fmt.Sprintf(\"-oControlPath=%v\", o.controlPath))\n\t}\n\n\t// The -tt flag is used to force PTY allocation. It's often used by\n\t// applications like Ansible.\n\tif o.forcePTY {\n\t\texecArgs = append(execArgs, \"-tt\")\n\t}\n\n\t// Connect to node on the passed in port.\n\texecArgs = append(execArgs, fmt.Sprintf(\"-p %v\", o.nodePort))\n\n\t// Build proxy command.\n\tproxyCommand := []string{\"ssh\"}\n\tproxyCommand = append(proxyCommand, \"-oStrictHostKeyChecking=no\")\n\tproxyCommand = append(proxyCommand, \"-oUserKnownHostsFile=/dev/null\")\n\tif o.forwardAgent {\n\t\tproxyCommand = append(proxyCommand, \"-oForwardAgent=yes\")\n\t}\n\tproxyCommand = append(proxyCommand, fmt.Sprintf(\"-p %v\", o.proxyPort))\n\tproxyCommand = append(proxyCommand, `%r@localhost -s proxy:%h:%p`)\n\n\t// Add in ProxyCommand option, needed for all Teleport connections.\n\texecArgs = append(execArgs, fmt.Sprintf(\"-oProxyCommand=%v\", strings.Join(proxyCommand, \" \")))\n\n\t// Add in the host to connect to and the command to run when connected.\n\texecArgs = append(execArgs, Host)\n\texecArgs = append(execArgs, o.command)\n\n\t// Find the OpenSSH binary.\n\tsshpath, err := exec.LookPath(\"ssh\")\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\n\t// Create an exec.Command and tell it where to find the SSH agent.\n\tcmd, err := 
exec.Command(sshpath, execArgs...), nil\n\tif err != nil {\n\t\treturn nil, trace.Wrap(err)\n\t}\n\tcmd.Env = []string{fmt.Sprintf(\"SSH_AUTH_SOCK=%v\", o.socketPath)}\n\n\treturn cmd, nil\n}",
"func SSH(params *SSHInput) error {\n\t// read the manifest\n\tzoneManifest, err := ReadManifest(params.ManifestPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tborderEIP, err := getTerraformOutput(\"border_eip\", zoneManifest.TerraformState)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\targs := []string{\n\t\t\"-q\",\n\t\t\"-o\",\n\t\tfmt.Sprintf(\n\t\t\t\"ProxyCommand=ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -W %%h:%%p ubuntu@%s\", borderEIP),\n\t\t\"-o\", \"UserKnownHostsFile=/dev/null\",\n\t\t\"-o\", \"StrictHostKeyChecking=no\",\n\t\tfmt.Sprintf(\"ubuntu@%s\", params.Host),\n\t}\n\targs = append(args, params.Args...)\n\n\tcmd := exec.Command(\"ssh\", args...)\n\tcmd.Env = os.Environ()\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\treturn cmd.Run()\n}",
"func RunSSHCommand(SSHHandle *ssh.Client, cmd string, sudo bool, bg bool, logger *log.Logger) (retCode int, stdout, stderr []string) {\n\tlogger.Println(\"Running cmd \" + cmd)\n\tvar stdoutBuf, stderrBuf bytes.Buffer\n\tsshSession, err := SSHHandle.NewSession()\n\tif err != nil {\n\t\tlogger.Printf(\"SSH session creation failed! %s\", err)\n\t\treturn -1, nil, nil\n\t}\n\tdefer sshSession.Close()\n\n\tmodes := ssh.TerminalModes{\n\t\tssh.ECHO: 0, // disable echoing\n\t\tssh.TTY_OP_ISPEED: 14400, // input speed = 14.4kbaud\n\t\tssh.TTY_OP_OSPEED: 14400, // output speed = 14.4kbaud\n\t}\n\n\tif err = sshSession.RequestPty(\"xterm\", 80, 40, modes); err != nil {\n\t\tlogger.Println(\"SSH session Pty creation failed!\")\n\t\treturn -1, nil, nil\n\t}\n\n\tsshOut, err := sshSession.StdoutPipe()\n\tif err != nil {\n\t\tlogger.Println(\"SSH session StdoutPipe creation failed!\")\n\t\treturn -1, nil, nil\n\t}\n\tsshErr, err := sshSession.StderrPipe()\n\tif err != nil {\n\t\tlogger.Println(\"SSH session StderrPipe creation failed!\")\n\t\treturn -1, nil, nil\n\t}\n\n\tshout := io.MultiWriter(&stdoutBuf, (*LogWriter)(logger))\n\tssherr := io.MultiWriter(&stderrBuf, (*LogWriter)(logger))\n\n\tgo func() {\n\t\tio.Copy(shout, sshOut)\n\t}()\n\tgo func() {\n\n\t\tio.Copy(ssherr, sshErr)\n\t}()\n\n\tif bg {\n\t\tcmd = \"nohup sh -c \\\"\" + cmd + \" 2>&1 >/dev/null </dev/null & \\\"\"\n\t} else {\n\t\tcmd = \"sh -c \\\"\" + cmd + \"\\\"\"\n\t}\n\n\tif sudo {\n\t\tcmd = SudoCmd(cmd)\n\t}\n\n\tlogger.Println(\"Running command : \" + cmd)\n\tif err = sshSession.Run(cmd); err != nil {\n\t\tlogger.Println(\"failed command : \" + cmd)\n\t\tswitch v := err.(type) {\n\t\tcase *ssh.ExitError:\n\t\t\tretCode = v.Waitmsg.ExitStatus()\n\t\tdefault:\n\t\t\tretCode = -1\n\t\t}\n\t} else {\n\t\tlogger.Println(\"sucess command : \" + cmd)\n\t\tretCode = 0\n\t}\n\n\tstdout = strings.Split(stdoutBuf.String(), \"\\n\")\n\tstderr = strings.Split(stderrBuf.String(), 
\"\\n\")\n\tlogger.Println(stdout)\n\tlogger.Println(stderr)\n\tlogger.Println(\"Return code : \" + strconv.Itoa(retCode))\n\n\treturn retCode, stdout, stderr\n\n}",
"func (client *Client) Run(command string, silent bool) (output string, err error) {\n\tkeys := ssh.Auth{\n\t\tKeys: []string{client.PrivateKeyFile},\n\t}\n\tsshterm, err := ssh.NewNativeClient(client.User, client.Host, \"SSH-2.0-MyCustomClient-1.0\", client.Port, &keys, nil)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to request shell - %s\", err)\n\t}\n\tif !silent {\n\t\tlog.Printf(\"Running: ssh -i \\\"%s\\\" %s@%s %s\", client.PrivateKeyFile, client.User, client.Host, command)\n\t}\n\tr, _, err := sshterm.Start(command)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to start command - %s\", err)\n\t}\n\tsshterm.Wait()\n\tresponse, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to read response - %s\", err)\n\t}\n\treturn string(response), nil\n}",
"func (h *Host) SSH(cmds []string, resultVar string, stdout io.Writer, eo ExecOption) (err error) {\n\tif h.client == nil {\n\t\tif h.client, err = h.GetGosshConnect(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := h.setupSession(stdout); err != nil {\n\t\treturn errors.Wrapf(err, \"setupSession\")\n\t}\n\n\tfor _, cmd := range cmds {\n\t\twrap := CmdWrap{Cmd: h.SubstituteResultVars(cmd), ResultVar: resultVar, ExecOption: eo}\n\t\th.cmdChan <- wrap\n\t\th.waitCmdExecuted(wrap)\n\t}\n\n\treturn nil\n}",
"func executeCmd(remote_host string) string {\n\treadConfig(\"config\")\n\t//\tconfig, err := sshAuthKey(Conf.SshUser, Conf.SshKeyPath, Conf.Passphrase)\n\t//\tif err != nil {\n\t//\t\tlog.Fatal(err)\n\t//\t}\n\tvar config *ssh.ClientConfig\n\n\tif Conf.Method == \"password\" {\n\t\tconfig = sshAuthPassword(Conf.SshUser, Conf.SshPassword)\n\t} else if Conf.Method == \"key\" {\n\t\tconfig = sshAuthKey(Conf.SshUser, Conf.SshKeyPath, Conf.Passphrase)\n\t\t//\t\tif err != nil {\n\t\t//\t\t\tlog.Fatal(err)\n\t\t//\t\t}\n\t} else {\n\t\tlog.Fatal(`Please set method \"password\" or \"key\" at configuration file`)\n\t}\n\n\tclient, err := ssh.Dial(\"tcp\", remote_host+\":\"+Conf.SshPort, config)\n\tif err != nil {\n\t\tlog.Fatal(\"Failed to dial: \", err)\n\t}\n\n\tsession, err := client.NewSession()\n\tif err != nil {\n\t\tlog.Fatal(\"Failed to create session: \", err)\n\t}\n\tdefer session.Close()\n\n\tvar b bytes.Buffer\n\tsession.Stdout = &b\n\tif err := session.Run(Conf.Command); err != nil {\n\t\tlog.Fatal(\"Failed to run: \" + err.Error())\n\t}\n\tfmt.Println(\"\\x1b[31;1m\" + remote_host + \"\\x1b[0m\")\n\treturn b.String()\n}",
"func sshAgent() (io.ReadWriteCloser, error) {\r\n cmd := exec.Command(\"wsl\", \"bash\", \"-c\", \"PS1=x source ~/.bashrc; socat - UNIX:\\\\$SSH_AUTH_SOCK\")\r\n stdin, err := cmd.StdinPipe()\r\n if err != nil {\r\n return nil, err\r\n }\r\n stdout, err := cmd.StdoutPipe()\r\n if err != nil {\r\n return nil, err\r\n }\r\n if err := cmd.Start(); err != nil {\r\n return nil, err\r\n }\r\n return &sshAgentCmd{stdout, stdin, cmd}, nil\r\n}",
"func (p *PromProxy) executeCmdBackground(logger *logging.BaseLogger, format string, args ...interface{}) (*os.Process, error) {\n\tcmd := fmt.Sprintf(format, args...)\n\tlogger.Infof(\"Executing command: %s\", cmd)\n\tparts := strings.Split(cmd, \" \")\n\tc := exec.Command(parts[0], parts[1:]...) // #nosec\n\terr := c.Start()\n\tif err != nil {\n\t\tlogger.Errorf(\"%s, command failed!\", cmd)\n\t\treturn nil, err\n\t}\n\treturn c.Process, nil\n}",
"func (s *SSHOrch) ExecSSH(userserver, cmd string) string {\n\tclient := s.doLookup(userserver)\n\n\tsession, err := client.NewSession()\n\tif err != nil {\n\t\tlog.Fatalln(\"Failed to create session:\", err)\n\t}\n\tdefer session.Close()\n\t/*\n\t\tstdout, err := session.StdoutPipe()\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\"Failed to pipe session stdout:\", err)\n\t\t}\n\n\t\tstderr, err := session.StderrPipe()\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\"Failed to pipe session stderr:\", err)\n\t\t}\n\t*/\n\n\tbuf, err := session.CombinedOutput(cmd)\n\tif err != nil {\n\t\tlog.Fatalln(\"Failed to execute cmd:\", err)\n\t}\n\n\t// Network read pushed to background\n\t/*readExec := func(r io.Reader, ch chan []byte) {\n\t\tif str, err := ioutil.ReadAll(r); err != nil {\n\t\t\tch <- str\n\t\t}\n\t}\n\toutCh := make(chan []byte)\n\tgo readExec(stdout, outCh)\n\t*/\n\treturn string(buf)\n}",
"func (e *SSHExecutor) Execute(cmd string, sudo bool, timeout ...time.Duration) ([]byte, []byte, error) {\n\t// try to acquire root permission\n\tif e.Sudo || sudo {\n\t\tcmd = fmt.Sprintf(\"sudo -H -u root bash -c \\\"%s\\\"\", cmd)\n\t}\n\n\t// set a basic PATH in case it's empty on login\n\tcmd = fmt.Sprintf(\"PATH=$PATH:/usr/bin:/usr/sbin %s\", cmd)\n\n\tif e.Locale != \"\" {\n\t\tcmd = fmt.Sprintf(\"export LANG=%s; %s\", e.Locale, cmd)\n\t}\n\n\t// run command on remote host\n\t// default timeout is 60s in easyssh-proxy\n\tif len(timeout) == 0 {\n\t\ttimeout = append(timeout, executeDefaultTimeout)\n\t}\n\n\tstdout, stderr, done, err := e.Config.Run(cmd, timeout...)\n\n\tzap.L().Info(\"SSHCommand\",\n\t\tzap.String(\"host\", e.Config.Server),\n\t\tzap.String(\"port\", e.Config.Port),\n\t\tzap.String(\"cmd\", cmd),\n\t\tzap.Error(err),\n\t\tzap.String(\"stdout\", stdout),\n\t\tzap.String(\"stderr\", stderr))\n\n\tif err != nil {\n\t\tbaseErr := ErrSSHExecuteFailed.\n\t\t\tWrap(err, \"Failed to execute command over SSH for '%s@%s:%s'\", e.Config.User, e.Config.Server, e.Config.Port).\n\t\t\tWithProperty(ErrPropSSHCommand, cmd).\n\t\t\tWithProperty(ErrPropSSHStdout, stdout).\n\t\t\tWithProperty(ErrPropSSHStderr, stderr)\n\t\tif len(stdout) > 0 || len(stderr) > 0 {\n\t\t\toutput := strings.TrimSpace(strings.Join([]string{stdout, stderr}, \"\\n\"))\n\t\t\tbaseErr = baseErr.\n\t\t\t\tWithProperty(cliutil.SuggestionFromFormat(\"Command output on remote host %s:\\n%s\\n\",\n\t\t\t\t\te.Config.Server,\n\t\t\t\t\tcolor.YellowString(output)))\n\t\t}\n\t\treturn []byte(stdout), []byte(stderr), baseErr\n\t}\n\n\tif !done { // timeout case,\n\t\treturn []byte(stdout), []byte(stderr), ErrSSHExecuteTimedout.\n\t\t\tWrap(err, \"Execute command over SSH timedout for '%s@%s:%s'\", e.Config.User, e.Config.Server, e.Config.Port).\n\t\t\tWithProperty(ErrPropSSHCommand, cmd).\n\t\t\tWithProperty(ErrPropSSHStdout, stdout).\n\t\t\tWithProperty(ErrPropSSHStderr, 
stderr)\n\t}\n\n\treturn []byte(stdout), []byte(stderr), nil\n}",
"func (client *ExternalClient) Shell(pty bool, args ...string) error {\n\targs = append(client.BaseArgs, args...)\n\tcmd := getSSHCmd(client.BinaryPath, pty, args...)\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\treturn cmd.Run()\n}",
"func (s *Server) DoBackground(target string, cmds ...Command) error {\n\t// ensure command id is generated\n\tfor _, cmd := range cmds {\n\t\tcmd.ID = randomCommandID()\n\n\t\t// put in command queue\n\t\tif err := s.putCommandQueue(target, cmd); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// put in callback list\n\t\ts.registerCommandCallback(cmd.ID, cmd)\n\t}\n\n\treturn nil\n}",
"func ExecBackgroundCtx(ctx context.Context, cmd *cobra.Command, stdIn io.Reader, stdOut, stdErr io.Writer, args ...string) *errgroup.Group {\n\tcmd.SetIn(stdIn)\n\tcmd.SetOut(io.MultiWriter(stdOut, debugStdout))\n\tcmd.SetErr(io.MultiWriter(stdErr, debugStderr))\n\n\tif args == nil {\n\t\targs = []string{}\n\t}\n\tcmd.SetArgs(args)\n\n\teg := &errgroup.Group{}\n\teg.Go(func() error {\n\t\tdefer cmd.SetIn(nil)\n\t\treturn cmd.ExecuteContext(ctx)\n\t})\n\n\treturn eg\n}",
"func (sc *sshclient) RunInterativeShell(address string) {\n\tsig := make(chan os.Signal, 1)\n\tsignal.Notify(sig, syscall.SIGTERM, syscall.SIGINT)\n\tctx, cancel := context.WithCancel(context.Background())\n\n\tgo func() {\n\t\tif err := sc.runclient(ctx, address); err != nil {\n\t\t\tkuttilog.Print(0, err)\n\t\t}\n\t\tcancel()\n\t}()\n\n\tselect {\n\tcase <-sig:\n\t\tcancel()\n\tcase <-ctx.Done():\n\t}\n}",
"func (h *Host) Exec(cmd string) error {\n\tsession, err := h.sshClient.NewSession()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer session.Close()\n\n\tstdout, err := session.StdoutPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tstderr, err := session.StderrPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogrus.Debugf(\"executing command: %s\", cmd)\n\tif err := session.Start(cmd); err != nil {\n\t\treturn err\n\t}\n\n\tmultiReader := io.MultiReader(stdout, stderr)\n\toutputScanner := bufio.NewScanner(multiReader)\n\n\tfor outputScanner.Scan() {\n\t\tlogrus.Debugf(\"%s: %s\", h.FullAddress(), outputScanner.Text())\n\t}\n\tif err := outputScanner.Err(); err != nil {\n\t\tlogrus.Errorf(\"%s: %s\", h.FullAddress(), err.Error())\n\t}\n\n\treturn nil\n}",
"func (bc *BaseCluster) SSH(m Machine, cmd string) ([]byte, []byte, error) {\n\tvar stdout bytes.Buffer\n\tvar stderr bytes.Buffer\n\tclient, err := bc.SSHClient(m.IP())\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tdefer client.Close()\n\n\tsession, err := client.NewSession()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tdefer session.Close()\n\n\tsession.Stdout = &stdout\n\tsession.Stderr = &stderr\n\terr = session.Run(cmd)\n\toutBytes := bytes.TrimSpace(stdout.Bytes())\n\terrBytes := bytes.TrimSpace(stderr.Bytes())\n\treturn outBytes, errBytes, err\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
CopyFile copies a file from the local filesystem to that on the EC2 instance | func (ins *EC2RemoteClient) CopyFile(source string, destination string) error {
err := ins.cmdClient.CopyFile(source, destination)
return err
} | [
"func copyFile(srcFile string, destFile string) error {\n\tsrcReader, err := assets.FS.Open(srcFile)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error opening %s file: %w\", srcFile, err)\n\t}\n\tdefer srcReader.Close()\n\n\tdestReader, err := os.Create(destFile)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating runtime %s file: %w\", destFile, err)\n\t}\n\tdefer destReader.Close()\n\n\t_, err = io.Copy(destReader, srcReader)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error copying source file to destination file: %w\", err)\n\t}\n\n\treturn nil\n}",
"func CopyFile(c *gin.Context) {\n\tparamContentPath := c.Param(\"contentPath\")\n\tparamFrom := c.Query(\"from\")\n\n\tif paramFrom == \"\" {\n\t\tc.String(http.StatusBadRequest, \"param `from` is required\")\n\t\treturn\n\t}\n\n\tif err := EnsureSecurePaths(paramContentPath, paramFrom); err != nil {\n\t\tlog.Println(\"checkpath failed:\", err)\n\t\tc.String(http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\n\tfullDstPath := path.Join(MountedVolume, paramContentPath)\n\tfullSrcPath := path.Join(MountedVolume, paramFrom)\n\n\tif err := manager.CopyFile(fullSrcPath, fullDstPath); err != nil {\n\t\tlog.Println(err)\n\t\tc.String(http.StatusBadRequest, err.Error())\n\t\treturn\n\t}\n\tc.String(http.StatusCreated, \"Copied\")\n}",
"func (h *Host) CopyFile(file os.File, remotePath string, permissions string) error {\n\tsession, err := h.sshClient.NewSession()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer session.Close()\n\n\tscpClient := scp.Client{\n\t\tSession: session,\n\t\tTimeout: time.Second * 60,\n\t\tRemoteBinary: \"scp\",\n\t}\n\n\treturn scpClient.CopyFromFile(file, remotePath, permissions)\n}",
"func (n *mockAgent) copyFile(src, dst string) error {\n\treturn nil\n}",
"func (c *CollectHostCopy) copyFile(src, dst string, result CollectorResult) error {\n\tin, err := os.Open(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer in.Close()\n\n\trelDest := c.relBundlePath(dst)\n\terr = result.SaveResult(c.BundlePath, relDest, in)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func CopyFile(source, destination string) error {\n\tin, err := os.Open(source)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tout, err := os.Create(destination)\n\tif err != nil {\n\t\t_ = in.Close()\n\t\treturn err\n\t}\n\n\t_, err = io.Copy(out, in)\n\tif err != nil {\n\t\t_ = in.Close()\n\t\t_ = out.Close()\n\t\treturn err\n\t}\n\n\t_ = in.Close()\n\treturn out.Close()\n}",
"func (a *DefaultClient) Copy(srcFile, tgtFile vfs.File) error {\n\t// Can't use url.PathEscape here since that will escape everything (even the directory separators)\n\tsrcURL, err := url.Parse(strings.Replace(srcFile.URI(), \"%\", \"%25\", -1))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttgtURL, err := url.Parse(tgtFile.Location().(*Location).ContainerURL())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcontainerURL := azblob.NewContainerURL(*tgtURL, a.pipeline)\n\tblobURL := containerURL.NewBlockBlobURL(utils.RemoveLeadingSlash(tgtFile.Path()))\n\tctx := context.Background()\n\tresp, err := blobURL.StartCopyFromURL(ctx, *srcURL, azblob.Metadata{}, azblob.ModifiedAccessConditions{},\n\t\tazblob.BlobAccessConditions{}, azblob.DefaultAccessTier, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor resp.CopyStatus() == azblob.CopyStatusPending {\n\t\ttime.Sleep(2 * time.Second)\n\t}\n\n\tif resp.CopyStatus() == azblob.CopyStatusSuccess {\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"copy failed ERROR[%s]\", resp.ErrorCode())\n}",
"func CopyFile(source, destination string) error {\n\tinput, err := ioutil.ReadFile(source)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn ioutil.WriteFile(destination, input, 0644)\n}",
"func CopyFile(source, target string) error {\n\treturn copyFileMode(source, target, 0644)\n}",
"func (ssh *SSH) CopyFile(ctx context.Context, source, target string) (result entities.CommandResult, err error) {\n\terrorChan := make(chan error)\n\tdoneChan := make(chan struct{})\n\n\tgo func() {\n\t\tdefer close(doneChan)\n\t\tdefer close(errorChan)\n\t\tdefer func() {\n\t\t\tdoneChan <- struct{}{}\n\t\t}()\n\n\t\tsftpClient, err := sftp.NewClient(ssh.client)\n\t\tif err != nil {\n\t\t\terrorChan <- fmt.Errorf(\"SFTP client creating error error %v\", err)\n\t\t\treturn\n\t\t}\n\t\tdefer sftpClient.Close()\n\n\t\trf, err := openFile(sftpClient, target, os.O_CREATE|os.O_WRONLY)\n\t\tif err != nil {\n\t\t\terrorChan <- fmt.Errorf(\"target file %s open: %v\", target, err)\n\t\t\treturn\n\t\t}\n\t\tdefer rf.Close()\n\n\t\tlf, err := openFile(sftpClient, source, os.O_RDONLY)\n\t\tif err != nil {\n\t\t\terrorChan <- fmt.Errorf(\"source file %s open: %v\", target, err)\n\t\t\treturn\n\t\t}\n\t\tdefer lf.Close()\n\n\t\t_, err = io.Copy(rf, lf)\n\t\tif err != nil {\n\t\t\terrorChan <- fmt.Errorf(\"can't copy: %v\", err)\n\t\t}\n\t}()\n\n\tresult = entities.CommandResult{Body: fmt.Sprintf(\"/<mt-bulk>copy %s %s\", source, target)}\n\n\tselect {\n\tcase <-ctx.Done():\n\t\tresult.Error = fmt.Errorf(\"context cancelled\")\n\tcase <-time.After(30 * time.Second):\n\t\tresult.Error = fmt.Errorf(\"copy file timeouted\")\n\tcase err := <-errorChan:\n\t\tresult.Error = err\n\tcase <-doneChan:\n\t\tresult.Responses = append(result.Responses, result.Body)\n\t}\n\treturn result, result.Error\n}",
"func CopyFile(sourceFile string, destinationFile string) error {\n\tcontent, err := ioutil.ReadFile(sourceFile)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to read source file %s: %v\", sourceFile, err)\n\t}\n\terr = ioutil.WriteFile(destinationFile, content, 0666)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to write destination file %s: %v\", destinationFile, err)\n\t}\n\treturn nil\n}",
"func copyFile(src, dst string) error {\n\tvar err error\n\tvar srcfd *os.File\n\tvar dstfd *os.File\n\tvar srcinfo os.FileInfo\n\n\tif srcfd, err = os.Open(src); err != nil {\n\t\treturn err\n\t}\n\tdefer srcfd.Close()\n\n\tif dstfd, err = os.Create(dst); err != nil {\n\t\treturn err\n\t}\n\tdefer dstfd.Close()\n\n\tif _, err = io.Copy(dstfd, srcfd); err != nil {\n\t\treturn err\n\t}\n\tif srcinfo, err = os.Stat(src); err != nil {\n\t\treturn err\n\t}\n\treturn os.Chmod(dst, srcinfo.Mode())\n}",
"func copyFile(source string, dest string) error {\n\tln, err := os.Readlink(source)\n\tif err == nil {\n\t\treturn os.Symlink(ln, dest)\n\t}\n\ts, err := os.Open(source)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer s.Close()\n\n\td, err := os.Create(dest)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer d.Close()\n\n\t_, err = io.Copy(d, s)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsi, err := os.Stat(source)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Chmod(dest, si.Mode())\n\n\treturn err\n}",
"func copyFile(src string, dest string) error {\n\tvar (\n\t\tsrcf *os.File\n\t\tdestf *os.File\n\t\terr error\n\t)\n\n\t// Open source file\n\tsrcf, err = os.Open(src)\n\n\tif err == nil {\n\t\tdefer srcf.Close()\n\n\t\t// Open destination file\n\t\tdestf, err = os.Create(dest)\n\n\t\tif err == nil {\n\t\t\tdefer destf.Close()\n\n\t\t\t// Copy data\n\t\t\t_, err = io.Copy(destf, srcf)\n\t\t}\n\t}\n\n\treturn err\n}",
"func CopyFile(dest, src string) error {\n\ts, err := os.Open(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer s.Close()\n\n\td, err := os.Create(dest)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := io.Copy(d, s); err != nil {\n\t\td.Close()\n\t\treturn err\n\t}\n\n\treturn d.Close()\n}",
"func (e *BackupEnv) CopyFile(source string, dest string) (err error, backedByte int64) {\n sourcefile, err := os.Open(source)\n if err != nil {\n return err, 0\n }\n\n defer sourcefile.Close()\n\n var destfile io.Writer\n if (e.Options.Compress) {\n dest += \".lz4\"\n dfile, err := os.Create(dest)\n if err != nil {\n return err, 0\n }\n defer dfile.Close()\n destfile = lz4.NewWriter(dfile)\n } else {\n dfile, err := os.Create(dest)\n destfile = dfile\n if err != nil {\n return err, 0\n }\n defer dfile.Close()\n }\n\n _, err = io.Copy(destfile, sourcefile)\n if err != nil {\n return err, 0\n }\n\n sourceinfo, err := os.Stat(source);\n if err != nil {\n return err, 0\n }\n\n return nil, sourceinfo.Size();\n}",
"func FileCopy(srcFile, destFile string, dispOutput bool) error {\n\t// Open original file\n\toriginalFile, err := os.Open(srcFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer originalFile.Close()\n\n\t// Create new file\n\tnewFile, err := os.Create(destFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer newFile.Close()\n\n\t// Copy the bytes to destination from source\n\tbytesWritten, err := io.Copy(newFile, originalFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif dispOutput {\n\t\tlog.Printf(\"Copied %d bytes.\", bytesWritten)\n\t}\n\n\t// Commit the file contents\n\t// Flushes memory to disk\n\terr = newFile.Sync()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (s *SSHRunner) Copy(f assets.CopyableFile) error {\n\tdst := path.Join(path.Join(f.GetTargetDir(), f.GetTargetName()))\n\n\t// For small files, don't bother risking being wrong for no performance benefit\n\tif f.GetLength() > 2048 {\n\t\texists, err := fileExists(s, f, dst)\n\t\tif err != nil {\n\t\t\tklog.Infof(\"existence check for %s: %v\", dst, err)\n\t\t}\n\n\t\tif exists {\n\t\t\tklog.Infof(\"copy: skipping %s (exists)\", dst)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tsrc := f.GetSourcePath()\n\tklog.Infof(\"scp %s --> %s (%d bytes)\", src, dst, f.GetLength())\n\tif f.GetLength() == 0 {\n\t\tklog.Warningf(\"0 byte asset: %+v\", f)\n\t}\n\n\tsess, err := s.session()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"NewSession\")\n\t}\n\tdefer func() {\n\t\tif err := sess.Close(); err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tklog.Errorf(\"session close: %v\", err)\n\t\t\t}\n\t\t}\n\t}()\n\n\tw, err := sess.StdinPipe()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"StdinPipe\")\n\t}\n\t// The scpcmd below *should not* return until all data is copied and the\n\t// StdinPipe is closed. 
But let's use errgroup to make it explicit.\n\tvar g errgroup.Group\n\tvar copied int64\n\n\tg.Go(func() error {\n\t\tdefer w.Close()\n\t\theader := fmt.Sprintf(\"C%s %d %s\\n\", f.GetPermissions(), f.GetLength(), f.GetTargetName())\n\t\tfmt.Fprint(w, header)\n\t\tif f.GetLength() == 0 {\n\t\t\tklog.Warningf(\"asked to copy a 0 byte asset: %+v\", f)\n\t\t\tfmt.Fprint(w, \"\\x00\")\n\t\t\treturn nil\n\t\t}\n\n\t\tcopied, err = io.Copy(w, f)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"io.Copy\")\n\t\t}\n\t\tif copied != int64(f.GetLength()) {\n\t\t\treturn fmt.Errorf(\"%s: expected to copy %d bytes, but copied %d instead\", f.GetTargetName(), f.GetLength(), copied)\n\t\t}\n\t\tfmt.Fprint(w, \"\\x00\")\n\t\treturn nil\n\t})\n\n\tscp := fmt.Sprintf(\"sudo test -d %s && sudo scp -t %s\", f.GetTargetDir(), f.GetTargetDir())\n\tmtime, err := f.GetModTime()\n\tif err != nil {\n\t\tklog.Infof(\"error getting modtime for %s: %v\", dst, err)\n\t} else if mtime != (time.Time{}) {\n\t\tscp += fmt.Sprintf(\" && sudo touch -d \\\"%s\\\" %s\", mtime.Format(layout), dst)\n\t}\n\tout, err := sess.CombinedOutput(scp)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%s: %s\\noutput: %s\", scp, err, out)\n\t}\n\treturn g.Wait()\n}",
"func (app *Application) copyFile (file os.FileInfo, source, destination string) error {\n\n fileData, _ := ioutil.ReadFile (source)\n\n // detecting template and processing it\n if strings.HasSuffix (source, \".go.template\") {\n var buffer bytes.Buffer\n tmpl := template.Must (template.New (\"app\").Parse (string(fileData)))\n tmpl.Execute (&buffer, app)\n\n // overwriting file contents with data from processed template\n fileData = buffer.Bytes ()\n\n // removing tmp suffixes from file names\n destination = strings.Replace(destination, \".template\", \"\", -1)\n }\n\n if err := ioutil.WriteFile (destination, fileData, file.Mode ()); err == nil {\n fmt.Println(green, \" create:\", reset, destination)\n } else {\n return err\n }\n return nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
WriteBytesToFile writes a []byte to a specified file on the EC2 instance | func (ins *EC2RemoteClient) WriteBytesToFile(source []byte, destination string) error {
err := ins.cmdClient.WriteBytesToFile(source, destination)
return err
} | [
"func WriteBytesToFile(fn string, b []byte) {\n\tfil, err := os.Create(os.ExpandEnv(fn))\n\tif err != nil {\n\t\tchk.Panic(_fileio_err03, fn)\n\t}\n\tdefer fil.Close()\n\tif _, err = fil.Write(b); err != nil {\n\t\tchk.Panic(_fileio_err04, err.Error())\n\t}\n}",
"func writeFileBytes(filename string, bytesToWrite []byte) {\n\tvar file, err = os.OpenFile(filename, os.O_RDWR, 0644)\n\tHandle(\"\", err)\n\tdefer file.Close()\n\t_, err = file.Write(bytesToWrite)\n\terr = file.Sync()\n\tHandle(\"\", err)\n}",
"func WriteBytes(ctx context.Context, data []byte, filename string) error {\n\tif strings.HasPrefix(filename, \"gs://\") {\n\t\treturn writeGCSObject(ctx, data, filename)\n\t}\n\treturn ioutil.WriteFile(filename, data, os.ModePerm)\n}",
"func SaveBytesToFile(content []byte, outputFile string, overwrite bool) error {\n\texists, err := FileExists(outputFile)\n\tif err != nil {\n\t\treturn errors.Wrap(err, fmt.Sprintf(\"error while checking if the output file %q already exists\", outputFile))\n\t}\n\tif exists && !overwrite {\n\t\treturn fmt.Errorf(\"cannot persist the data since the output file %q already exists\", outputFile)\n\t}\n\tparentFolder := filepath.Dir(outputFile)\n\texists, err = DirExists(parentFolder)\n\tif err != nil {\n\t\treturn errors.Wrap(err, fmt.Sprintf(\"error while checking if the parent folder of %q exists\", outputFile))\n\t}\n\tif !exists {\n\t\terr = os.MkdirAll(parentFolder, DefaultPermission)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, fmt.Sprintf(\"failed to create tree structure to %q to store the data\", parentFolder))\n\t\t}\n\t}\n\terr = ioutil.WriteFile(outputFile, content, DefaultPermission)\n\tif err != nil {\n\t\treturn errors.Wrap(err, fmt.Sprintf(\"error while saving the data to file %q\", outputFile))\n\t}\n\treturn nil\n}",
"func FileWrite(f *os.File, b []byte) (int, error)",
"func WriteBytes(file *os.File, bytes []byte, particularOffset bool, addr int64) {\n\tfmt.Printf(\"%04X\\n\", addr)\n\tvar jmpFileLoc int64\n\tif particularOffset {\n\t\toriginalOffset, _ := file.Seek(0, 1)\n\t\tjmpFileLoc = originalOffset\n\t\tfile.Seek(addr, 0)\n\t}\n\tbytesWritten, err := file.Write(bytes)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Printf(\"Wrote %d bytes.\\n\", bytesWritten)\n\tif particularOffset {\n\t\tfile.Seek(jmpFileLoc, 0)\n\t}\n}",
"func (sshConfig *SSHConfig) CopyBytesToFile(contentBytes []byte, remotePath string, permissions string) (err error) {\n\tbyteReader := bytes.NewReader(contentBytes)\n\terr = sshConfig.Copy(byteReader, remotePath, permissions, int64(len(contentBytes)))\n\treturn\n}",
"func WriteStringToFile(filename string, c []uint8) error {\n\tf, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0755)\n\tCheck(err)\n\tfile, err := f.WriteString(string(c))\n\tCheck(err)\n\tf.Sync()\n\n\tfmt.Printf(\"wrote %d bytes\\n\", file)\n\treturn err\n}",
"func writeBytes(data []byte, filename, contentType, bucketName string) error {\n\tctx, client := createGCSClient()\n\n\tw := client.Bucket(bucketName).Object(filename).NewWriter(ctx)\n\tw.ObjectAttrs.ContentType = contentType\n\n\tif _, err := io.Copy(w, bytes.NewReader(data)); err != nil {\n\t\tlog.Fatalln(err)\n\t\treturn err\n\t}\n\tif err := w.Close(); err != nil {\n\t\tlog.Fatalln(err)\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func writeKeyToFile(keyBytes []byte, saveFileTo string) error {\n err := ioutil.WriteFile(saveFileTo, keyBytes, 0600)\n if err != nil {\n return err\n }\n\n log.Printf(\"Key saved to: %s\", saveFileTo)\n return nil\n}",
"func WriteToFile(fileName string, bytes []byte) error {\n\tf, err := os.Create(fileName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() error {\n\t\terr := f.Close()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}()\n\n\t_, errorWrite := f.Write(bytes)\n\tif errorWrite != nil {\n\t\treturn errorWrite\n\t}\n\n\treturn nil\n}",
"func WriteByte(path string, content []byte) (int, error) {\n\tfile, err := os.Create(path)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer file.Close()\n\n\treturn file.Write(content)\n}",
"func WriteBinaryFile(filePath string, contents []byte) error {\n _ = EnsureDir(filepath.Dir(filePath))\n return ioutil.WriteFile(filePath, contents, CreateModePerm)\n}",
"func FileWriteAt(f *os.File, b []byte, off int64) (int, error)",
"func (si *StorageInstance) WriteFromBytes(url string, data []byte) error {\n\tfilename := stripFileURLPrefix(url)\n\tif err := si.fsUtils.MkdirAll(filepath.Dir(filename), 0755 /* -rwxr-xr-x */); err != nil {\n\t\treturn fmt.Errorf(\"couldn't create directories for file %s: %w\", filename, err)\n\t}\n\tif err := si.fsUtils.WriteFile(filename, data, 0644); err != nil {\n\t\treturn fmt.Errorf(\"couldn't write file %s: %w\", filename, err)\n\t}\n\treturn nil\n}",
"func (fw *FileWrapper) WriteBytes(b []byte) (int, error) {\n\t_, err := fw.SeekRel(0)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tn, err := fw.Write(b)\n\tif err == nil {\n\t\tfw.pos += int64(n)\n\t}\n\n\treturn n, err\n}",
"func writeKeyToFile(keyBytes []byte, saveFileTo string) error {\n\terr := ioutil.WriteFile(saveFileTo, keyBytes, 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func writeKeyToFile(keyBytes []byte, saveFileTo string) error {\n\terr := ioutil.WriteFile(saveFileTo, keyBytes, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func WriteBWByte(ByteSlice [][]byte, fileName string) {\n\tnewfile, err := os.Create(fileName)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer newfile.Close()\n\n\tfor i := 0; i < len(ByteSlice); i++ {\n\t\tnewfile.Write(ByteSlice[i])\n\t\t// newfile.WriteString(\"\\n\")\n\t\tnewfile.Sync()\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deprecated: Use SecretVersion_State.Descriptor instead. | func (SecretVersion_State) EnumDescriptor() ([]byte, []int) {
return file_google_cloud_secrets_v1beta1_resources_proto_rawDescGZIP(), []int{1, 0}
} | [
"func (*SecretChange) Descriptor() ([]byte, []int) {\n\treturn edgelq_secrets_proto_v1alpha_secret_change_proto_rawDescGZIP(), []int{0}\n}",
"func (*SecretChange_Current) Descriptor() ([]byte, []int) {\n\treturn edgelq_secrets_proto_v1alpha_secret_change_proto_rawDescGZIP(), []int{0, 2}\n}",
"func (*SecretChange_Removed) Descriptor() ([]byte, []int) {\n\treturn edgelq_secrets_proto_v1alpha_secret_change_proto_rawDescGZIP(), []int{0, 3}\n}",
"func (*SecretVersion) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_secrets_v1beta1_resources_proto_rawDescGZIP(), []int{1}\n}",
"func (*SecretChange_Modified) Descriptor() ([]byte, []int) {\n\treturn edgelq_secrets_proto_v1alpha_secret_change_proto_rawDescGZIP(), []int{0, 1}\n}",
"func (*SetStateSpec) Descriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{16}\n}",
"func (Instance_State) EnumDescriptor() ([]byte, []int) {\n\treturn file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{2, 0}\n}",
"func (*Secret) Descriptor() ([]byte, []int) {\n\treturn file_k8s_io_api_core_v1_generated_proto_rawDescGZIP(), []int{170}\n}",
"func (*SecretChange_Added) Descriptor() ([]byte, []int) {\n\treturn edgelq_secrets_proto_v1alpha_secret_change_proto_rawDescGZIP(), []int{0, 0}\n}",
"func (InstanceConfig_State) EnumDescriptor() ([]byte, []int) {\n\treturn file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{1, 1}\n}",
"func (*UpdateSecretKeyValueRequest) Descriptor() ([]byte, []int) {\n\treturn file_secret_proto_rawDescGZIP(), []int{4}\n}",
"func (*UpdateSecretKeyValueResponse) Descriptor() ([]byte, []int) {\n\treturn file_secret_proto_rawDescGZIP(), []int{5}\n}",
"func (*GetState) Descriptor() ([]byte, []int) {\n\treturn file_peer_chaincode_shim_proto_rawDescGZIP(), []int{1}\n}",
"func (*SetStateRequest) Descriptor() ([]byte, []int) {\n\treturn file_vm_vm_proto_rawDescGZIP(), []int{3}\n}",
"func (*SecretEdge) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{1}\n}",
"func (*StateVersionInfo) Descriptor() ([]byte, []int) {\n\treturn file_metastateService_proto_rawDescGZIP(), []int{7}\n}",
"func (*SetSecretRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{5}\n}",
"func (*GetStateMetadata) Descriptor() ([]byte, []int) {\n\treturn file_peer_chaincode_shim_proto_rawDescGZIP(), []int{2}\n}",
"func (*SetSecretResponse) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{6}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deprecated: Use SecretVersion.ProtoReflect.Descriptor instead. | func (*SecretVersion) Descriptor() ([]byte, []int) {
return file_google_cloud_secrets_v1beta1_resources_proto_rawDescGZIP(), []int{1}
} | [
"func (*Secret) Descriptor() ([]byte, []int) {\n\treturn file_k8s_io_api_core_v1_generated_proto_rawDescGZIP(), []int{170}\n}",
"func (*SecretReference) Descriptor() ([]byte, []int) {\n\treturn file_k8s_io_api_core_v1_generated_proto_rawDescGZIP(), []int{175}\n}",
"func (*SecretChange) Descriptor() ([]byte, []int) {\n\treturn edgelq_secrets_proto_v1alpha_secret_change_proto_rawDescGZIP(), []int{0}\n}",
"func (*CredentialsKVProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{1}\n}",
"func (*SecretMetadata) Descriptor() ([]byte, []int) {\n\treturn file_datatypes_proto_v1_protoconf_value_proto_rawDescGZIP(), []int{1}\n}",
"func (*SecretChange_Removed) Descriptor() ([]byte, []int) {\n\treturn edgelq_secrets_proto_v1alpha_secret_change_proto_rawDescGZIP(), []int{0, 3}\n}",
"func (*SecretChange_Modified) Descriptor() ([]byte, []int) {\n\treturn edgelq_secrets_proto_v1alpha_secret_change_proto_rawDescGZIP(), []int{0, 1}\n}",
"func (*SecretPayload) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_secrets_v1beta1_resources_proto_rawDescGZIP(), []int{3}\n}",
"func (*TokenProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{0}\n}",
"func (*SecretList) Descriptor() ([]byte, []int) {\n\treturn file_k8s_io_api_core_v1_generated_proto_rawDescGZIP(), []int{173}\n}",
"func (*SecretChange_Added) Descriptor() ([]byte, []int) {\n\treturn edgelq_secrets_proto_v1alpha_secret_change_proto_rawDescGZIP(), []int{0, 0}\n}",
"func (*SecretChange_Current) Descriptor() ([]byte, []int) {\n\treturn edgelq_secrets_proto_v1alpha_secret_change_proto_rawDescGZIP(), []int{0, 2}\n}",
"func (*UpdateSecretKeyValueRequest) Descriptor() ([]byte, []int) {\n\treturn file_secret_proto_rawDescGZIP(), []int{4}\n}",
"func (*DeleteSecretRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{7}\n}",
"func (*SecretEdge) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{1}\n}",
"func (*DeleteSecretRequest) Descriptor() ([]byte, []int) {\n\treturn file_secret_proto_rawDescGZIP(), []int{6}\n}",
"func (*SetSecretRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{5}\n}",
"func (*PasswordComplexityPolicyUpdate) Descriptor() ([]byte, []int) {\n\treturn file_management_proto_rawDescGZIP(), []int{38}\n}",
"func (*UpdateSecretKeyValueResponse) Descriptor() ([]byte, []int) {\n\treturn file_secret_proto_rawDescGZIP(), []int{5}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deprecated: Use SecretPayload.ProtoReflect.Descriptor instead. | func (*SecretPayload) Descriptor() ([]byte, []int) {
return file_google_cloud_secrets_v1beta1_resources_proto_rawDescGZIP(), []int{3}
} | [
"func (*PrivatePayload) Descriptor() ([]byte, []int) {\n\treturn file_gossip_message_proto_rawDescGZIP(), []int{17}\n}",
"func (*Secret) Descriptor() ([]byte, []int) {\n\treturn file_k8s_io_api_core_v1_generated_proto_rawDescGZIP(), []int{170}\n}",
"func (*LabelledPayload) Descriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{59}\n}",
"func (*ExternalPayload) Descriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{53}\n}",
"func (*SecretChange_Removed) Descriptor() ([]byte, []int) {\n\treturn edgelq_secrets_proto_v1alpha_secret_change_proto_rawDescGZIP(), []int{0, 3}\n}",
"func (*DeleteSecretRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_flow_grpc_secrets_proto_rawDescGZIP(), []int{7}\n}",
"func (*SecretChange) Descriptor() ([]byte, []int) {\n\treturn edgelq_secrets_proto_v1alpha_secret_change_proto_rawDescGZIP(), []int{0}\n}",
"func (*TokenProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{0}\n}",
"func (*SecretReference) Descriptor() ([]byte, []int) {\n\treturn file_k8s_io_api_core_v1_generated_proto_rawDescGZIP(), []int{175}\n}",
"func (*SecretChange_Added) Descriptor() ([]byte, []int) {\n\treturn edgelq_secrets_proto_v1alpha_secret_change_proto_rawDescGZIP(), []int{0, 0}\n}",
"func (*PyPIPayload) Descriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{44}\n}",
"func (*DeleteSecretRequest) Descriptor() ([]byte, []int) {\n\treturn file_secret_proto_rawDescGZIP(), []int{6}\n}",
"func (*SecretMetadata) Descriptor() ([]byte, []int) {\n\treturn file_datatypes_proto_v1_protoconf_value_proto_rawDescGZIP(), []int{1}\n}",
"func (*Communication_Payload) Descriptor() ([]byte, []int) {\n\treturn file_proto_google_fhir_proto_dstu2_resources_proto_rawDescGZIP(), []int{13, 0}\n}",
"func (*ApiJwt) Descriptor() ([]byte, []int) {\n\treturn file_protos_collectors_generic_proto_rawDescGZIP(), []int{1}\n}",
"func (*GetDelegationTokenResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{4}\n}",
"func (*SecretChange_Modified) Descriptor() ([]byte, []int) {\n\treturn edgelq_secrets_proto_v1alpha_secret_change_proto_rawDescGZIP(), []int{0, 1}\n}",
"func (*HelloRequest_Payload) Descriptor() ([]byte, []int) {\n\treturn file_dummy_proto_rawDescGZIP(), []int{0, 0}\n}",
"func (*ReadPayload) Descriptor() ([]byte, []int) {\n\treturn file_org_apache_beam_model_pipeline_v1_beam_runner_api_proto_rawDescGZIP(), []int{19}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Deprecated: Use Replication_UserManaged_Replica.ProtoReflect.Descriptor instead. | func (*Replication_UserManaged_Replica) Descriptor() ([]byte, []int) {
return file_google_cloud_secrets_v1beta1_resources_proto_rawDescGZIP(), []int{2, 1, 0}
} | [
"func (*ReplicaInfo) Descriptor() ([]byte, []int) {\n\treturn file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{0}\n}",
"func (*Replica) Descriptor() ([]byte, []int) {\n\treturn file_replica_replica_proto_rawDescGZIP(), []int{0}\n}",
"func (*SqlInstancesPromoteReplicaRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_sql_v1beta4_cloud_sql_proto_rawDescGZIP(), []int{22}\n}",
"func (ReplicaInfo_ReplicaType) EnumDescriptor() ([]byte, []int) {\n\treturn file_google_spanner_admin_instance_v1_spanner_instance_admin_proto_rawDescGZIP(), []int{0, 0}\n}",
"func (*CreateReplicaRequest) Descriptor() ([]byte, []int) {\n\treturn file_replica_replica_proto_rawDescGZIP(), []int{3}\n}",
"func (*RevokeUserPermissionMetadata) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_mdb_mysql_v1_user_service_proto_rawDescGZIP(), []int{12}\n}",
"func (*GetReplicaVisibleLengthResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{1}\n}",
"func (*CreateReplicaResponse) Descriptor() ([]byte, []int) {\n\treturn file_replica_replica_proto_rawDescGZIP(), []int{4}\n}",
"func (*Deprecation) Descriptor() ([]byte, []int) {\n\treturn file_external_cfgmgmt_response_nodes_proto_rawDescGZIP(), []int{8}\n}",
"func (*RevokeUserPermissionRequest) Descriptor() ([]byte, []int) {\n\treturn file_yandex_cloud_mdb_mysql_v1_user_service_proto_rawDescGZIP(), []int{11}\n}",
"func (*SqlInstancesStopReplicaRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_sql_v1beta4_cloud_sql_proto_rawDescGZIP(), []int{28}\n}",
"func (*GetDelegationTokenResponseProto) Descriptor() ([]byte, []int) {\n\treturn file_Security_proto_rawDescGZIP(), []int{4}\n}",
"func (*McUserProto) Descriptor() ([]byte, []int) {\n\treturn file_pkg_mcclient_mcuser_mcuser_proto_proto_rawDescGZIP(), []int{0}\n}",
"func (*ContactPreferencesMsg) Descriptor() ([]byte, []int) {\n\treturn file_user_management_user_management_service_proto_rawDescGZIP(), []int{23}\n}",
"func (*SqlInstancesStartReplicaRequest) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_sql_v1beta4_cloud_sql_proto_rawDescGZIP(), []int{27}\n}",
"func (*DatabaseInstance_SqlFailoverReplica) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_sql_v1beta4_cloud_sql_resources_proto_rawDescGZIP(), []int{14, 0}\n}",
"func (*SetRequest) Descriptor() ([]byte, []int) {\n\treturn file_replica_replica_proto_rawDescGZIP(), []int{5}\n}",
"func (*GetReplicaVisibleLengthRequestProto) Descriptor() ([]byte, []int) {\n\treturn file_ClientDatanodeProtocol_proto_rawDescGZIP(), []int{0}\n}",
"func (*LoginedUserRep) Descriptor() ([]byte, []int) {\n\treturn file_proto_user_proto_rawDescGZIP(), []int{3}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
buildLogMsg generates an empty CI test plan that prints msg to the build log. | func buildLogMsg(title, msg string) droneyaml.BuildItem {
return droneyaml.BuildItem{
Key: "Warning: " + title,
Build: droneyaml.Build{
Container: droneyaml.Container{
Image: "library/alpine:3.2",
Environment: droneyaml.MapEqualSlice([]string{"MSG=" + msg}),
},
Commands: []string{`echo "$MSG"`},
},
}
} | [
"func (t *tracer) buildMessage() string {\n\tif t.IsNull() {\n\t\treturn \"\"\n\t}\n\n\t// Note: this value is very important, it makes sure the internal calls of this package would not interfere with the real caller we want to catch\n\t// badly set and you will get a line number that does not match with the one corresponding to the call\n\tconst skipCallers int = 2\n\n\tmessage := t.taskSig\n\tif _, _, line, ok := runtime.Caller(skipCallers); ok {\n\t\tmessage += \" \" + t.funcName + t.callerParams + \" [\" + t.fileName + \":\" + strconv.Itoa(line) + \"]\"\n\t}\n\treturn message\n}",
"func buildNoRetryString(job string, outliers []string) string {\n\tnoRetryFmt := \"Failed non-flaky tests preventing automatic retry of %s:\\n\\n```\\n%s\\n```%s\"\n\textraFailedTests := \"\"\n\n\tlastIndex := len(outliers)\n\tif len(outliers) > maxFailedTestsToPrint {\n\t\tlastIndex = maxFailedTestsToPrint\n\t\textraFailedTests = fmt.Sprintf(\"\\n\\nand %d more.\", len(outliers)-maxFailedTestsToPrint)\n\t}\n\treturn fmt.Sprintf(noRetryFmt, job, strings.Join(outliers[:lastIndex], \"\\n\"), extraFailedTests)\n}",
"func (w *AlertMethod) BuildMessage(rule string, records []*alert.Record) (string, error) {\n\tmsg := fmt.Sprintf(\"%s\\n\", rule)\n\tfor _, record := range records {\n\t\ttext := record.Text\n\t\tif record.Fields != nil {\n\t\t\tvar fields = \"\"\n\t\t\tfor _, field := range record.Fields {\n\t\t\t\tfields += fmt.Sprintf(\"\\n%s: %d\", field.Key, field.Count)\n\t\t\t}\n\t\t\ttext += fields\n\t\t}\n\t\tmsg += text + \"\\n\"\n\t}\n\treturn msg, nil\n}",
"func sendTestLog(ctx context.Context, comm client.Communicator, conf *internal.TaskConfig, log *model.TestLog) (string, error) {\n\tif conf.ProjectRef.IsCedarTestResultsEnabled() {\n\t\treturn \"\", errors.Wrap(sendTestLogToCedar(ctx, conf.Task, comm, log), \"problem sending test logs to cedar\")\n\t}\n\n\ttd := client.TaskData{ID: conf.Task.Id, Secret: conf.Task.Secret}\n\tlogId, err := comm.SendTestLog(ctx, td, log)\n\treturn logId, errors.Wrap(err, \"problem sending test logs to evergreen\")\n}",
"func BuildMsg(from sdk.AccAddress, to sdk.AccAddress, coins sdk.Coins) sdk.Msg {\n\tinput := bank.NewInput(from, coins)\n\toutput := bank.NewOutput(to, coins)\n\tmsg := bank.NewMsgSend([]bank.Input{input}, []bank.Output{output})\n\treturn msg\n}",
"func BuildMessage(system *drone.System, repo *drone.Repo, build *drone.Build, tmpl string) string {\n\n\tpayload := &drone.Payload{\n\t\tSystem: system,\n\t\tRepo: repo,\n\t\tBuild: build,\n\t}\n\n\tmsg, err := template.RenderTrim(tmpl, payload)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\n\treturn msg\n}",
"func Log(msg string) {\n\tfmt.Println(color.BlueString(\"Builder:\") + \" \" + msg)\n}",
"func buildNewComment(jd *JobData, entries map[string]int, outliers []string) string {\n\tvar cmd string\n\tvar entryString []string\n\tif entries[jd.JobName] >= maxRetries {\n\t\tcmd = buildOutOfRetriesString(jd.JobName)\n\t\tlogWithPrefix(jd, \"expended all %d retries\\n\", maxRetries)\n\t} else if len(outliers) > 0 {\n\t\tcmd = buildNoRetryString(jd.JobName, outliers)\n\t\tlogWithPrefix(jd, \"%d failed tests are not flaky, cannot retry\\n\", len(outliers))\n\t} else {\n\t\tcmd = buildRetryString(jd.JobName, entries)\n\t\tlogWithPrefix(jd, \"all failed tests are flaky, triggering retry\\n\")\n\t}\n\t// print in sorted order so we can actually unit test the results\n\tvar keys []string\n\tfor test := range entries {\n\t\tkeys = append(keys, test)\n\t}\n\tsort.Strings(keys)\n\tfor _, test := range keys {\n\t\tentryString = append(entryString, fmt.Sprintf(\"%s | %d/%d\", test, entries[test], maxRetries))\n\t}\n\treturn fmt.Sprintf(commentTemplate, identifier, strings.Join(entryString, \"\\n\"), cmd)\n}",
"func startMessage(cfg *Config) {\n\tif len(cfg.Tasks) > 0 {\n\t\tcolor.Blue(\"You have %d tasks for execute\", len(cfg.Tasks))\n\t}\n\tif len(cfg.ParallelTasks) > 0 {\n\t\tcolor.Blue(\"You have %d parallel tasks for execute\", len(cfg.ParallelTasks))\n\t}\n}",
"func sendTestLog(ctx context.Context, comm client.Communicator, conf *internal.TaskConfig, log *model.TestLog) error {\n\treturn errors.Wrap(sendTestLogToCedar(ctx, conf.Task, comm, log), \"sending test logs to Cedar\")\n}",
"func (o *ControllerBuildOptions) generateBuildLogURL(podInterface typedcorev1.PodInterface, ns string, activity *v1.PipelineActivity, buildName string, pod *corev1.Pod, location v1.StorageLocation, settings *v1.TeamSettings, initGitCredentials bool, logMasker *kube.LogMasker) (string, error) {\n\n\tvar gitKind string\n\tif initGitCredentials && location.GitURL != \"\" {\n\t\tgitInfo, err := gits.ParseGitURL(location.GitURL)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrapf(err, \"could not parse git URL for storage URL %s\", location.GitURL)\n\t\t}\n\t\tgitKind, err = o.GitServerKind(gitInfo)\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrapf(err, \"could not determine git kind for storage URL %s\", location.GitURL)\n\t\t}\n\t}\n\tlog.Logger().Debugf(\"Collecting logs for %s to location %s\", activity.Name, location.Description())\n\tcoll, err := collector.NewCollector(location, o.Git(), gitKind)\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"could not create Collector for pod %s in namespace %s with settings %#v\", pod.Name, ns, settings)\n\t}\n\n\towner := activity.RepositoryOwner()\n\trepository := activity.RepositoryName()\n\tbranch := activity.BranchName()\n\tbuildNumber := activity.Spec.Build\n\tif buildNumber == \"\" {\n\t\tbuildNumber = \"1\"\n\t}\n\n\tpathDir := filepath.Join(\"jenkins-x\", \"logs\", owner, repository, branch)\n\tfileName := filepath.Join(pathDir, buildNumber+\".log\")\n\n\tvar clientErrs []error\n\tkubeClient, err := o.KubeClient()\n\tclientErrs = append(clientErrs, err)\n\ttektonClient, _, err := o.TektonClient()\n\tclientErrs = append(clientErrs, err)\n\tjx, _, err := o.JXClient()\n\tclientErrs = append(clientErrs, err)\n\n\terr = errorutil.CombineErrors(clientErrs...)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"there was a problem obtaining one of the clients\")\n\t}\n\n\ttektonLogger := logs.TektonLogger{\n\t\tJXClient: jx,\n\t\tKubeClient: kubeClient,\n\t\tTektonClient: tektonClient,\n\t\tNamespace: 
ns,\n\t}\n\n\tlog.Logger().Debugf(\"Capturing running build logs for %s\", activity.Name)\n\treader := streamMaskedRunningBuildLogs(&tektonLogger, activity, buildName, logMasker)\n\tdefer reader.Close()\n\n\tif initGitCredentials {\n\t\tgc := &credentials.StepGitCredentialsOptions{}\n\t\tcopy := *o.CommonOptions\n\t\tgc.CommonOptions = ©\n\t\tgc.BatchMode = true\n\t\tlog.Logger().Info(\"running: jx step git credentials\")\n\t\terr = gc.Run()\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Wrapf(err, \"Failed to setup git credentials\")\n\t\t}\n\t}\n\n\tlog.Logger().Infof(\"storing logs for activity %s into storage at %s\", activity.Name, fileName)\n\tlogURL, err := coll.CollectData(reader, fileName)\n\tif err != nil {\n\t\tlog.Logger().Errorf(\"failed to store logs for activity %s into storage at %s: %s\", activity.Name, fileName, err.Error())\n\t\treturn \"\", err\n\t}\n\tlog.Logger().Infof(\"stored logs for activity %s into storage at %s\", activity.Name, fileName)\n\n\treturn logURL, nil\n}",
"func (m *Main) BuildReport() {\n\tpass := true\n\tfor _, t := range m.Tests {\n\t\tif !t.Result.Pass {\n\t\t\tpass = false\n\t\t\tbreak\n\t\t}\n\t}\n\tm.Report.Pass = pass\n\tfor _, t := range m.Tests {\n\t\tm.ReportResult(t.Result)\n\t}\n}",
"func (this *commonResult) addLog(header string, org_msg string) {\n\t_, file, line, _ := runtime.Caller(2)\n\t_, fileName := path.Split(file)\n\n\torg_msg = strings.TrimSuffix(org_msg, \"\\n\")\n\n\toutput := fmt.Sprintf(\n\t\theader+\" %s %s %s::%d\",\n\t\torg_msg,\n\t\ttime.Now().Format(time.RFC3339),\n\t\tfileName,\n\t\tline,\n\t)\n\tif !strings.HasSuffix(output, \"\\n\") {\n\t\toutput += \"\\n\"\n\t}\n\n\tthis.messages = append(this.messages, output)\n}",
"func NewLogCase(cfg *config.Config) Case {\n\tc := &LogCase{\n\t\tcfg: &cfg.Suite.Log,\n\t}\n\tc.initLogWrite(cfg.Suite.Concurrency)\n\tif c.cfg.TableNum <= 1 {\n\t\tc.cfg.TableNum = 1\n\t}\n\treturn c\n}",
"func BuildReport(r Result) string {\n\tif r.err != nil {\n\t\treturn \"-------------------------------------\\n\" + r.Url + \" report\\n-------------------------------------\\ntime : \" + r.time + \"\\nerror : \" + r.err.Error() + \"\\n\\n\"\n\t}\n\treturn \"-------------------------------------\\n\" + r.Url + \" report\\n-------------------------------------\\ntime : \" + r.time + \"\\nexpired : \" + strconv.FormatBool(!r.valid) + \"\\nexpiration : \" + r.expiry + \"\\n\\n\"\n}",
"func newBuildLogs(c *Client, namespace string) *buildLogs {\n\treturn &buildLogs{\n\t\tr: c,\n\t\tns: namespace,\n\t}\n}",
"func CIMessage(messageType string, data interface{}) {\n\tif RunningOnTeamCity() {\n\t\tmessage := \"##teamcity[\" + messageType\n\n\t\tswitch d := data.(type) {\n\t\tcase string:\n\t\t\tescaped := ciEscape(d)\n\t\t\tmessage += fmt.Sprintf(\" '%s'\", escaped)\n\t\tcase map[string]string:\n\t\t\tfor k, v := range d {\n\t\t\t\tescaped := ciEscape(v)\n\t\t\t\tmessage += fmt.Sprintf(\" %s='%s'\", k, escaped)\n\t\t\t}\n\t\t}\n\t\tmessage += \"]\"\n\t\tlog.Println(message)\n\t} else {\n\t\tlog.Printf(\"%s: %#v\", messageType, data)\n\t}\n}",
"func buildNotice(params GenerateNoticeParams) (notice Notice) {\n\tvar currentYear = time.Now().Year()\n\tnotice = Notice{\n\t\tLicensor: params.Licensor,\n\t\tProject: params.Project,\n\t\tProjectYears: fmt.Sprint(params.StartYear, \"-\", currentYear),\n\t}\n\tif params.StartYear == currentYear || params.StartYear == 0 {\n\t\tnotice.ProjectYears = fmt.Sprint(currentYear)\n\t}\n\treturn notice\n}",
"func Test_SimpleLogger(t *testing.T) {\n\tdefer b.Reset()\n\n\tt.Run(\"NoFields\", func(t *testing.T) {\n\t\tdefer b.Reset()\n\t\tlog.InitSimpleLogger(&log.Config{\n\t\t\tOutput: b,\n\t\t})\n\n\t\ttests := []struct {\n\t\t\tlevel string\n\t\t\tfile string\n\t\t\tfunction string\n\t\t\tf func(msg string)\n\t\t}{\n\t\t\t{\n\t\t\t\tlevel: \"ERROR\",\n\t\t\t\tfile: \"log_test.go\",\n\t\t\t\tfunction: \"1()\",\n\t\t\t\tf: log.Error,\n\t\t\t},\n\t\t\t{\n\t\t\t\tlevel: \"INFO \",\n\t\t\t\tfile: \"log_test.go\",\n\t\t\t\tfunction: \"1()\",\n\t\t\t\tf: log.Info,\n\t\t\t},\n\t\t\t{\n\t\t\t\tlevel: \"DEBUG\",\n\t\t\t\tfile: \"log_test.go\",\n\t\t\t\tfunction: \"1()\",\n\t\t\t\tf: log.Debug,\n\t\t\t},\n\t\t\t{\n\t\t\t\tlevel: \"WARN \",\n\t\t\t\tfile: \"log_test.go\",\n\t\t\t\tfunction: \"1()\",\n\t\t\t\tf: log.Warn,\n\t\t\t},\n\t\t}\n\n\t\tfor _, test := range tests {\n\t\t\tt.Run(test.level, func(t *testing.T) {\n\t\t\t\ttest.f(\"there was an error\")\n\t\t\t\tdefer b.Reset()\n\n\t\t\t\tout := b.String()\n\n\t\t\t\tassureSingleNewline(out, t)\n\n\t\t\t\tlevel, file, function, _ := splitMessage(out, t)\n\n\t\t\t\tif level != test.level {\n\t\t\t\t\tt.Errorf(\"expected level: '%s'. actual level: '%s'\", test.level, level)\n\t\t\t\t}\n\n\t\t\t\tif file != test.file {\n\t\t\t\t\tt.Errorf(\"expected file: '%s'. actual file: '%s'\", test.file, file)\n\t\t\t\t}\n\n\t\t\t\tif function != test.function {\n\t\t\t\t\tt.Errorf(\"expected function: '%s'. 
actual function: '%s'\", test.function, function)\n\t\t\t\t}\n\n\t\t\t\tif len(strings.Split(strings.TrimSpace(out), \"\\n\")) > 1 {\n\t\t\t\t\tt.Errorf(\"expected single line log point: '%s\", out)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"WithFields\", func(t *testing.T) {\n\t\tdefer b.Reset()\n\t\tt.Run(\"Single Field\", func(t *testing.T) {\n\t\t\tlog.InitSimpleLogger(&log.Config{\n\t\t\t\tOutput: b,\n\t\t\t})\n\n\t\t\ttests := []struct {\n\t\t\t\tlevel string\n\t\t\t\tfile string\n\t\t\t\tfunction string\n\t\t\t\tkey string\n\t\t\t\tvalue interface{}\n\t\t\t\tf func(string)\n\t\t\t}{\n\t\t\t\t{\n\t\t\t\t\tlevel: \"ERROR\",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tkey: \"sample\",\n\t\t\t\t\tvalue: \"banana\",\n\t\t\t\t\tf: log.WithFields(log.Fields{\"sample\": \"banana\"}).Error,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"INFO \",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tkey: \"text\",\n\t\t\t\t\tvalue: 1,\n\t\t\t\t\tf: log.WithFields(log.Fields{\"text\": 1}).Info,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"DEBUG\",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tkey: \"burger\",\n\t\t\t\t\tvalue: []string{\"sorry fellas\"},\n\t\t\t\t\tf: log.WithFields(log.Fields{\"burger\": []string{\"sorry fellas\"}}).Debug,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"WARN \",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tkey: \"salad\",\n\t\t\t\t\tvalue: \"fortnite\",\n\t\t\t\t\tf: log.WithFields(log.Fields{\"salad\": \"fortnite\"}).Warn,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tfor _, test := range tests {\n\t\t\t\tt.Run(test.level, func(t *testing.T) {\n\t\t\t\t\ttest.f(\"there was an error\")\n\t\t\t\t\tdefer b.Reset()\n\n\t\t\t\t\tout := b.String()\n\n\t\t\t\t\t//t.Log(b.String())\n\n\t\t\t\t\tassureSingleNewline(out, t)\n\n\t\t\t\t\tlevel, file, function, _ := splitMessage(out, t)\n\n\t\t\t\t\tif level != test.level {\n\t\t\t\t\t\tt.Errorf(\"expected 
level: '%s'. actual level: '%s'\", test.level, level)\n\t\t\t\t\t}\n\n\t\t\t\t\tif file != test.file {\n\t\t\t\t\t\tt.Errorf(\"expected file: '%s'. actual file: '%s'\", test.file, file)\n\t\t\t\t\t}\n\n\t\t\t\t\tif function != test.function {\n\t\t\t\t\t\tt.Errorf(\"expected function: '%s'. actual function: '%s'\", test.function, function)\n\t\t\t\t\t}\n\n\t\t\t\t\tif ok, fields := hasField(test.key, test.value, out, t); !ok {\n\t\t\t\t\t\tt.Errorf(\"expected fields to contain: '%s=%v. actual fields total: %s\", test.key, test.value, fields)\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"Multiple Fields\", func(t *testing.T) {\n\t\t\tdefer b.Reset()\n\t\t\tlog.InitSimpleLogger(&log.Config{\n\t\t\t\tOutput: b,\n\t\t\t})\n\n\t\t\ttests := []struct {\n\t\t\t\tlevel string\n\t\t\t\tfile string\n\t\t\t\tfunction string\n\t\t\t\tfields log.Fields\n\t\t\t\tf func(string)\n\t\t\t}{\n\t\t\t\t{\n\t\t\t\t\tlevel: \"ERROR\",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"one\": 1,\n\t\t\t\t\t\t\"two\": \"2\",\n\t\t\t\t\t\t\"three\": []string{\"1\", \"2\", \"3\"},\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"one\": 1,\n\t\t\t\t\t\t\"two\": \"2\",\n\t\t\t\t\t\t\"three\": []string{\"1\", \"2\", \"3\"},\n\t\t\t\t\t}).Error,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"INFO \",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"sample\": \"this is a long piece of text\",\n\t\t\t\t\t\t\"true\": false,\n\t\t\t\t\t\t\"false\": true,\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"sample\": \"this is a long piece of text\",\n\t\t\t\t\t\t\"true\": false,\n\t\t\t\t\t\t\"false\": true,\n\t\t\t\t\t}).Info,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"WARN \",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"one\": nil,\n\t\t\t\t\t\t\"okay but\": 
\"epic\",\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"one\": nil,\n\t\t\t\t\t\t\"okay but\": \"epic\",\n\t\t\t\t\t}).Warn,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"DEBUG\",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"teamwork\": -1,\n\t\t\t\t\t\t\"dreamwork\": []bool{false, true},\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"teamwork\": -1,\n\t\t\t\t\t\t\"dreamwork\": []bool{false, true},\n\t\t\t\t\t}).Debug,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tfor _, test := range tests {\n\t\t\t\tt.Run(test.level, func(t *testing.T) {\n\t\t\t\t\ttest.f(\"burger\")\n\n\t\t\t\t\tdefer b.Reset()\n\n\t\t\t\t\tout := b.String()\n\n\t\t\t\t\t//t.Log(b.String())\n\n\t\t\t\t\tassureSingleNewline(out, t)\n\n\t\t\t\t\tlevel, file, function, _ := splitMessage(out, t)\n\n\t\t\t\t\tif level != test.level {\n\t\t\t\t\t\tt.Errorf(\"expected level: '%s'. actual level: '%s'\", test.level, level)\n\t\t\t\t\t}\n\n\t\t\t\t\tif file != test.file {\n\t\t\t\t\t\tt.Errorf(\"expected file: '%s'. actual file: '%s'\", test.file, file)\n\t\t\t\t\t}\n\n\t\t\t\t\tif function != test.function {\n\t\t\t\t\t\tt.Errorf(\"expected function: '%s'. actual function: '%s'\", test.function, function)\n\t\t\t\t\t}\n\n\t\t\t\t\tfor k, v := range test.fields {\n\t\t\t\t\t\tif ok, fields := hasField(k, v, out, t); !ok {\n\t\t\t\t\t\t\tt.Errorf(\"expected fields to contain: '%s=%v. 
actual fields total: %s\", k, v, fields)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"Append Fields\", func(t *testing.T) {\n\t\t\tdefer b.Reset()\n\t\t\tlog.InitSimpleLogger(&log.Config{\n\t\t\t\tOutput: b,\n\t\t\t})\n\n\t\t\ttests := []struct {\n\t\t\t\tlevel string\n\t\t\t\tfile string\n\t\t\t\tfunction string\n\t\t\t\tfields log.Fields\n\t\t\t\tf func(string)\n\t\t\t}{\n\t\t\t\t{\n\t\t\t\t\tlevel: \"ERROR\",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"one\": 1,\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"one\": 1,\n\t\t\t\t\t}).WithFields(log.Fields{}).Error,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"INFO \",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"sample\": \"this is a long piece of text\",\n\t\t\t\t\t\t\"true\": false,\n\t\t\t\t\t\t\"false\": true,\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"sample\": \"this is a long piece of text\",\n\t\t\t\t\t}).WithFields(log.Fields{\n\t\t\t\t\t\t\"false\": true,\n\t\t\t\t\t}).WithFields(log.Fields{\n\t\t\t\t\t\t\"true\": false,\n\t\t\t\t\t}).Info,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"WARN \",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"one\": nil,\n\t\t\t\t\t\t\"okay but\": \"epic\",\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"one\": nil,\n\t\t\t\t\t}).WithFields(log.Fields{\n\t\t\t\t\t\t\"okay but\": \"epic\",\n\t\t\t\t\t}).Warn,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"DEBUG\",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"teamwork\": -1,\n\t\t\t\t\t\t\"dreamwork\": []bool{false, true},\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"teamwork\": -1,\n\t\t\t\t\t}).WithFields(log.Fields{\n\t\t\t\t\t\t\"dreamwork\": 
[]bool{false, true},\n\t\t\t\t\t}).Debug,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tfor _, test := range tests {\n\t\t\t\tt.Run(test.level, func(t *testing.T) {\n\t\t\t\t\ttest.f(\"burger\")\n\n\t\t\t\t\tdefer b.Reset()\n\n\t\t\t\t\tout := b.String()\n\n\t\t\t\t\t//t.Log(b.String())\n\n\t\t\t\t\tassureSingleNewline(out, t)\n\n\t\t\t\t\tlevel, file, function, _ := splitMessage(out, t)\n\n\t\t\t\t\tif level != test.level {\n\t\t\t\t\t\tt.Errorf(\"expected level: '%s'. actual level: '%s'\", test.level, level)\n\t\t\t\t\t}\n\n\t\t\t\t\tif file != test.file {\n\t\t\t\t\t\tt.Errorf(\"expected file: '%s'. actual file: '%s'\", test.file, file)\n\t\t\t\t\t}\n\n\t\t\t\t\tif function != test.function {\n\t\t\t\t\t\tt.Errorf(\"expected function: '%s'. actual function: '%s'\", test.function, function)\n\t\t\t\t\t}\n\n\t\t\t\t\tfor k, v := range test.fields {\n\t\t\t\t\t\tif ok, fields := hasField(k, v, out, t); !ok {\n\t\t\t\t\t\t\tt.Errorf(\"expected fields to contain: '%s=%v. actual fields total: %s\", k, v, fields)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"With Error Field\", func(t *testing.T) {\n\t\t\tdefer b.Reset()\n\t\t\tlog.InitSimpleLogger(&log.Config{\n\t\t\t\tOutput: b,\n\t\t\t})\n\n\t\t\ttests := []struct {\n\t\t\t\tlevel string\n\t\t\t\tfile string\n\t\t\t\tfunction string\n\t\t\t\tfields log.Fields\n\t\t\t\tf func(string)\n\t\t\t}{\n\t\t\t\t{\n\t\t\t\t\tlevel: \"ERROR\",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"one\": 1,\n\t\t\t\t\t\t\"error\": errors.New(\"sample text\"),\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithError(\n\t\t\t\t\t\terrors.New(\"sample text\"),\n\t\t\t\t\t).WithFields(log.Fields{\n\t\t\t\t\t\t\"one\": 1,\n\t\t\t\t\t}).Error,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"INFO \",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"sample\": \"this is a long piece of text\",\n\t\t\t\t\t\t\"error\": 
errors.New(\"sample text\"),\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"sample\": \"this is a long piece of text\",\n\t\t\t\t\t}).WithError(errors.New(\"sample text\")).Info,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"WARN \",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"one\": nil,\n\t\t\t\t\t\t\"error\": errors.New(\"sample text\"),\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"one\": nil,\n\t\t\t\t\t}).WithError(errors.New(\"sample text\")).Warn,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tlevel: \"DEBUG\",\n\t\t\t\t\tfile: \"log_test.go\",\n\t\t\t\t\tfunction: \"1()\",\n\t\t\t\t\tfields: log.Fields{\n\t\t\t\t\t\t\"teamwork\": -1,\n\t\t\t\t\t\t\"error\": errors.New(\"sample text\"),\n\t\t\t\t\t},\n\t\t\t\t\tf: log.WithFields(log.Fields{\n\t\t\t\t\t\t\"teamwork\": -1,\n\t\t\t\t\t}).WithError(errors.New(\"sample text\")).Debug,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tfor _, test := range tests {\n\t\t\t\tt.Run(test.level, func(t *testing.T) {\n\t\t\t\t\ttest.f(\"burger\")\n\n\t\t\t\t\tdefer b.Reset()\n\n\t\t\t\t\tout := b.String()\n\n\t\t\t\t\t//t.Log(b.String())\n\n\t\t\t\t\tassureSingleNewline(out, t)\n\n\t\t\t\t\tlevel, file, function, _ := splitMessage(out, t)\n\n\t\t\t\t\tif level != test.level {\n\t\t\t\t\t\tt.Errorf(\"expected level: '%s'. actual level: '%s'\", test.level, level)\n\t\t\t\t\t}\n\n\t\t\t\t\tif file != test.file {\n\t\t\t\t\t\tt.Errorf(\"expected file: '%s'. actual file: '%s'\", test.file, file)\n\t\t\t\t\t}\n\n\t\t\t\t\tif function != test.function {\n\t\t\t\t\t\tt.Errorf(\"expected function: '%s'. actual function: '%s'\", test.function, function)\n\t\t\t\t\t}\n\n\t\t\t\t\tfor k, v := range test.fields {\n\t\t\t\t\t\tif ok, fields := hasField(k, v, out, t); !ok {\n\t\t\t\t\t\t\tt.Errorf(\"expected fields to contain: '%s=%v. 
actual fields total: %s\", k, v, fields)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t})\n\n\tt.Run(\"LogLevel\", func(t *testing.T) {\n\t\ttests := []struct {\n\t\t\tlevelName string\n\t\t\tlevel log.LogLevel\n\t\t\toutput bool\n\t\t\tf func(string)\n\t\t}{\n\t\t\t{\n\t\t\t\tlevelName: \"DEBUG\",\n\t\t\t\tlevel: log.LogDebug,\n\t\t\t\toutput: true,\n\t\t\t\tf: log.Debug,\n\t\t\t},\n\t\t\t{\n\t\t\t\tlevelName: \"ERROR\",\n\t\t\t\tlevel: log.LogInformational,\n\t\t\t\toutput: true,\n\t\t\t\tf: log.Error,\n\t\t\t},\n\t\t\t{\n\t\t\t\tlevelName: \"INFO \",\n\t\t\t\tlevel: log.LogWarning,\n\t\t\t\toutput: false,\n\t\t\t\tf: log.Info,\n\t\t\t},\n\t\t\t{\n\t\t\t\tlevelName: \"WARN \",\n\t\t\t\tlevel: log.LogError,\n\t\t\t\toutput: false,\n\t\t\t\tf: log.Warn,\n\t\t\t},\n\t\t}\n\n\t\tvar b strings.Builder\n\t\tfor _, test := range tests {\n\t\t\tt.Run(test.levelName, func(t *testing.T) {\n\t\t\t\tdefer b.Reset()\n\t\t\t\tlog.InitSimpleLogger(&log.Config{\n\t\t\t\t\tOutput: &b,\n\t\t\t\t\tLogLevel: test.level,\n\t\t\t\t})\n\n\t\t\t\ttest.f(\"sample text\")\n\n\t\t\t\tif b.Len() > 0 && !test.output {\n\t\t\t\t\tt.Errorf(\"expected no output for log level %d, got '%s'\", test.level, b.String())\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"Clone\", func(t *testing.T) {\n\t\tdefer b.Reset()\n\t\tlog.InitSimpleLogger(&log.Config{\n\t\t\tOutput: b,\n\t\t})\n\n\t\te := log.WithFields(log.Fields{\n\t\t\t\"sample\": \"text\",\n\t\t})\n\n\t\te1 := e.Clone().WithFields(log.Fields{\n\t\t\t\"fortnite\": \"borger\",\n\t\t})\n\n\t\te = e.WithFields(log.Fields{\n\t\t\t\"hello\": \"world\",\n\t\t})\n\n\t\te.Info(\"e\")\n\n\t\tif ok, fields := hasField(\"fortnite\", \"borger\", b.String(), t); ok {\n\t\t\tt.Errorf(\"expected to not have '%s=%s' but it did: '%s'\", \"fortnite\", \"borger\", fields)\n\t\t}\n\n\t\tb.Reset()\n\t\te1.Info(\"e\")\n\n\t\tif ok, fields := hasField(\"hello\", \"world\", b.String(), t); ok {\n\t\t\tt.Errorf(\"expected to not have '%s=%s' but it did: 
'%s'\", \"hello\", \"world\", fields)\n\t\t}\n\t})\n\n\tt.Run(\"Context\", func(t *testing.T) {\n\t\tdefer b.Reset()\n\t\tlog.InitSimpleLogger(&log.Config{\n\t\t\tOutput: b,\n\t\t})\n\n\t\tctx := context.WithValue(context.Background(), log.Key, log.Fields{\n\t\t\t\"sample\": \"text\",\n\t\t})\n\n\t\tlog.WithContext(ctx).Info(\"hello epic reddit\")\n\n\t\tif ok, fields := hasField(\"sample\", \"text\", b.String(), t); !ok {\n\t\t\tt.Errorf(\"expected fields to contain: '%s=%v'. actual fields total: %s\", \"sample\", \"text\", fields)\n\t\t}\n\t})\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
NewDeployment converts BOSH deployment information into a deployment view for the dashboard | func NewDeployment(configTier config.Tier, configSlot config.Slot, boshDeployment *data.Deployment) (deployment *Deployment) {
	// Compose the display name as "<tier> / <slot> - <bosh deployment name>".
	name := fmt.Sprintf("%s / %s - %s", configTier.Name, configSlot.Name, boshDeployment.Name)

	// Map each BOSH release to a display row with the neutral indicator.
	releases := make([]DisplayNameVersion, 0, len(boshDeployment.Releases))
	for _, boshRelease := range boshDeployment.Releases {
		releases = append(releases, DisplayNameVersion{
			Name:         boshRelease.Name,
			Version:      boshRelease.Version,
			DisplayClass: "icon-minus blue",
		})
	}

	// Stemcells get the same neutral treatment as releases.
	stemcells := make([]DisplayNameVersion, 0, len(boshDeployment.Stemcells))
	for _, boshStemcell := range boshDeployment.Stemcells {
		stemcells = append(stemcells, DisplayNameVersion{
			Name:         boshStemcell.Name,
			Version:      boshStemcell.Version,
			DisplayClass: "icon-minus blue",
		})
	}

	// Flatten every extra-data item across all chunks, translating its
	// up/down indicator into a display class (neutral when unspecified).
	extraData := []Data{}
	for _, dataChunk := range boshDeployment.ExtraData {
		for _, dataChunkItem := range dataChunk.Data {
			displayClass := "icon-minus blue"
			switch dataChunkItem.Indicator {
			case "down":
				displayClass = "icon-arrow-down red"
			case "up":
				displayClass = "icon-arrow-up green"
			}
			extraData = append(extraData, Data{
				Label:        dataChunkItem.Label,
				Value:        dataChunkItem.Value,
				DisplayClass: displayClass,
			})
		}
	}

	// Without any extra data, surface an explicit "unknown" backup status.
	if len(extraData) == 0 {
		extraData = append(extraData, Data{
			Label:        "backup status",
			Value:        "unknown",
			DisplayClass: "icon-arrow-down red",
		})
	}

	deployment = &Deployment{
		Name:      name,
		Releases:  releases,
		Stemcells: stemcells,
		ExtraData: extraData,
	}
	return
}
"func newDeployment(apployment *appscodev1alpha1.Apployment) *appsv1.Deployment {\n\tlabels := map[string]string{\n\t\t\"app\": \"Appscode\",\n\t\t\"controller\": apployment.Name,\n\t}\n\treturn &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: apployment.Spec.ApploymentName,\n\t\t\tNamespace: apployment.Namespace,\n\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\t*metav1.NewControllerRef(apployment, appscodev1alpha1.SchemeGroupVersion.WithKind(\"Apployment\")),\n\t\t\t},\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: apployment.Spec.Replicas,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: labels,\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: labels,\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: apployment.Name,\n\t\t\t\t\t\t\tImage: apployment.Spec.Image,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}",
"func newDeployment(ctx context.Context, c *vim25.Client, params *types.VMDeployParams, l log.Logger) (*Deployment, error) { //nolint: unparam\n\td := newSimpleDeployment(c, params, l)\n\n\t// step 1. choose Datacenter and folder\n\tif err := d.chooseDatacenter(ctx, params.Datacenter); err != nil {\n\t\terr = errors.Wrap(err, \"Could not choose datacenter\")\n\t\tl.Log(\"err\", err)\n\t\treturn nil, err\n\t}\n\n\tif err := d.chooseFolder(ctx, params.Folder); err != nil {\n\t\terr = errors.Wrap(err, \"Could not choose folder\")\n\t\tl.Log(\"err\", err)\n\t\treturn nil, err\n\t}\n\n\t// step 2. choose computer resource\n\tresType := params.ComputerResources.Type\n\tresPath := params.ComputerResources.Path\n\tif err := d.chooseComputerResource(ctx, resType, resPath); err != nil {\n\t\terr = errors.Wrap(err, \"Could not choose Computer Resource\")\n\t\tl.Log(\"err\", err)\n\t\treturn nil, err\n\t}\n\n\t// step 3. Choose datastore cluster or single datastore\n\tdsType := params.Datastores.Type\n\tdsNames := params.Datastores.Names\n\tif err := d.chooseDatastore(ctx, dsType, dsNames); err != nil {\n\t\terr = errors.Wrap(err, \"Could not choose datastore\")\n\t\tl.Log(\"err\", err)\n\t\treturn nil, err\n\t}\n\n\treturn d, nil\n}",
"func newDeployment(deploymentName string, replicas int32, podLabels map[string]string, containerName, image string, strategyType appsv1.DeploymentStrategyType) *appsv1.Deployment {\n\tzero := int64(0)\n\treturn &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: deploymentName,\n\t\t\tLabels: podLabels,\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &replicas,\n\t\t\tSelector: &metav1.LabelSelector{MatchLabels: podLabels},\n\t\t\tStrategy: appsv1.DeploymentStrategy{\n\t\t\t\tType: strategyType,\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: podLabels,\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tTerminationGracePeriodSeconds: &zero,\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: containerName,\n\t\t\t\t\t\t\tImage: image,\n\t\t\t\t\t\t\tSecurityContext: &corev1.SecurityContext{},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}",
"func newDeployment(foo *samplecontrollerv1alpha1.Foo) *appsv1.Deployment {\n\tgv := samplecontrollerv1alpha1.GroupVersion\n\tlabels := map[string]string{\n\t\t\"app\": \"nginx\",\n\t\t\"controller\": foo.Name,\n\t}\n\treturn &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: foo.Spec.DeploymentName,\n\t\t\tNamespace: foo.Namespace,\n\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\t*metav1.NewControllerRef(foo, schema.GroupVersionKind{\n\t\t\t\t\tGroup: gv.Group,\n\t\t\t\t\tVersion: gv.Version,\n\t\t\t\t\tKind: ResourceKindSingular,\n\t\t\t\t}),\n\t\t\t},\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: foo.Spec.Replicas,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: labels,\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: labels,\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"nginx\",\n\t\t\t\t\t\t\tImage: \"nginx:alpine\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}",
"func CreateDeployment(c *gin.Context) {\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateDeployment, \"Start create deployment\")\n\n\t// --- [ Get cluster ] --- //\n\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateDeployment, \"Get cluster\")\n\tcloudCluster, err := cloud.GetClusterFromDB(c)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateDeployment, \"Get cluster succeeded\")\n\n\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateDeployment, \"Bind json into DeploymentType struct\")\n\tvar deployment DeploymentType\n\tif err := c.BindJSON(&deployment); err != nil {\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateDeployment, \"Bind failed\")\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateDeployment, \"Required field is empty.\"+err.Error())\n\t\tcloud.SetResponseBodyJson(c, http.StatusBadRequest, gin.H{\n\t\t\tcloud.JsonKeyStatus: http.StatusBadRequest,\n\t\t\tcloud.JsonKeyMessage: \"Required field is empty\",\n\t\t\tcloud.JsonKeyError: err,\n\t\t})\n\t\treturn\n\t}\n\n\tbanzaiUtils.LogDebug(banzaiConstants.TagCreateDeployment, fmt.Sprintf(\"Creating chart %s with version %s and release name %s\", deployment.Name, deployment.Version, deployment.ReleaseName))\n\tprefix := viper.GetString(\"dev.chartpath\")\n\tchartPath := path.Join(prefix, deployment.Name)\n\n\tvar values []byte = nil\n\tif deployment.Values != \"\" {\n\t\tparsedJSON, err := yaml.Marshal(deployment.Values)\n\t\tif err != nil {\n\t\t\tbanzaiUtils.LogError(banzaiConstants.TagCreateDeployment, \"Can't parse Values:\", err)\n\t\t}\n\t\tvalues, err = yaml.JSONToYAML(parsedJSON)\n\t\tif err != nil {\n\t\t\tbanzaiUtils.LogError(banzaiConstants.TagCreateDeployment, \"Can't convert JSON to YAML:\", err)\n\t\t\treturn\n\t\t}\n\t}\n\t// --- [ Get K8S Config ] --- //\n\tkubeConfig, err := cloud.GetK8SConfig(cloudCluster, c)\n\tif err != nil {\n\t\treturn\n\t}\n\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateDeployment, \"Getting K8S Config 
Succeeded\")\n\n\tbanzaiUtils.LogDebug(banzaiConstants.TagCreateDeployment, \"Custom values:\", string(values))\n\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateDeployment, \"Create deployment\")\n\trelease, err := helm.CreateDeployment(chartPath, deployment.ReleaseName, values, kubeConfig)\n\tif err != nil {\n\t\tbanzaiUtils.LogWarn(banzaiConstants.TagCreateDeployment, \"Error during create deployment.\", err.Error())\n\t\tcloud.SetResponseBodyJson(c, http.StatusNotFound, gin.H{\n\t\t\tcloud.JsonKeyStatus: http.StatusNotFound,\n\t\t\tcloud.JsonKeyMessage: fmt.Sprintf(\"%s\", err),\n\t\t})\n\t\treturn\n\t} else {\n\t\tbanzaiUtils.LogInfo(banzaiConstants.TagCreateDeployment, \"Create deployment succeeded\")\n\t}\n\n\treleaseName := release.Release.Name\n\treleaseNotes := release.Release.Info.Status.Notes\n\n\tbanzaiUtils.LogDebug(banzaiConstants.TagCreateDeployment, \"Release name:\", releaseName)\n\tbanzaiUtils.LogDebug(banzaiConstants.TagCreateDeployment, \"Release notes:\", releaseNotes)\n\n\t//Get ingress with deployment prefix TODO\n\t//Get local ingress address?\n\tendpoint, err := cloud.GetK8SEndpoint(cloudCluster, c)\n\tif err != nil {\n\t\tcloud.SetResponseBodyJson(c, http.StatusInternalServerError, gin.H{\n\t\t\tcloud.JsonKeyStatus: http.StatusInternalServerError,\n\t\t\tcloud.JsonKeyMessage: fmt.Sprintf(\"%s\", err),\n\t\t})\n\t\treturn\n\t}\n\n\tdeploymentUrl := fmt.Sprintf(\"http://%s:30080/zeppelin/\", endpoint)\n\tnotify.SlackNotify(fmt.Sprintf(\"Deployment Created: %s\", deploymentUrl))\n\tcloud.SetResponseBodyJson(c, http.StatusCreated, gin.H{\n\t\tcloud.JsonKeyStatus: http.StatusCreated,\n\t\tcloud.JsonKeyMessage: fmt.Sprintf(\"%s\", err),\n\t\tcloud.JsonKeyReleaseName: releaseName,\n\t\tcloud.JsonKeyUrl: deploymentUrl,\n\t\tcloud.JsonKeyNotes: releaseNotes,\n\t})\n\treturn\n}",
"func newDeployment(comp componentry.CertManagerComponent, cr operatorsv1alpha1.CertManagerDeployment, cstm DeploymentCustomizations) *appsv1.Deployment {\n\tdeploy := &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: comp.GetResourceName(),\n\t\t\tNamespace: componentry.CertManagerDeploymentNamespace,\n\t\t\tLabels: cmdoputils.MergeMaps(componentry.StandardLabels, comp.GetLabels()),\n\t\t},\n\t\tSpec: comp.GetDeployment(),\n\t}\n\n\t// Add the service account entry to the base deployment\n\tdeploy.Spec.Template.Spec.ServiceAccountName = comp.GetServiceAccountName()\n\n\t// add the label selectors for the base deployment\n\tsel := comp.GetBaseLabelSelector()\n\tsel = metav1.AddLabelToSelector(sel, componentry.InstanceLabelKey, cr.Name)\n\tdeploy.Spec.Selector = sel\n\n\tselmap, _ := metav1.LabelSelectorAsMap(sel)\n\tdeploy.Spec.Template.ObjectMeta.Labels = selmap\n\n\t// If the CR contains a customized container image for the component, override our deployment\n\tif cstm.ContainerImage != \"\" {\n\t\t// TODO: Assumes a single container, if additional containers are added, this\n\t\t// will need to be updated.\n\t\tdeploy.Spec.Template.Spec.Containers[0].Image = cstm.ContainerImage\n\t}\n\n\t// we don't have any custom merge rules to consider\n\tspecialMergeRules := map[string]resourcemerge.MergeFunc{}\n\t// we have to lay out the flag overriding to be in the right format, we don't expect the\n\t// user to add the flags key\n\tf := overrideConfig{Flags: cstm.ContainerArgs}\n\tuserDefinedArgs, _ := json.Marshal(f)\n\n\tresult, err := resourcemerge.MergePrunedProcessConfig(\n\t\tcertmanagerconfigs.GetEmptyConfigFor(comp.GetName(), cmdoputils.CRVersionOrDefaultVersion(cr.Spec.Version, componentry.CertManagerDefaultVersion)), // the schema\n\t\tspecialMergeRules, // we have no merge rules\n\t\tcertmanagerconfigs.GetDefaultConfigFor(comp.GetName(), cmdoputils.CRVersionOrDefaultVersion(cr.Spec.Version, componentry.CertManagerDefaultVersion)), // our 
default\n\t\tuserDefinedArgs, // user overridden flags\n\t)\n\n\tif err != nil {\n\t\t// run with a default configuratio nif there was an error merging configs\n\t\tresult = certmanagerconfigs.GetDefaultConfigFor(comp.GetName(), cmdoputils.CRVersionOrDefaultVersion(cr.Spec.Version, componentry.CertManagerDefaultVersion))\n\t}\n\n\tdeploy.Spec.Template.Spec.Containers[0].Args = argSliceOf(result, certmanagerconfigs.GetEmptyConfigFor(comp.GetName(), componentry.CertManagerDefaultVersion))\n\n\treturn deploy\n}",
"func (cc *Controller) CreateDeployment(c *gin.Context) {\n\tdd := cloudrunner.DeploymentDescription{}\n\n\terr := c.ShouldBindJSON(&dd)\n\tif err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error()})\n\n\t\treturn\n\t}\n\n\t// This could be a middleware. If it bothers you create a story and do so :).\n\tuser := c.GetHeader(\"X-Spinnaker-User\")\n\tif user == \"\" {\n\t\tc.JSON(http.StatusUnauthorized, gin.H{\"error\": \"X-Spinnaker-User header not set\"})\n\n\t\treturn\n\t}\n\n\troles, err := cc.FiatClient.Roles(user)\n\tif err != nil {\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\"error\": err.Error()})\n\n\t\treturn\n\t}\n\n\tcredentials, err := cc.SqlClient.GetCredentials(dd.Account)\n\tif err != nil {\n\t\tif err == gorm.ErrRecordNotFound || err == sql.ErrCredentialsNotFound {\n\t\t\tc.JSON(http.StatusNotFound, gin.H{\"error\": \"credentials not found\"})\n\n\t\t\treturn\n\t\t} else {\n\t\t\tc.JSON(http.StatusInternalServerError, gin.H{\"error\": err.Error()})\n\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Check if the user has r/w access to use the account. 
If 'credentials'\n\t// gets filtered down to an empty slice, they do not.\n\tcreds := filterCredentials([]cloudrunner.Credentials{credentials}, roles)\n\tif len(creds) == 0 {\n\t\tc.JSON(http.StatusForbidden,\n\t\t\tgin.H{\"error\": fmt.Sprintf(\"user %s does not have access to use account %s\", user, dd.Account)})\n\n\t\treturn\n\t}\n\n\t// Build the command.\n\tcmd, err := cc.buildCommand(dd, credentials)\n\tif err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": err.Error()})\n\n\t\treturn\n\t}\n\n\tt := internal.CurrentTimeUTC()\n\td := cloudrunner.Deployment{\n\t\tCommand: cmd.String(),\n\t\tID: uuid.New().String(),\n\t\tStartTime: &t,\n\t\tStatus: statusRunning,\n\t}\n\n\terr = cc.SqlClient.CreateDeployment(d)\n\tif err != nil {\n\t\tc.JSON(http.StatusInternalServerError, gin.H{\"error\": err.Error()})\n\n\t\treturn\n\t}\n\n\t// We need to run the command concurrently and immediately return the\n\t// deployment status to the user.\n\tgo cc.run(cmd, d)\n\n\tc.JSON(http.StatusCreated, d)\n}",
"func CreateDeployment(c *gin.Context) {\n\tlog := logger.WithFields(logrus.Fields{\"tag\": constants.TagCreateDeployment})\n\tparsedRequest, err := parseCreateUpdateDeploymentRequest(c)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\tc.JSON(http.StatusBadRequest, htype.ErrorResponse{\n\t\t\tCode: http.StatusBadRequest,\n\t\t\tMessage: \"Error during parsing request!\",\n\t\t\tError: errors.Cause(err).Error(),\n\t\t})\n\t\treturn\n\t}\n\trelease, err := helm.CreateDeployment(parsedRequest.deploymentName,\n\t\tparsedRequest.deploymentReleaseName, parsedRequest.values, parsedRequest.kubeConfig,\n\t\tparsedRequest.clusterName)\n\tif err != nil {\n\t\t//TODO distinguish error codes\n\t\tlog.Errorf(\"Error during create deployment. %s\", err.Error())\n\t\tc.JSON(http.StatusBadRequest, htype.ErrorResponse{\n\t\t\tCode: http.StatusBadRequest,\n\t\t\tMessage: \"Error creating deployment\",\n\t\t\tError: err.Error(),\n\t\t})\n\t\treturn\n\t}\n\tlog.Info(\"Create deployment succeeded\")\n\n\treleaseName := release.Release.Name\n\treleaseNotes := release.Release.Info.Status.Notes\n\n\tlog.Debug(\"Release name: \", releaseName)\n\tlog.Debug(\"Release notes: \", releaseNotes)\n\tresponse := htype.CreateUpdateDeploymentResponse{\n\t\tReleaseName: releaseName,\n\t\tNotes: releaseNotes,\n\t}\n\tc.JSON(http.StatusCreated, response)\n\treturn\n}",
"func newDeployment(name, ns string, replicas int32) *apps.Deployment {\n\treturn &apps.Deployment{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"Deployment\",\n\t\t\tAPIVersion: \"apps/v1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tNamespace: ns,\n\t\t\tName: name,\n\t\t},\n\t\tSpec: apps.DeploymentSpec{\n\t\t\tReplicas: &replicas,\n\t\t\tSelector: &metav1.LabelSelector{MatchLabels: testLabels()},\n\t\t\tStrategy: apps.DeploymentStrategy{\n\t\t\t\tType: apps.RollingUpdateDeploymentStrategyType,\n\t\t\t\tRollingUpdate: new(apps.RollingUpdateDeployment),\n\t\t\t},\n\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: testLabels(),\n\t\t\t\t},\n\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: fakeContainerName,\n\t\t\t\t\t\t\tImage: fakeImage,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}",
"func deployment(r *http.Request) *APIError {\n\tnewDeployment := ProjectInfo{}\n\tdata, _ := ioutil.ReadAll(r.Body)\n\t_ = json.Unmarshal(data, &newDeployment)\n\n\tswitch method := r.Method; method {\n\tcase \"POST\":\n\t\treturn newDeployment.Insert(r.Context())\n\tcase \"DELETE\":\n\t\treturn newDeployment.Delete(r.Context())\n\t}\n\treturn nil\n}",
"func newCatDeployment(cr *v1.CatPicture) *appsv1.Deployment {\n\treturn &appsv1.Deployment{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: \"apps/v1\",\n\t\t\tKind: \"Deployment\",\n\t\t},\n\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"cat-deployment\",\n\t\t\tNamespace: cr.Namespace,\n\t\t},\n\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\"app\": \"cat-pictures\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tReplicas: &cr.Spec.Num,\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"app\": \"cat-pictures\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"cat-pictures\",\n\t\t\t\t\t\t\tImage: catContainerImage,\n\t\t\t\t\t\t\tPorts: []corev1.ContainerPort{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tHostPort: 8080,\n\t\t\t\t\t\t\t\t\tContainerPort: 8080,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tEnv: []coreV1.EnvVar{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"CAT_API_SIZE\",\n\t\t\t\t\t\t\t\t\tValue: o.Size,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName: \"CAT_API_FORMAT\",\n\t\t\t\t\t\t\t\t\tValue: o.Format,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}",
"func NewDeployment(\n\tcl client.Client,\n\tscheme *runtime.Scheme,\n\tconsoleobj *vectorizedv1alpha1.Console,\n\tclusterobj *vectorizedv1alpha1.Cluster,\n\tstore *Store,\n\tlog logr.Logger,\n) *Deployment {\n\treturn &Deployment{\n\t\tClient: cl,\n\t\tscheme: scheme,\n\t\tconsoleobj: consoleobj,\n\t\tclusterobj: clusterobj,\n\t\tstore: store,\n\t\tlog: log,\n\t}\n}",
"func toDeployment(s latest.ServiceConfig, objectMeta metav1.ObjectMeta, podTemplate apiv1.PodTemplateSpec, labelSelector map[string]string, original appsv1.Deployment) *appsv1.Deployment {\n\trevisionHistoryLimit := int32(3)\n\tdep := original.DeepCopy()\n\tdep.ObjectMeta = objectMeta\n\tdep.Spec.Replicas = toReplicas(s.Deploy.Replicas)\n\tdep.Spec.RevisionHistoryLimit = &revisionHistoryLimit\n\tdep.Spec.Template = forceRestartPolicy(podTemplate, apiv1.RestartPolicyAlways)\n\tdep.Spec.Strategy = toDeploymentStrategy(s, original.Spec.Strategy)\n\tdep.Spec.Selector = &metav1.LabelSelector{\n\t\tMatchLabels: labelSelector,\n\t}\n\treturn dep\n}",
"func newDeployment(name string) *api.ArangoDeployment {\n\tdepl := &api.ArangoDeployment{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: api.SchemeGroupVersion.String(),\n\t\t\tKind: deployment.ArangoDeploymentResourceKind,\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: strings.ToLower(name),\n\t\t},\n\t\tSpec: api.DeploymentSpec{\n\t\t\tImagePullPolicy: util.NewPullPolicy(v1.PullAlways),\n\t\t\tLicense: api.LicenseSpec{\n\t\t\t\tSecretName: util.NewString(testEnterpriseLicenseKeySecretName),\n\t\t\t},\n\t\t},\n\t}\n\n\t// set default image to the value given in env\n\t// some tests will override this value if they need a specific version\n\t// like update tests\n\t// if no value is given, use the operator default, which is arangodb/arangodb:latest\n\timage := strings.TrimSpace(os.Getenv(\"ARANGODIMAGE\"))\n\tif image != \"\" {\n\t\tdepl.Spec.Image = util.NewString(image)\n\t}\n\n\tdisableIPv6 := strings.TrimSpace(os.Getenv(\"TESTDISABLEIPV6\"))\n\tif disableIPv6 != \"\" && disableIPv6 != \"0\" {\n\t\tdepl.Spec.DisableIPv6 = util.NewBool(true)\n\t}\n\n\treturn depl\n}",
"func newDeployment(redis *redisv1.Redisdb) *appsv1beta2.Deployment {\n\tlabels := map[string]string{\n\t\t\"app\": \"redis\",\n\t\t\"controller\": redis.Name,\n\t}\n\treturn &appsv1beta2.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: redis.Name,\n\t\t\tNamespace: redis.Namespace,\n\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\t*metav1.NewControllerRef(redis, schema.GroupVersionKind{\n\t\t\t\t\tGroup: redisv1.SchemeGroupVersion.Group,\n\t\t\t\t\tVersion: redisv1.SchemeGroupVersion.Version,\n\t\t\t\t\tKind: \"Redis\",\n\t\t\t\t}),\n\t\t\t},\n\t\t},\n\t\tSpec: appsv1beta2.DeploymentSpec{\n\t\t\tReplicas: redis.Spec.Replicas,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: labels,\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: labels,\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"redis\",\n\t\t\t\t\t\t\tImage: \"redis:3.2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}",
"func createDeployment(k *kabanerov1alpha1.Kabanero, clientset *kubernetes.Clientset, c client.Client, name string, image string, env []corev1.EnvVar, envFrom []corev1.EnvFromSource, livenessProbe *corev1.Probe, reqLogger logr.Logger) error {\n\tcl := clientset.AppsV1().Deployments(k.ObjectMeta.Namespace)\n\n\t// Check if the Deployment resource already exists.\n\tdInstance := &appsv1.Deployment{}\n\terr := c.Get(context.Background(), types.NamespacedName{\n\t\tName: name,\n\t\tNamespace: k.ObjectMeta.Namespace}, dInstance)\n\n\tdeploymentExists := true\n\tif err != nil {\n\t\tif apierrors.IsNotFound(err) == false {\n\t\t\treturn err\n\t\t}\n\n\t\t// The deployment does not already exist. Create one.\n\t\tdeploymentExists = false\n\n\t\t// Gather Kabanero operator ownerReference information.\n\t\townerRef, err := getOwnerReference(k, c, reqLogger)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Initialize the deployment\n\t\tvar repCount int32 = 1\n\t\tdInstance = &appsv1.Deployment{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: name,\n\t\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\t\t{\n\t\t\t\t\t\tAPIVersion: ownerRef.APIVersion,\n\t\t\t\t\t\tKind: ownerRef.Kind,\n\t\t\t\t\t\tName: ownerRef.Name,\n\t\t\t\t\t\tUID: ownerRef.UID,\n\t\t\t\t\t\tController: ownerRef.Controller,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSpec: appsv1.DeploymentSpec{\n\t\t\t\tReplicas: &repCount,\n\t\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\t\"app\": name,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\t\"app\": name,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\t\tServiceAccountName: name,\n\t\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: name,\n\t\t\t\t\t\t\t\tImagePullPolicy: \"Always\",\n\t\t\t\t\t\t\t\tPorts: 
[]corev1.ContainerPort{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tContainerPort: 9443,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\n\t// Here we update the things that can change. In the future we could\n\t// consider re-applying all the fields in case someone hand-edited the\n\t// deployment object in an incompatible way.\n\tdInstance.Spec.Template.Spec.Containers[0].Env = env\n\tdInstance.Spec.Template.Spec.Containers[0].EnvFrom = envFrom\n\tdInstance.Spec.Template.Spec.Containers[0].Image = image\n\tdInstance.Spec.Template.Spec.Containers[0].LivenessProbe = livenessProbe\n\n\tif deploymentExists == false {\n\t\treqLogger.Info(fmt.Sprintf(\"createDeployment: Deployment for create: %v\", dInstance))\n\n\t\t_, err = cl.Create(dInstance)\n\t} else {\n\t\treqLogger.Info(fmt.Sprintf(\"createDeployment: Deployment for update: %v\", dInstance))\n\n\t\t_, err = cl.Update(dInstance)\n\t}\n\n\treturn err\n}",
"func Create(deployment *Deployment) (*Deployment, error) {\n\targs := []string{\n\t\t\"deployment-manager\",\n\t\t\"deployments\",\n\t\t\"create\",\n\t\tdeployment.config.Name,\n\t\t\"--config\",\n\t\tdeployment.configFile,\n\t\t\"--project\",\n\t\tdeployment.config.Project,\n\t}\n\t_, err := runGCloud(args...)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to create deployment: %v, error: %v\", deployment, err)\n\t\treturn nil, err\n\t}\n\toutputs, err := GetOutputs(deployment.config.Name, deployment.config.Project)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to get outputs for deployment: %v, error: %v\", deployment, err)\n\t\treturn nil, err\n\t}\n\tdeployment.Outputs = outputs\n\treturn deployment, nil\n}",
"func createDeployment(cluster *client.VanClient, annotations map[string]string) (*v1.Deployment, error) {\n\tname := \"nginx\"\n\treplicas := int32(1)\n\tdep := &v1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: cluster.Namespace,\n\t\t\tAnnotations: annotations,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"app\": name,\n\t\t\t},\n\t\t},\n\t\tSpec: v1.DeploymentSpec{\n\t\t\tReplicas: &replicas,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\"app\": name,\n\t\t\t\t},\n\t\t\t},\n\t\t\tTemplate: corev1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"app\": name,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: corev1.PodSpec{\n\t\t\t\t\tContainers: []corev1.Container{\n\t\t\t\t\t\t{Name: \"nginx\", Image: \"quay.io/skupper/nginx-unprivileged:stable-alpine\", Ports: []corev1.ContainerPort{{Name: \"web\", ContainerPort: 8080}}, ImagePullPolicy: corev1.PullIfNotPresent},\n\t\t\t\t\t},\n\t\t\t\t\tRestartPolicy: corev1.RestartPolicyAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t// Deploying resource\n\tdep, err := cluster.KubeClient.AppsV1().Deployments(cluster.Namespace).Create(context.TODO(), dep, metav1.CreateOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Wait for deployment to be ready\n\tdep, err = kube.WaitDeploymentReadyReplicas(dep.Name, cluster.Namespace, 1, cluster.KubeClient, timeout, interval)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn dep, nil\n}",
"func (h *Handler) CreateDeploymentHandler(w http.ResponseWriter, r *http.Request) {\n\t// Decode request\n\tvar reqBody CreateDeploymentRequest\n\tdecoder := json.NewDecoder(r.Body)\n\tif err := decoder.Decode(&reqBody); err != nil {\n\t\twriteJSONError(w, err.Error(), 422)\n\t\treturn\n\t}\n\t// Prepare business call\n\tid, err := h.deploymentEngine.CreateDeployment(\n\t\t&engine.Deployment{\n\t\t\tName: reqBody.Name,\n\t\t\tChartName: reqBody.ChartName,\n\t\t\tChartVersion: reqBody.ChartVersion,\n\t\t\tRepositoryURL: reqBody.RepositoryURL,\n\t\t})\n\tif err != nil {\n\t\t// TODO: Get the status code from map of errors\n\t\twriteJSONError(w, err.Error(),\n\t\t\thttp.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Encode response\n\trespBody := CreateDeploymentResponse{ID: id}\n\terr = json.NewEncoder(w).Encode(respBody)\n\tif err != nil {\n\t\twriteJSONError(w, err.Error(),\n\t\t\thttp.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusCreated)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ContainsFilterTag determines if a Deployment has a release matching filterTag | func (deployment *Deployment) ContainsFilterTag(filterTag string) bool {
	// An empty filter tag matches every deployment.
	if filterTag == "" {
		return true
	}
	// Otherwise require at least one release whose name equals the tag.
	for i := range deployment.Releases {
		if deployment.Releases[i].Name == filterTag {
			return true
		}
	}
	return false
}
"func isReleaseTag(eventType string, payload api.WebhookGithub) bool {\n\tif api.GithubWebhookPush == eventType {\n\t\tif nil != payload[api.GithubWebhookFlagRef] &&\n\t\t\tstrings.Contains(payload[api.GithubWebhookFlagRef].(string),\n\t\t\t\tapi.GithubWebhookFlagTags) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func TagFilter(filtertags []string) SnippetFilter {\n\treturn func(snippet *snippet.Snippet) bool {\n\t\ttags := snippet.GetVar(\"tags\")\n\t\t// Implement OR-Logic\n\t\tfor _, tag := range strings.Split(tags, \",\") {\n\t\t\tfor _, filterTag := range filtertags {\n\t\t\t\tif filterTag == tag {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn false\n\t}\n}",
"func (_Htlc *HtlcFilterer) FilterRelease(opts *bind.FilterOpts) (*HtlcReleaseIterator, error) {\n\n\tlogs, sub, err := _Htlc.contract.FilterLogs(opts, \"Release\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &HtlcReleaseIterator{contract: _Htlc.contract, event: \"Release\", logs: logs, sub: sub}, nil\n}",
"func TagNameContains(v string) predicate.GithubRelease {\n\treturn predicate.GithubRelease(sql.FieldContains(FieldTagName, v))\n}",
"func ApplyFilters(secret v1.Secret, sub *appv1alpha1.Subscription) (v1.Secret, bool) {\n\tif klog.V(utils.QuiteLogLel) {\n\t\tfnName := utils.GetFnName()\n\t\tklog.Infof(\"Entering: %v()\", fnName)\n\n\t\tdefer klog.Infof(\"Exiting: %v()\", fnName)\n\t}\n\n\tsecret = CleanUpObject(secret)\n\n\tif sub.Spec.PackageFilter != nil {\n\t\tif sub.Spec.Package != \"\" && sub.Spec.Package != secret.GetName() {\n\t\t\tklog.Info(\"Name does not match, skiping:\", sub.Spec.Package, \"|\", secret.GetName())\n\t\t\treturn secret, false\n\t\t}\n\n\t\tsubAnno := sub.GetAnnotations()\n\t\tklog.V(10).Info(\"checking annotations filter:\", subAnno)\n\n\t\tif subAnno != nil {\n\t\t\tsecretsAnno := secret.GetAnnotations()\n\t\t\tfor k, v := range subAnno {\n\t\t\t\tif secretsAnno[k] != v {\n\t\t\t\t\tklog.Info(\"Annotation filter does not match:\", k, \"|\", v, \"|\", secretsAnno[k])\n\t\t\t\t\treturn secret, false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn secret, true\n}",
"func passFilter(image *runtime.Image, filter *runtime.ImageFilter) bool {\n\tif filter == nil {\n\t\treturn true\n\t}\n\n\tif filter.Image == nil {\n\t\treturn true\n\t}\n\n\timageName := filter.Image.Image\n\tfor _, name := range image.RepoTags {\n\t\tif imageName == name {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func matchTag(ctxt *build.Context, name string, allTags map[string]bool) bool {\n\tif allTags != nil {\n\t\tallTags[name] = true\n\t}\n\n\t// special tags\n\tif ctxt.CgoEnabled && name == \"cgo\" {\n\t\treturn true\n\t}\n\tif name == ctxt.GOOS || name == ctxt.GOARCH || name == ctxt.Compiler {\n\t\treturn true\n\t}\n\tif ctxt.GOOS == \"android\" && name == \"linux\" {\n\t\treturn true\n\t}\n\tif ctxt.GOOS == \"illumos\" && name == \"solaris\" {\n\t\treturn true\n\t}\n\tif ctxt.GOOS == \"ios\" && name == \"darwin\" {\n\t\treturn true\n\t}\n\tif name == \"unix\" && unixOS[ctxt.GOOS] {\n\t\treturn true\n\t}\n\tif name == \"boringcrypto\" {\n\t\tname = \"goexperiment.boringcrypto\" // boringcrypto is an old name for goexperiment.boringcrypto\n\t}\n\n\t// other tags\n\tfor _, tag := range ctxt.BuildTags {\n\t\tif tag == name {\n\t\t\treturn true\n\t\t}\n\t}\n\ttoolTags := extractToolTags(ctxt)\n\tfor _, tag := range toolTags {\n\t\tif tag == name {\n\t\t\treturn true\n\t\t}\n\t}\n\tfor _, tag := range ctxt.ReleaseTags {\n\t\tif tag == name {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func TagContains(t kappnavv1.Tag, substr string) bool {\n\treturn strings.Contains(string(t), substr)\n}",
"func (resource Resource) HasTag(tag string) bool {\n\tfor _, t := range resource.Tags {\n\t\tif t == tag {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func hasTag(ctx context.Context, tags []*ec2.Tag, key string, value string, awsVolume string, awsRegion string) bool {\n\tfor i := range tags {\n\t\tif *tags[i].Key == key && *tags[i].Value == value {\n\t\t\tlogWithCtx(ctx).WithFields(log.Fields{\"tagKey\": key, \"tagValue\": value, \"volId\": awsVolume, \"region\": awsRegion}).Info(\"Tag value already exists\")\n\t\t\ttagsExisting.Inc()\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func FilterDeployments(c *cli.Context) []Kind {\n\targs := c.Args()\n\tuid := c.String(\"uid\")\n\tlabel := c.String(\"label\")\n\tnamespace := c.String(\"namespace\")\n\n\tvar candidates []Kind\n\tvar found []Kind\n\n\t// check args which should contains pod names\n\tfor _, v := range GetDeployments() {\n\t\tif c.NArg() > 0 {\n\t\t\tfor _, a := range args {\n\t\t\t\tif utils.Match(v.Name, a, true) {\n\t\t\t\t\tcandidates = append(candidates, v)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tcandidates = append(candidates, v)\n\t\t}\n\t}\n\n\tfor _, v := range candidates {\n\t\to := v.(*Deployment)\n\t\t// check uid\n\t\tif uid != \"\" && !utils.Match(o.UID, uid, true) {\n\t\t\tcontinue\n\t\t}\n\t\t// check namespace\n\t\tif namespace != \"\" && !utils.Match(o.Namespace, namespace, true) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// check label\n\t\tif label != \"\" {\n\t\t\t// one or more labels may be provided\n\t\t\tlabelNotFound := false\n\t\t\tfor _, l := range strings.Split(label, \";\") {\n\t\t\t\tif !o.LabelFound(l) {\n\t\t\t\t\tlabelNotFound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif labelNotFound {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\t// found it if it reachs this point\n\t\tfound = append(found, o)\n\t}\n\n\treturn found\n}",
"func IsReleasedTagVersion(version string) bool {\n\treturn regexp.MustCompile(`^v\\d+\\.\\d+\\.\\d$`).MatchString(version)\n}",
"func (m Mineral) HasTag(tag string) bool {\n\tfor _, t := range m.Tags {\n\t\tif t == tag {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (s Space) HasTag(needle string) bool {\n\tisPrefix := strings.HasSuffix(needle, \"/\")\n\tfor i := range s.Tags {\n\t\tswitch isPrefix {\n\t\tcase true:\n\t\t\tif strings.HasPrefix(s.Tags[i], needle) {\n\t\t\t\treturn true\n\t\t\t}\n\t\tcase false:\n\t\t\tif s.Tags[i] == needle {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}",
"func (fft *FreeFormTags) Contains(k string) (ok bool) {\n\tif fft.Tags == nil {\n\t\treturn false\n\t}\n\treturn fft.Tags.Contains(k)\n}",
"func (r TaggedResource) FilterThroughTags(filterTags []Tag) bool {\n\tif len(filterTags) == 0 {\n\t\treturn true\n\t}\n\n\ttagMatches := 0\n\n\tfor _, resourceTag := range r.Tags {\n\t\tfor _, filterTag := range filterTags {\n\t\t\tif resourceTag.Key == filterTag.Key {\n\t\t\t\tr, _ := regexp.Compile(filterTag.Value)\n\t\t\t\tif r.MatchString(resourceTag.Value) {\n\t\t\t\t\ttagMatches++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn tagMatches == len(filterTags)\n}",
"func (dc *DockerClient) ValidTag(desiredTag, repository string) (bool, error) {\n\thub, err := registry.New(dc.registryURL, dc.username, dc.password)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\ttags, err := hub.Tags(repository)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tfor _, tag := range tags {\n\t\tif tag == desiredTag {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\treturn false, nil\n}",
"func (s *DockerKubeletService) HasFilter(filter containers.FilterType) bool {\n\treturn false\n}",
"func (s *KubePodService) HasFilter(filter containers.FilterType) bool {\n\treturn false\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
NewUncertaintyGroup furnishes an UncertaintyGroup for a given set of actions where their quantity is known a priori. | func NewUncertaintyGroup(count uint) UncertaintyGroup {
return &uncertaintyGroup{
remaining: count,
results: make(chan error),
}
} | [
"func newGroupAtLeastOnce() *Instruction {\n\treturn &Instruction{\n\t\tType: GroupAtLeastOnceInst,\n\t\tName: \"AtLeastOnce\",\n\t}\n}",
"func newAggrGroup(ctx context.Context, labels model.LabelSet, r *Route, to func(time.Duration) time.Duration, logger log.Logger) *aggrGroup {\n\tif to == nil {\n\t\tto = func(d time.Duration) time.Duration { return d }\n\t}\n\tag := &aggrGroup{\n\t\tlabels: labels,\n\t\trouteKey: r.Key(),\n\t\topts: &r.RouteOpts,\n\t\ttimeout: to,\n\t\talerts: store.NewAlerts(),\n\t\tdone: make(chan struct{}),\n\t}\n\tag.ctx, ag.cancel = context.WithCancel(ctx)\n\n\tag.logger = log.With(logger, \"aggrGroup\", ag)\n\n\t// Set an initial one-time wait before flushing\n\t// the first batch of notifications.\n\tag.next = time.NewTimer(ag.opts.GroupWait)\n\n\treturn ag\n}",
"func (st *buildStatus) newTestSet(testStats *buildstats.TestStats, names []distTestName) (*testSet, error) {\n\tset := &testSet{\n\t\tst: st,\n\t\ttestStats: testStats,\n\t}\n\tfor _, name := range names {\n\t\tset.items = append(set.items, &testItem{\n\t\t\tset: set,\n\t\t\tname: name,\n\t\t\tduration: testStats.Duration(st.BuilderRev.Name, name.Old),\n\t\t\ttake: make(chan token, 1),\n\t\t\tdone: make(chan token),\n\t\t})\n\t}\n\treturn set, nil\n}",
"func newExpectedMachineSetCreateAction(cluster *clusteroperator.Cluster, name string) expectedMachineSetCreateAction {\n\treturn expectedMachineSetCreateAction{\n\t\tnamePrefix: getNamePrefixForMachineSet(cluster, name),\n\t}\n}",
"func assertUncategorized(u *Unit) {\n\tif u.group != nil && u.group != uncategorized {\n\t\tpanic(fmt.Errorf(\"unit %q is already a member of group %q\", u.ID(), u.group.Name()))\n\t}\n}",
"func (l *GroupLookup) newKeyGroup(entries []groupKeyListElement) *groupKeyList {\n\tid := l.nextID\n\tl.nextID++\n\treturn &groupKeyList{\n\t\tid: id,\n\t\telements: entries,\n\t}\n}",
"func GroupCreateFailure(actionID string, errors []string) *Action {\n\treturn constructFailureAction(actionID, constants.GroupCreateFailure, errors)\n}",
"func newGroupMutation(c config, op Op, opts ...groupOption) *GroupMutation {\n\tm := &GroupMutation{\n\t\tconfig: c,\n\t\top: op,\n\t\ttyp: TypeGroup,\n\t\tclearedFields: make(map[string]struct{}),\n\t}\n\tfor _, opt := range opts {\n\t\topt(m)\n\t}\n\treturn m\n}",
"func newTiKVGroups(c *PingcapV1alpha1Client, namespace string) *tiKVGroups {\n\treturn &tiKVGroups{\n\t\tclient: c.RESTClient(),\n\t\tns: namespace,\n\t}\n}",
"func newStatGroup(size uint64) *statGroup {\n\treturn &statGroup{\n\t\tvalues: make([]float64, size),\n\t\tcount: 0,\n\t}\n}",
"func newGroupAnyOrder() *Instruction {\n\treturn &Instruction{\n\t\tType: GroupAnyOrderInst,\n\t\tName: \"AnyOrder\",\n\t}\n}",
"func TestProposeBadGroup(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tstopper := stop.NewStopper()\n\tcluster := newTestCluster(nil, 3, stopper, t)\n\tdefer stopper.Stop()\n\terr := <-cluster.nodes[1].SubmitCommand(7, \"asdf\", []byte{})\n\tif err == nil {\n\t\tt.Fatal(\"did not get expected error\")\n\t}\n}",
"func NewGroup(ctx context.Context) *errGroup {\n\tnewCtx, cancel := context.WithCancel(ctx)\n\treturn &errGroup{\n\t\tctx: newCtx,\n\t\tcancel: cancel,\n\t}\n}",
"func (_IFactorySpace *IFactorySpaceTransactor) CreateGroup(opts *bind.TransactOpts) (*types.Transaction, error) {\n\treturn _IFactorySpace.contract.Transact(opts, \"createGroup\")\n}",
"func (c *UMemClient) NewCreateUMemcacheGroupRequest() *CreateUMemcacheGroupRequest {\n\treq := &CreateUMemcacheGroupRequest{}\n\n\t// setup request with client config\n\tc.Client.SetupRequest(req)\n\n\t// setup retryable with default retry policy (retry for non-create action and common error)\n\treq.SetRetryable(false)\n\treturn req\n}",
"func New(ctx context.Context, concurrency int) (*Group, context.Context) {\n\tif concurrency < 1 {\n\t\tconcurrency = 1\n\t}\n\n\tparent, ctx := errgroup.WithContext(ctx)\n\treturn &Group{\n\t\tlimiter: make(chan struct{}, concurrency),\n\t\tparent: parent,\n\t\tctx: ctx,\n\t}, ctx\n}",
"func newExpectedMachineSetDeleteAction(cluster *clusteroperator.Cluster, name string) expectedMachineSetDeleteAction {\n\treturn expectedMachineSetDeleteAction{\n\t\tnamePrefix: getNamePrefixForMachineSet(cluster, name),\n\t}\n}",
"func NewErrGroup(ctx context.Context, concurrency int) *ErrGroup {\n\tctx, cancel := context.WithCancel(ctx)\n\treturn &ErrGroup{\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t\tlimit: make(chan struct{}, concurrency),\n\t}\n}",
"func newProviderGroup(k key) *providerGroup {\n\tifaceKey := key{\n\t\tres: reflect.SliceOf(k.res),\n\t\ttyp: ptGroup,\n\t}\n\n\treturn &providerGroup{\n\t\tresult: ifaceKey,\n\t\tpl: parameterList{},\n\t}\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
ByteOrder returns the byte order for the CPU's native endianness. | func ByteOrder() binary.ByteOrder { return byteOrder } | [
"func GetByteOrder() binary.ByteOrder {\n\tbuf := [2]byte{}\n\t*(*uint16)(unsafe.Pointer(&buf[0])) = uint16(0xABCD)\n\n\tswitch buf {\n\tcase [2]byte{0xCD, 0xAB}:\n\t\treturn binary.LittleEndian\n\tcase [2]byte{0xAB, 0xCD}:\n\t\treturn binary.BigEndian\n\tdefault:\n\t\tpanic(\"Could not determine native endianness.\")\n\t}\n}",
"func NativeEndian() binary.ByteOrder {\n\t// TODO(mdlayher): consider deprecating and removing this function for v2.\n\treturn native.Endian\n}",
"func (bio *BinaryIO) ByteOrder() binary.ByteOrder {\n\treturn bio.order\n}",
"func (jbobject *JavaNioCharBuffer) Order() *JavaNioByteOrder {\n\tjret, err := jbobject.CallMethod(javabind.GetEnv(), \"order\", \"java/nio/ByteOrder\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tretconv := javabind.NewJavaToGoCallable()\n\tdst := &javabind.Callable{}\n\tretconv.Dest(dst)\n\tif err := retconv.Convert(javabind.ObjectRef(jret)); err != nil {\n\t\tpanic(err)\n\t}\n\tretconv.CleanUp()\n\tunique_x := &JavaNioByteOrder{}\n\tunique_x.Callable = dst\n\treturn unique_x\n}",
"func GetEndian() ProtobufArchType {\n\tmu.RLock()\n\tdefer mu.RUnlock()\n\treturn endian_type\n\n}",
"func getEndian() (ret bool) {\n\tvar i = 0x1\n\tbs := (*[intSize]byte)(unsafe.Pointer(&i))\n\n\treturn bs[0] == 0\n}",
"func getByteOrder(byteOrder binary.ByteOrder) bool {\n\tle := false\n\tswitch byteOrder {\n\tcase binary.BigEndian:\n\tcase binary.LittleEndian:\n\t\tle = true\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"invalid byte order %v\", byteOrder))\n\t}\n\treturn le\n}",
"func (d *Debugger) Endianness() (Endianness, error) {\n\tregs, err := d.ReadRegAll()\n\tif err != nil {\n\t\treturn LittleEndian, err\n\t}\n\n\treturn d.arch.endianness(regs), nil\n}",
"func ReaderByteOrder(r *dwarf.Reader,) binary.ByteOrder",
"func BinaryOrder(buf []byte) binary.ByteOrder {\n\tif isTiffBigEndian(buf[:4]) {\n\t\treturn binary.BigEndian\n\t}\n\tif isTiffLittleEndian(buf[:4]) {\n\t\treturn binary.LittleEndian\n\t}\n\treturn nil\n}",
"func nativeToBigEndian(i uint32) uint32 {\n\tif cpu.IsBigEndian {\n\t\treturn i\n\t}\n\treturn bits.ReverseBytes32(i)\n}",
"func StringToByteOrder(s string) binary.ByteOrder {\n\tswitch strings.ToLower(s) {\n\tcase \"ndr\":\n\t\treturn binary.LittleEndian\n\tcase \"xdr\":\n\t\treturn binary.BigEndian\n\tdefault:\n\t\treturn DefaultEWKBEncodingFormat\n\t}\n}",
"func init() {\n\ti := uint32(1)\n\tb := (*[4]byte)(unsafe.Pointer(&i))\n\tif b[0] == 1 {\n\t\tnativeEndian = binary.LittleEndian\n\t} else {\n\t\tnativeEndian = binary.BigEndian\n\t}\n}",
"func DwarfEndian(infoSec []byte) binary.ByteOrder {\n\tif len(infoSec) < 6 {\n\t\treturn binary.BigEndian\n\t}\n\tx, y := infoSec[4], infoSec[5]\n\tswitch {\n\tcase x == 0 && y == 0:\n\t\treturn binary.BigEndian\n\tcase x == 0:\n\t\treturn binary.BigEndian\n\tcase y == 0:\n\t\treturn binary.LittleEndian\n\tdefault:\n\t\treturn binary.BigEndian\n\t}\n}",
"func init() {\n\ti := 0x1\n\tb := (*[intSize]byte)(unsafe.Pointer(&i))\n\tif b[0] == 1 {\n\t\t// LittleEndian is the little-endian implementation of ByteOrder\n\t\tNativeEndian = binary.LittleEndian\n\t} else {\n\t\t// BigEndian is the Big-endian implementation of ByteOrder\n\t\tNativeEndian = binary.BigEndian\n\t}\n}",
"func ConvertEndian(num uint32) uint32 {\n\treturn ((num >> 24) & 0xff) | // move byte 3 to byte 0\n\t\t((num << 8) & 0xff0000) | // move byte 1 to byte 2\n\t\t((num >> 8) & 0xff00) | // move byte 2 to byte 1\n\t\t((num << 24) & 0xff000000)\n}",
"func ToNetworkOrder32(n uint32) uint32 {\n\tif native == networkOrder {\n\t\treturn n\n\t} else {\n\t\treturn ntohl(htohl(n))\n\t}\n}",
"func Btf__endianness(btf *Struct_btf) Enum_btf_endianness {\n\treturn C.btf__endianness()\n}",
"func getBigEndian() ([]string, error) {\n\tfilename := runtime.GOROOT() + \"/src/internal/goarch/goarch.go\"\n\tfs := token.NewFileSet()\n\tfileAST, err := parser.ParseFile(fs, filename, nil, parser.Mode(0))\n\t//fileAST, err := parser.ParseFile(fs, filename, nil, parser.Trace)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t/*\n\n\t\t// BigEndian reports whether the architecture is big-endian.\n\t\tconst BigEndian = IsArmbe|IsArm64be|IsMips|IsMips64|IsPpc|IsPpc64|IsS390|IsS390x|IsSparc|IsSparc64 == 1\n\n\t*/\n\n\tif len(fileAST.Decls) == 0 {\n\t\treturn nil, fmt.Errorf(\"%s: no Decls in AST\", filename)\n\t}\n\t// fmt.Printf(\"%#v\\n\", fileAST.Decls)\n\tfor _, decl := range fileAST.Decls {\n\t\tdecl, ok := decl.(*ast.GenDecl)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tif decl.Tok != token.CONST {\n\t\t\tcontinue\n\t\t}\n\t\tspec := decl.Specs[0].(*ast.ValueSpec)\n\t\tif len(spec.Names) != 1 || spec.Names[0].Name != \"BigEndian\" {\n\t\t\tcontinue\n\t\t}\n\t\t// We found the const \"BigEndian\"\n\t\t// Let's extract its value!\n\t\tif len(spec.Values) != 1 {\n\t\t\treturn nil, fmt.Errorf(\"%s: single value expected for const BigEndian\", filename)\n\t\t}\n\n\t\tvar archs []string\n\n\t\tlist := spec.Values[0].(*ast.BinaryExpr).X.(*ast.BinaryExpr)\n\t\tfor {\n\t\t\tarch := strings.ToLower(strings.TrimPrefix(list.Y.(*ast.Ident).Name, \"Is\"))\n\t\t\tarchs = append(archs, arch)\n\n\t\t\tvar ok bool\n\t\t\tlist2, ok := list.X.(*ast.BinaryExpr)\n\t\t\tif !ok {\n\t\t\t\tarch = strings.ToLower(strings.TrimPrefix(list.X.(*ast.Ident).Name, \"Is\"))\n\t\t\t\tarchs = append(archs, arch)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlist = list2\n\t\t}\n\n\t\t// Reverse\n\t\tfor i, j := 0, len(archs)-1; i < j; i, j = i+1, j-1 {\n\t\t\tarchs[i], archs[j] = archs[j], archs[i]\n\t\t}\n\n\t\treturn archs, nil\n\t}\n\n\treturn nil, fmt.Errorf(\"%s: const BigEndian not found\", filename)\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Init creates a new light string | func (ls *LightString) Init() {
log.Infof("Creating %v pixel light string", ls.Count)
ls.Last = ls.Count // default last pixel to count
n := 0
for n < ls.Count {
ls.Pixels = append(ls.Pixels, &Pixel{
Color: defaultPixelColor,
Brightness: defaultPixelBrightness,
})
n++
}
} | [
"func (td *TextDisplay)Init(){\r\n\tfontBytes, err := ioutil.ReadFile(\"Blockstepped.ttf\")\r\n\tif err != nil {\r\n\t\tlog.Println(err)\r\n\t\treturn\r\n\t}\r\n\tvar err2 error\r\n\ttd.font, err2 = truetype.Parse(fontBytes)\r\n\tif err2 != nil {\r\n\t\tlog.Fatal(err)\r\n\t}\r\n\ttd.mplusNormalFont = truetype.NewFace(td.font, &truetype.Options{\r\n\t\tSize: 24,\r\n\t\tDPI: 72,\r\n\t\tHinting: font.HintingFull,\r\n\t})\r\n\ttd.text = \"Game Over!!!!!!!!!!!!! - Press Enter to restart!\"\r\n}",
"func (c *ChromaHighlight) init() (err error) {\n\n\t// Option handling registering formatters\n\tswitch c.formatter {\n\tcase \"gtkDirectToTextBuffer\":\n\t\tformatters.Register(\"gtkDirectToTextBuffer\", chroma.FormatterFunc(c.gtkDirectToTextBufferFormatter))\n\tcase \"gtkTextBuffer\":\n\t\tformatters.Register(\"gtkTextBuffer\", chroma.FormatterFunc(c.gtkTextBufferFormatter))\n\tcase \"pango\":\n\t\tformatters.Register(\"pango\", chroma.FormatterFunc(c.pangoFormatter))\n\t}\n\n\t// Used to parse GdkColor\n\tc.regBG = regexp.MustCompile(`bg:#[a-fA-F|0-9]{6}`)\n\tc.regFG = regexp.MustCompile(`#[a-fA-F|0-9]{6}`)\n\n\tc.RemoveTags()\n\n\t// To check if source text have been modified.\n\tc.md5SizeAnalyze = 1024 // Set to 0 means there is no limit\n\n\tswitch c.srcBuff {\n\tcase nil:\n\t\tc.textTagTable, err = c.txtBuff.GetTagTable()\n\tdefault:\n\t\tc.textTagTable, err = c.srcBuff.GetTagTable()\n\t}\n\treturn\n}",
"func NewFromStr(contents string) *Render {\n\treturn &Render{\n\t\tLines: []Line{\n\t\t\tLine{\n\t\t\t\tContent: \"one\",\n\t\t\t},\n\t\t\tLine{\n\t\t\t\tContent: \"two\",\n\t\t\t},\n\t\t},\n\t}\n}",
"func init() {\n\tflag.Usage = usage\n\t// NOTE: This next line is key you have to call flag.Parse() for the command line\n\t// options or \"flags\" that are defined in the glog module to be picked up.\n\tflag.Parse()\n\n\tmodelName = \"RGB-LIGHT\"\n\tDeviceName = \"rgb-light-device\"\n\tDeviceName = os.Getenv(\"DEVICE_NAME\")\n\n\tMQTTURL = \"tcp://127.0.0.1:1884\"\n\trgb = []string{\"red-pwm\", \"green-pwm\", \"blue-pwm\"}\n\tdefaultRGBValue = []int{50, 50, 50}\n\trpin, _ := strconv.Atoi(os.Getenv(\"RPIN\"))\n\tgpin, _ := strconv.Atoi(os.Getenv(\"GPIN\"))\n\tbpin, _ := strconv.Atoi(os.Getenv(\"BPIN\"))\n\trgbPinNumber = []int{rpin, gpin, bpin}\n\n\tglog.Info(\"Init MQTT client...\")\n\tClientOpts = HubClientInit(MQTTURL, \"eventbus\", \"\", \"\")\n\tClient = MQTT.NewClient(ClientOpts)\n\tif Token_client = Client.Connect(); Token_client.Wait() && Token_client.Error() != nil {\n\t\tglog.Error(\"client.Connect() Error is \", Token_client.Error())\n\t}\n\terr := LoadConfigMap()\n\tif err != nil {\n\t\tglog.Error(errors.New(\"Error while reading from config map \" + err.Error()))\n\t\tos.Exit(1)\n\t}\n}",
"func (s *Service) InitColor() {\n\ts.color = GetNextFriendlyColor()\n}",
"func (ls *LightString) Render() {\n\tlog.Debug(\"Rendering string\")\n}",
"func (l LangPackString) construct() LangPackStringClass { return &l }",
"func (st *State) Init(variant Variant) {\n\tst.Variant = variant\n\tst.ParseFen(VariantInfos[st.Variant].StartFen)\n}",
"func (i *Iter) InitString(f Form, src string) {\n\ti.p = 0\n\tif len(src) == 0 {\n\t\ti.setDone()\n\t\ti.rb.nsrc = 0\n\t\treturn\n\t}\n\ti.multiSeg = nil\n\ti.rb.initString(f, src)\n\ti.next = i.rb.f.nextMain\n\ti.asciiF = nextASCIIString\n\ti.info = i.rb.f.info(i.rb.src, i.p)\n\ti.rb.ss.first(i.info)\n}",
"func (ld *LEDraw) Init() {\n\tif ld.ImgSize.X == 0 || ld.ImgSize.Y == 0 {\n\t\tld.Defaults()\n\t}\n\tif ld.Image != nil {\n\t\tcs := ld.Image.Bounds().Size()\n\t\tif cs != ld.ImgSize {\n\t\t\tld.Image = nil\n\t\t}\n\t}\n\tif ld.Image == nil {\n\t\tld.Image = image.NewRGBA(image.Rectangle{Max: ld.ImgSize})\n\t}\n\tld.Render.Init(ld.ImgSize.X, ld.ImgSize.Y, ld.Image)\n\tld.Paint.Defaults()\n\tld.Paint.StrokeStyle.Width.SetPct(ld.Width)\n\tld.Paint.StrokeStyle.Color.SetName(string(ld.LineColor))\n\tld.Paint.FillStyle.Color.SetName(string(ld.BgColor))\n\tld.Paint.SetUnitContextExt(ld.ImgSize)\n}",
"func InitToml(s string) {\n\t_, err := toml.DecodeFile(s, &Cfg)\n\tif err != nil {\n\t\tuseDefaults()\n\t}\n}",
"func New() *NL { return &NL{Output: bytes.NewBufferString(\"\")} }",
"func (shell *FishShell) Init() string {\n\treturn packr.NewBox(\"assets\").String(\"leader.fish.sh\")\n}",
"func init() {\n\tinternal.TypeString = TypeString\n\tinternal.TypeStringOn = TypeStringOn\n}",
"func (rndr *Renderer) Init(snatOnly bool) error {\n\trndr.snatOnly = snatOnly\n\trndr.natGlobalCfg = &vpp_nat.Nat44Global{\n\t\tForwarding: true,\n\t}\n\tif rndr.Config == nil {\n\t\trndr.Config = config.DefaultConfig()\n\t}\n\treturn nil\n}",
"func (shell *POSIXShell) Init() string {\n\treturn packr.NewBox(\"assets\").String(fmt.Sprintf(\"leader.%s.sh\", shell.name))\n}",
"func (c *Conf) InitFromString(content string) error {\n\treturn c.InitFromBytes(([]byte)(content))\n}",
"func NewFromString(htmlString string) (r *Recipe, err error) {\n\tr = &Recipe{FileName: \"string\"}\n\tr.FileContent = htmlString\n\terr = r.parseHTML()\n\treturn\n}",
"func (tx *TextureBase) Init(sc *Scene) error {\n\tif tx.Tex != nil {\n\t\ttx.Tex.SetBotZero(tx.Bot0)\n\t\ttx.Tex.Activate(0)\n\t}\n\treturn nil\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
FillString writes a color to all pixels | func (ls *LightString) FillString(color uint8) {
log.Debugf("Filling pixels with color %v", color)
n := 0
for n < ls.Last {
ls.Pixels[n].Color = color
log.Debugf("Coloring pixel %v %v", n, color)
n++
}
} | [
"func (rgc *RasterGraphicContext) FillStringAt(text string, x, y float64) (cursor float64, err error) {\n\tcursor, err = rgc.CreateStringPath(text, x, y)\n\trgc.Fill()\n\treturn\n}",
"func (t *Textile) Fill(str string) {\n\tarea := t.Rect\n\tfor y := area.Min.Y; y < area.Max.Y; y++ {\n\t\tfor x := area.Min.X; x < area.Max.X; x++ {\n\t\t\tt.Set(x, y, str)\n\t\t}\n\t}\n}",
"func PaintFill(image [][]rune, row, col int, newColor rune) {\n\tpaintFillHelper(image, row, col, image[row][col], newColor)\n}",
"func (tb *Textbox) Fill(u rune) error {\n\tif !utf8.ValidRune(u) {\n\t\treturn errors.New(\"invalid rune\")\n\t}\n\n\tfor i := range tb.pixels {\n\t\ttb.pixels[i] = u\n\t}\n\treturn nil\n}",
"func (c *Context2D) FillText(text string, p *Point) { c.Call(\"fillText\", text, p.X, p.Y) }",
"func AsStringFill(value string) AsStringAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"fill\"] = value\n\t}\n}",
"func fill(pix []byte, c color.RGBA) {\n\tfor i := 0; i < len(pix); i += 4 {\n\t\tpix[i] = c.R\n\t\tpix[i+1] = c.G\n\t\tpix[i+2] = c.B\n\t\tpix[i+3] = c.A\n\t}\n}",
"func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) }",
"func (b Buffer) Fill(ch rune, fg, bg Attribute) {\n\tfor x := b.Area.Min.X; x < b.Area.Max.X; x++ {\n\t\tfor y := b.Area.Min.Y; y < b.Area.Max.Y; y++ {\n\t\t\tb.Set(x, y, Cell{ch, fg, bg})\n\t\t}\n\t}\n}",
"func (cv *Canvas) FillText(str string, x, y float64) {\n\tif cv.state.font.font == nil {\n\t\treturn\n\t}\n\n\tscaleX := backendbase.Vec{cv.state.transform[0], cv.state.transform[1]}.Len()\n\tscaleY := backendbase.Vec{cv.state.transform[2], cv.state.transform[3]}.Len()\n\tscale := (scaleX + scaleY) * 0.5\n\tfontSize := fixed.Int26_6(math.Round(float64(cv.state.fontSize) * scale))\n\n\t// if the font size is large or rotated or skewed in some way, use the\n\t// triangulated font rendering\n\tif fontSize > fixed.I(25) {\n\t\tcv.fillText2(str, x, y)\n\t\treturn\n\t}\n\tmat := cv.state.transform\n\tif mat[1] != 0 || mat[2] != 0 || mat[0] != mat[3] {\n\t\tcv.fillText2(str, x, y)\n\t\treturn\n\t}\n\n\tfrc := cv.getFRContext(cv.state.font, fontSize)\n\tfnt := cv.state.font.font\n\n\tstrWidth, strHeight, textOffset, str := cv.measureTextRendering(str, &x, &y, frc, scale)\n\tif strWidth <= 0 || strHeight <= 0 {\n\t\treturn\n\t}\n\n\t// make sure textImage is large enough for the rendered string\n\tif textImage == nil || textImage.Bounds().Dx() < strWidth || textImage.Bounds().Dy() < strHeight {\n\t\tvar size int\n\t\tfor size = 2; size < alphaTexSize; size *= 2 {\n\t\t\tif size >= strWidth && size >= strHeight {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif size > alphaTexSize {\n\t\t\tsize = alphaTexSize\n\t\t}\n\t\ttextImage = image.NewAlpha(image.Rect(0, 0, size, size))\n\t}\n\n\t// clear the render region in textImage\n\tfor y := 0; y < strHeight; y++ {\n\t\toff := textImage.PixOffset(0, y)\n\t\tline := textImage.Pix[off : off+strWidth]\n\t\tfor i := range line {\n\t\t\tline[i] = 0\n\t\t}\n\t}\n\n\t// render the string into textImage\n\tcurX := x\n\tp := fixed.Point26_6{}\n\tprev, hasPrev := truetype.Index(0), false\n\tfor _, rn := range str {\n\t\tidx := fnt.Index(rn)\n\t\tif idx == 0 {\n\t\t\tprev = 0\n\t\t\thasPrev = false\n\t\t\tcontinue\n\t\t}\n\t\tif hasPrev {\n\t\t\tkern := fnt.Kern(fontSize, prev, idx)\n\t\t\tif frc.hinting != font.HintingNone {\n\t\t\t\tkern = (kern 
+ 32) &^ 63\n\t\t\t}\n\t\t\tcurX += float64(kern) / 64\n\t\t}\n\t\tadvance, mask, offset, err := frc.glyph(idx, p)\n\t\tif err != nil {\n\t\t\tprev = 0\n\t\t\thasPrev = false\n\t\t\tcontinue\n\t\t}\n\t\tp.X += advance\n\n\t\tdraw.Draw(textImage, mask.Bounds().Add(offset).Sub(textOffset), mask, image.ZP, draw.Over)\n\n\t\tcurX += float64(advance) / 64\n\t}\n\n\t// render textImage to the screen\n\tvar pts [4]backendbase.Vec\n\tpts[0] = cv.tf(backendbase.Vec{float64(textOffset.X)/scale + x, float64(textOffset.Y)/scale + y})\n\tpts[1] = cv.tf(backendbase.Vec{float64(textOffset.X)/scale + x, float64(textOffset.Y)/scale + float64(strHeight)/scale + y})\n\tpts[2] = cv.tf(backendbase.Vec{float64(textOffset.X)/scale + float64(strWidth)/scale + x, float64(textOffset.Y)/scale + float64(strHeight)/scale + y})\n\tpts[3] = cv.tf(backendbase.Vec{float64(textOffset.X)/scale + float64(strWidth)/scale + x, float64(textOffset.Y)/scale + y})\n\n\tmask := textImage.SubImage(image.Rect(0, 0, strWidth, strHeight)).(*image.Alpha)\n\n\tcv.drawShadow(pts[:], mask, false)\n\n\tstl := cv.backendFillStyle(&cv.state.fill, 1)\n\tcv.b.FillImageMask(&stl, mask, pts)\n}",
"func ColorString(code int, str string) string {\n\treturn fmt.Sprintf(\"\\x1b[%d;1m%s\\x1b[39;22m\", code, str)\n}",
"func (gc *GraphicContext) FillStringAt(text string, x, y float64) (width float64) {\n\tf, err := gc.loadCurrentFont()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn 0.0\n\t}\n\tstartx := x\n\tprev, hasPrev := truetype.Index(0), false\n\tfontName := gc.GetFontName()\n\tfor _, r := range text {\n\t\tindex := f.Index(r)\n\t\tif hasPrev {\n\t\t\tx += fUnitsToFloat64(f.Kern(fixed.Int26_6(gc.Current.Scale), prev, index))\n\t\t}\n\t\tglyph := gc.glyphCache.Fetch(gc, fontName, r)\n\t\tx += glyph.Fill(gc, x, y)\n\t\tprev, hasPrev = index, true\n\t}\n\treturn x - startx\n}",
"func (t *TerminalLogger) drawString(x, y, w int, text string, fg, bg termbox.Attribute) {\n\tpos := x\n\tfor _, ch := range text {\n\t\ttermbox.SetCell(pos, y, ch, fg, bg)\n\t\tpos++\n\t\tif pos > w {\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (me *TxsdPresentationAttributesColorColorInterpolation) Set(s string) { (*xsdt.String)(me).Set(s) }",
"func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) }",
"func (r *RasterTextNode) SetFill(fill int) {\n\tr.fill = fill\n}",
"func Color(str string, color int) string {\n\treturn applyTransform(str, func(idx int, line string) string {\n\t\treturn fmt.Sprintf(\"%s%s%s\", getColor(color), line, RESET)\n\t})\n}",
"func (r *Renderer) FillCharCoordinate(cx, cy int, c utils.ColorRGBA) {\n\tcr, cg, cb, ca, _ := r.renderer.GetDrawColor()\n\tvar bm sdl.BlendMode\n\tr.renderer.GetDrawBlendMode(&bm)\n\tr.renderer.SetDrawBlendMode(sdl.BLENDMODE_BLEND)\n\tr.renderer.SetDrawColor(c.R, c.G, c.B, c.A)\n\tr.renderer.FillRect(&sdl.Rect{X: int32((cx + r.OriginX) * r.GlyphWidth), Y: int32((cy + r.OriginY) * r.GlyphHeight), W: int32(r.GlyphWidth), H: int32(r.GlyphHeight)})\n\tr.renderer.SetDrawColor(cr, cg, cb, ca)\n\tr.renderer.SetDrawBlendMode(bm)\n}",
"func (p *Page) FillRGB(r, g, b float64) {\n\tfmt.Fprint(p.contents, r, g, b, \" rg \")\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Render writes the string to hardware | func (ls *LightString) Render() {
log.Debug("Rendering string")
} | [
"func (g *Game) Render() string {\n\tascii := \"\"\n\n\tm := g.generateScreen()\n\tfor _, row := range m.cells {\n\t\tascii += strings.Join(row, \"\") + \"\\n\"\n\t}\n\n\treturn ascii\n}",
"func (d *Device) Render() error {\n\tbuf := new(bytes.Buffer)\n\n\tfor _, chain := range d.LEDs {\n\t\tfor _, col := range chain {\n\t\t\tbuf.Write([]byte{col.R, col.G, col.B})\n\t\t}\n\t}\n\n\t_, err := Conn.WriteToUDP(buf.Bytes(), d.Addr)\n\treturn err\n}",
"func (c *Ctx) RenderString(code int, s string) {\n\tc.W.WriteHeader(code)\n\tc.W.Write([]byte(s))\n}",
"func (s *Source) Render() ([]byte, error) {\n\tvar ret bytes.Buffer\n\tfmt.Fprintf(&ret, \"<source>\")\n\tfmt.Fprintf(&ret, \"\\n @type forward\")\n\tfmt.Fprintf(&ret, \"\\n port %d\", s.port)\n\tfmt.Fprintf(&ret, \"\\n bind 0.0.0.0\")\n\tfmt.Fprintf(&ret, \"\\n</source>\")\n\n\treturn ret.Bytes(), nil\n}",
"func (c *Controller) RenderString() (string, error) {\n\tb, e := c.RenderBytes()\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\treturn string(b), e\n}",
"func (v Binary) Render(i, width int, baseStyle lipgloss.Style) string {\n\tw := dataWidth(width)\n\t_, err := v.Seek(int64(i*w), io.SeekStart)\n\tif err != nil {\n\t\treturn baseStyle.Blink(true).Render(err.Error())\n\t}\n\tif len(v.b) != w {\n\t\tv.b = make([]byte, w)\n\t}\n\tn, err := v.Read(v.b)\n\tif err != nil && !errors.Is(err, io.EOF) {\n\t\treturn baseStyle.Blink(true).Render(err.Error())\n\t}\n\ts := fmt.Sprintf(\"% X%s \", v.b[0:n], strings.Repeat(\" \", w-n))\n\tvar x strings.Builder\n\tfor i := 0; i < n; i++ {\n\t\tif unicode.IsPrint(rune(v.b[i])) {\n\t\t\tx.WriteRune(rune(v.b[i]))\n\t\t} else {\n\t\t\tx.WriteRune('.')\n\t\t}\n\t}\n\treturn baseStyle.Render(s + x.String())\n}",
"func Render(dest *[]byte, src []byte, m Mode) error {\n\tvar err error\n\t*dest, err = render(*dest, src, C.Mode(m))\n\treturn err\n}",
"func (msg *Message) Render() string {\n\treturn msg.RenderBuffer().String()\n}",
"func Render(rawBytes []byte, urlPrefix string, metas map[string]string, isWiki bool) []byte {\n\thtmlWriter := org.NewHTMLWriter()\n\n\trenderer := &Renderer{\n\t\tHTMLWriter: htmlWriter,\n\t\tURLPrefix: urlPrefix,\n\t\tIsWiki: isWiki,\n\t}\n\n\thtmlWriter.ExtendingWriter = renderer\n\n\tres, err := org.New().Silent().Parse(bytes.NewReader(rawBytes), \"\").Write(renderer)\n\tif err != nil {\n\t\tlog.Error(\"Panic in orgmode.Render: %v Just returning the rawBytes\", err)\n\t\treturn rawBytes\n\t}\n\treturn []byte(res)\n}",
"func (m *Margaid) Render(writer io.Writer) error {\n\trendered := m.g.Render()\n\t_, err := writer.Write([]byte(rendered))\n\treturn err\n}",
"func RenderString(s string) {\n\tq, err := qrcode.New(s, qrcode.Medium)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Println(q.ToSmallString(false))\n}",
"func RenderString(s string) {\n\tq, err := qrcode.New(s, qrcode.Medium)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(q.ToSmallString(false))\n}",
"func (f *Frontend) Render(w io.Writer, c Casing) error {\n o := f.templateHtml\n for key, r := range f.replacements {\n o = []byte( r.regex.ReplaceAllString( string(o), fmt.Sprintf(r.repl, c[key])))\n }\n w.Write(o)\n return nil\n}",
"func (self *RenderWindow) DrawString(string String*) void {\n return C.sfRenderWindow_DrawString(self.Cref, string.Cref)\n}",
"func (r *render) RenderString(e *entry) string {\n\tr.mu.Lock()\n\tr.buf = r.buf[0:0]\n\trenderEntry(&r.buf, e)\n\tstr := string(r.buf)\n\tr.mu.Unlock()\n\treturn str\n}",
"func Render(w io.Writer, template string, data interface{}) error {\n\tif err := renderer.HTML(w, 0, template, data); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (c *Car) Render(out chan<- string) {\n\tdefer close(out)\n\n\tvar osSymbolPaint string\n\tif osSymbolPaint = os.Getenv(\"BULLETTRAIN_CAR_OS_SYMBOL_PAINT\"); osSymbolPaint == \"\" {\n\t\tosSymbolPaint = symbolPaint\n\t}\n\n\tvar s string\n\tif s = os.Getenv(\"BULLETTRAIN_CAR_OS_TEMPLATE\"); s == \"\" {\n\t\ts = carTemplate\n\t}\n\n\tfuncMap := template.FuncMap{\n\t\t// Pipeline functions for colouring.\n\t\t\"c\": func(t string) string { return ansi.Color(t, c.GetPaint()) },\n\t\t\"cs\": func(t string) string { return ansi.Color(t, osSymbolPaint) },\n\t}\n\n\tosName := FindOutOs()\n\ttpl := template.Must(template.New(\"os\").Funcs(funcMap).Parse(s))\n\tdata := struct {\n\t\tIcon string\n\t\tName string\n\t}{Icon: symbol(osName), Name: osName}\n\tfromTpl := new(bytes.Buffer)\n\terr := tpl.Execute(fromTpl, data)\n\tif err != nil {\n\t\tlog.Fatalf(\"Can't generate the OS template: %s\", err.Error())\n\t}\n\n\tout <- fromTpl.String()\n}",
"func (t *Table) Render() string {\n\tfmt.Fprintln(t.w, \"-\")\n\tt.w.Flush()\n\n\treturn t.buf.String()\n}",
"func ExampleRender() {\n\tconst s = `\n\tFirst Line\n\tSecond Line\n\tThird Line\n\tHello\n\tThis is go-music`\n\n\tfmt.Println(RenderText(s, Spring))\n\tfmt.Println(RenderText(s, Autumn))\n\tfmt.Println(RenderText(s, Winter))\n\tfmt.Println(RenderText(s, Rose))\n\tfmt.Println(RenderText(s, Valentine))\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
NewDirectRequestSpec initializes a new DirectRequestSpec from a job.DirectRequestSpec | func NewDirectRequestSpec(spec *job.DirectRequestSpec) *DirectRequestSpec {
return &DirectRequestSpec{
ContractAddress: spec.ContractAddress,
MinIncomingConfirmations: spec.MinIncomingConfirmations,
MinIncomingConfirmationsEnv: spec.MinIncomingConfirmationsEnv,
MinContractPayment: spec.MinContractPayment,
Requesters: spec.Requesters,
// This is hardcoded to runlog. When we support other initiators, we need
// to change this
Initiator: "runlog",
CreatedAt: spec.CreatedAt,
UpdatedAt: spec.UpdatedAt,
EVMChainID: spec.EVMChainID,
}
} | [
"func NewDirectRequestSpec(spec *job.DirectRequestSpec) *DirectRequestSpec {\n\treturn &DirectRequestSpec{\n\t\tContractAddress: spec.ContractAddress,\n\t\tOnChainJobSpecID: spec.OnChainJobSpecID.String(),\n\t\t// This is hardcoded to runlog. When we support other intiators, we need\n\t\t// to change this\n\t\tInitiator: \"runlog\",\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t}\n}",
"func (c *InputService17ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}",
"func (c *InputService11ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}",
"func (c *InputService14ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}",
"func (c *InputService18ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}",
"func (c *InputService16ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}",
"func (c *InputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}",
"func (c *InputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}",
"func (c *InputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}",
"func (c *InputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}",
"func (c *InputService22ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}",
"func (c *InputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}",
"func (c *InputService12ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}",
"func (c *InputService10ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}",
"func (c *InputService20ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}",
"func (c *InputService19ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}",
"func (s Benchmark_send_Params) NewReq() (Message, error) {\n\tss, err := NewMessage(s.Struct.Segment())\n\tif err != nil {\n\t\treturn Message{}, err\n\t}\n\terr = s.Struct.SetPtr(0, ss.Struct.ToPtr())\n\treturn ss, err\n}",
"func (c *InputService13ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}",
"func (c *InputService21ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
NewFluxMonitorSpec initializes a new DirectFluxMonitorSpec from a job.FluxMonitorSpec | func NewFluxMonitorSpec(spec *job.FluxMonitorSpec) *FluxMonitorSpec {
var drumbeatSchedulePtr *string
if spec.DrumbeatEnabled {
drumbeatSchedulePtr = &spec.DrumbeatSchedule
}
var drumbeatRandomDelayPtr *string
if spec.DrumbeatRandomDelay > 0 {
drumbeatRandomDelay := spec.DrumbeatRandomDelay.String()
drumbeatRandomDelayPtr = &drumbeatRandomDelay
}
return &FluxMonitorSpec{
ContractAddress: spec.ContractAddress,
Threshold: float32(spec.Threshold),
AbsoluteThreshold: float32(spec.AbsoluteThreshold),
PollTimerPeriod: spec.PollTimerPeriod.String(),
PollTimerDisabled: spec.PollTimerDisabled,
IdleTimerPeriod: spec.IdleTimerPeriod.String(),
IdleTimerDisabled: spec.IdleTimerDisabled,
DrumbeatEnabled: spec.DrumbeatEnabled,
DrumbeatSchedule: drumbeatSchedulePtr,
DrumbeatRandomDelay: drumbeatRandomDelayPtr,
MinPayment: spec.MinPayment,
CreatedAt: spec.CreatedAt,
UpdatedAt: spec.UpdatedAt,
EVMChainID: spec.EVMChainID,
}
} | [
"func NewFluxMonitorSpec(spec *job.FluxMonitorSpec) *FluxMonitorSpec {\n\treturn &FluxMonitorSpec{\n\t\tContractAddress: spec.ContractAddress,\n\t\tPrecision: spec.Precision,\n\t\tThreshold: spec.Threshold,\n\t\tAbsoluteThreshold: spec.AbsoluteThreshold,\n\t\tPollTimerPeriod: spec.PollTimerPeriod.String(),\n\t\tPollTimerDisabled: spec.PollTimerDisabled,\n\t\tIdleTimerPeriod: spec.IdleTimerPeriod.String(),\n\t\tIdleTimerDisabled: spec.IdleTimerDisabled,\n\t\tMinPayment: spec.MinPayment,\n\t\tCreatedAt: spec.CreatedAt,\n\t\tUpdatedAt: spec.UpdatedAt,\n\t}\n}",
"func (in *FluxSpec) DeepCopy() *FluxSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(FluxSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}",
"func newMonitor() (*monitor, error) {\n\tudev, err := libudevwrapper.NewUdev()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tudevMonitor, err := udev.NewDeviceFromNetlink(libudevwrapper.UDEV_SOURCE)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = udevMonitor.AddSubsystemFilter(libudevwrapper.UDEV_SUBSYSTEM)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = udevMonitor.EnableReceiving()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmonitor := &monitor{\n\t\tudev: udev,\n\t\tudevMonitor: udevMonitor,\n\t}\n\treturn monitor, nil\n}",
"func newFakeReconciler(cfg Config, initObjects ...runtime.Object) *Reconciler {\n\tfakeClient := fakeclient.\n\t\tNewClientBuilder().\n\t\tWithScheme(scheme.Scheme).\n\t\tWithRuntimeObjects(initObjects...).\n\t\tWithStatusSubresource(&autoscalingv1beta1.MachineAutoscaler{}).\n\t\tBuild()\n\treturn &Reconciler{\n\t\tclient: fakeClient,\n\t\tscheme: scheme.Scheme,\n\t\trecorder: record.NewFakeRecorder(128),\n\t\tconfig: cfg,\n\t}\n}",
"func NewMonitor(inputChan chan *TripsOfSec) *Monitor {\n\tm := &Monitor{\n\t\tInputChan: inputChan,\n\t\tprocessingStats: map[int64]*ProcessingStat{},\n\t\tResultChan: make(chan Stat, 1024),\n\t}\n\tgo m.consume()\n\treturn m\n}",
"func NewMonitor(c context.Context) *Monitor {\n\treturn &Monitor{StatPB: make(chan pb.Stat, 1),\n\t\tticker: time.NewTicker(time.Duration(config.CfgWorker.LoadReport.LoadReportInterval) * time.Second),\n\t\tadapterIdx: -1,\n\t\tCtx: c,\n\t\tStat: stat.NewStat()}\n}",
"func New(dir string) (*Monitor, error) {\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create watcher: %w\", err)\n\t}\n\tif err = watcher.Add(dir); err != nil {\n\t\twatcher.Close()\n\t\treturn nil, fmt.Errorf(\"failed to watch %v: %w\", dir, err)\n\t}\n\treturn &Monitor{\n\t\twatcher: watcher,\n\t\tBackups: filter(watcher.Events),\n\t\tErrors: errors(watcher.Errors),\n\t}, nil\n}",
"func newMonitor(ui cli.Ui, client *api.Client, length int) *monitor {\n\tmon := &monitor{\n\t\tui: &cli.PrefixedUi{\n\t\t\tInfoPrefix: \"==> \",\n\t\t\tOutputPrefix: \" \",\n\t\t\tErrorPrefix: \"==> \",\n\t\t\tUi: ui,\n\t\t},\n\t\tclient: client,\n\t\tstate: newEvalState(),\n\t\tlength: length,\n\t}\n\treturn mon\n}",
"func NewSpec(details *SpecDetails) *Spec {\n\treturn &Spec{\n\t\tDetails: details,\n\t\tServices: NewServiceList(),\n\t\tStatus: SpecWaiting,\n\t}\n}",
"func NewMonitor(\n\tname string,\n\tres Resource,\n\tcurCount *metric.Gauge,\n\tmaxHist *metric.Histogram,\n\tincrement int64,\n\tnoteworthy int64,\n) *BytesMonitor {\n\treturn NewMonitorWithLimit(\n\t\tname, res, math.MaxInt64, curCount, maxHist, increment, noteworthy)\n}",
"func NewCarbonMonitor(name string, urls []string) (monitor *Monitor) {\n\tmonitor = &Monitor{\n\t\tName: name,\n\t\tPublisher: &CarbonPublisher{\n\t\t\tURLs: urls,\n\t\t},\n\t}\n\n\treturn\n}",
"func newFakeReconciler(initObjects ...runtime.Object) *ReconcileMachineRemediation {\n\tfakeClient := fake.NewFakeClient(initObjects...)\n\tremediator := &FakeRemedatior{}\n\treturn &ReconcileMachineRemediation{\n\t\tclient: fakeClient,\n\t\tremediator: remediator,\n\t\tnamespace: consts.NamespaceOpenshiftMachineAPI,\n\t}\n}",
"func NewReactor(cfg Config) *Reactor {\n\tr := &Reactor{\n\t\tComponent: NewComponent(cfg),\n\t\tCoreTemp: cfg.BaseTempOrDefault(),\n\t\tContainmentTemp: cfg.BaseTempOrDefault(),\n\t\tControlRods: []*ControlRod{\n\t\t\tNewControlRod(cfg, 0),\n\t\t\tNewControlRod(cfg, 1),\n\t\t\tNewControlRod(cfg, 2),\n\t\t\tNewControlRod(cfg, 3),\n\t\t\tNewControlRod(cfg, 4),\n\t\t\tNewControlRod(cfg, 5),\n\t\t},\n\t\tFuelRods: []*FuelRod{\n\t\t\tNewFuelRod(cfg, 0),\n\t\t\tNewFuelRod(cfg, 1),\n\t\t\tNewFuelRod(cfg, 2),\n\t\t\tNewFuelRod(cfg, 3),\n\t\t\tNewFuelRod(cfg, 4),\n\t\t\tNewFuelRod(cfg, 5),\n\t\t\tNewFuelRod(cfg, 6),\n\t\t\tNewFuelRod(cfg, 7),\n\t\t\tNewFuelRod(cfg, 8),\n\t\t\tNewFuelRod(cfg, 9),\n\t\t\tNewFuelRod(cfg, 10),\n\t\t\tNewFuelRod(cfg, 11),\n\t\t},\n\t\tCoolant: NewCoolant(),\n\t\tPrimary: NewPump(\"primary\", cfg),\n\t\tSecondary: NewPump(\"secondary\", cfg),\n\t\tTurbine: NewTurbine(cfg),\n\t}\n\n\tr.Primary.Inlet = r.Coolant\n\tr.Primary.Outlet = r.Turbine.Coolant\n\tr.Secondary.Inlet = r.Turbine.Coolant\n\tr.Secondary.Outlet = r.Coolant\n\n\tr.ContainmentTempAlarm = NewThresholdAlarm(\n\t\t\"Containment Temp\",\n\t\tfunc() float64 { return r.ContainmentTemp },\n\t\tThresholds(ContainmentTempFatal, ContainmentTempCritical, ContainmentTempWarning),\n\t)\n\tr.CoreTempAlarm = NewThresholdAlarm(\n\t\t\"Core Temp\",\n\t\tfunc() float64 { return r.CoreTemp },\n\t\tThresholds(CoreTempFatal, CoreTempCritical, CoreTempWarning),\n\t)\n\treturn r\n}",
"func New(threads int) *MonitorImpl {\n\treturn &MonitorImpl{\n\t\tflushingStats: make([]record, 0),\n\t\tmaxLength: 100 * threads,\n\t\tlambda: 1,\n\t}\n}",
"func NewMonitorFilter(logger logrus.FieldLogger, monitorEventFilters []string) (*monitorFilter, error) {\n\tmonitorFilter := monitorFilter{logger: logger}\n\n\tfor _, filter := range monitorEventFilters {\n\t\tswitch filter {\n\t\tcase monitorAPI.MessageTypeNameDrop:\n\t\t\tmonitorFilter.drop = true\n\t\tcase monitorAPI.MessageTypeNameDebug:\n\t\t\tmonitorFilter.debug = true\n\t\tcase monitorAPI.MessageTypeNameCapture:\n\t\t\tmonitorFilter.capture = true\n\t\tcase monitorAPI.MessageTypeNameTrace:\n\t\t\tmonitorFilter.trace = true\n\t\tcase monitorAPI.MessageTypeNameL7:\n\t\t\tmonitorFilter.l7 = true\n\t\tcase monitorAPI.MessageTypeNameAgent:\n\t\t\tmonitorFilter.agent = true\n\t\tcase monitorAPI.MessageTypeNamePolicyVerdict:\n\t\t\tmonitorFilter.policyVerdict = true\n\t\tcase monitorAPI.MessageTypeNameRecCapture:\n\t\t\tmonitorFilter.recCapture = true\n\t\tcase monitorAPI.MessageTypeNameTraceSock:\n\t\t\tmonitorFilter.traceSock = true\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unknown monitor event type: %s\", filter)\n\t\t}\n\t}\n\n\tlogger.WithField(\"filters\", monitorEventFilters).Info(\"Configured Hubble with monitor event filters\")\n\treturn &monitorFilter, nil\n}",
"func NewMonitor(\n\tcctx *Context,\n\tiface string,\n\tdialer *system.Dialer,\n\twatchC <-chan netstate.Change,\n\tverbose bool,\n) *Monitor {\n\treturn &Monitor{\n\t\tcctx: cctx,\n\t\tiface: iface,\n\t\tverbose: verbose,\n\t\tdialer: dialer,\n\t\twatchC: watchC,\n\t\treadyC: make(chan struct{}),\n\n\t\t// By default use real time.\n\t\tnow: time.Now,\n\t}\n}",
"func NewMonitor(p *Probe) (*Monitor, error) {\n\tvar err error\n\tm := &Monitor{\n\t\tprobe: p,\n\t}\n\n\t// instantiate a new load controller\n\tm.loadController, err = NewLoadController(p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// instantiate a new event statistics monitor\n\tm.perfBufferMonitor, err = NewPerfBufferMonitor(p)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"couldn't create the events statistics monitor: %w\", err)\n\t}\n\n\tif p.config.ActivityDumpEnabled {\n\t\tm.activityDumpManager, err = NewActivityDumpManager(p)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"couldn't create the activity dump manager: %w\", err)\n\t\t}\n\t}\n\n\tif p.config.RuntimeMonitor {\n\t\tm.runtimeMonitor = NewRuntimeMonitor(p.statsdClient)\n\t}\n\n\tm.discarderMonitor, err = NewDiscarderMonitor(p)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"couldn't create the discarder monitor: %w\", err)\n\t}\n\n\treturn m, nil\n}",
"func newStageMonitor(g reflect.Value, v specsp) (*stageMonitor, error) {\n\ttarget := v[\"target\"].(string)\n\tval := v[\"val\"].(string)\n\teval := buildMonitorEval(g, target, val)\n\tif eval == nil {\n\t\treturn nil, syscall.ENOENT\n\t}\n\tmode := int(v[\"mode\"].(float64))\n\tcolor := int(v[\"color\"].(float64))\n\tlabel := v[\"label\"].(string)\n\tx := v[\"x\"].(float64)\n\ty := v[\"y\"].(float64)\n\tvisible := v[\"visible\"].(bool)\n\treturn &stageMonitor{\n\t\ttarget: target, val: val, eval: eval,\n\t\tvisible: visible, mode: mode, color: color, x: x, y: y, label: label,\n\t}, nil\n}",
"func (in *MariaDBMonitorSpec) DeepCopy() *MariaDBMonitorSpec {\n\tif in == nil {\n\t\treturn nil\n\t}\n\tout := new(MariaDBMonitorSpec)\n\tin.DeepCopyInto(out)\n\treturn out\n}"
] | {
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.