Dataset Viewer
conversation (list, lengths 2-2) |
---|
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom torch import nn\nfrom TestingCode.modules import Attention, FeedForward, PreNorm\n\n# Code from current file (TestingCode/transformer.py):\n\n\nclass Transformer(nn.Module):\n def __init__(\n self,\n dim,\n depth,\n heads,\n mlp_ratio=4.0,\n attn_dropout=0.0,\n dropout=0.0,\n qkv_bias=True,\n revised=False,\n ):\n super().__init__()\n self.layers = nn.ModuleList([])\n\n assert isinstance(\n mlp_ratio, float\n ), \"MLP ratio should be an integer for valid \"\n mlp_dim = int(mlp_ratio * dim)\n\n for _ in range(depth):\n self.layers.append(\n nn.ModuleList(\n [\n PreNorm(\n dim,\n Attention(\n dim,\n num_heads=heads,\n qkv_bias=qkv_bias,\n attn_drop=attn_dropout,\n proj_drop=dropout,\n ),\n ),\n PreNorm(\n dim,\n"
},
{
"from": "gpt",
"value": " FeedForward(dim, mlp_dim, dropout_rate=dropout,),"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport glob\nimport os\nimport random\nimport shutil\nimport time\nimport cv2\nimport math\nimport numpy as np\nimport torch\nfrom pathlib import Path\nfrom threading import Thread\nfrom PIL import Image, ExifTags\nfrom torch.utils.data import Dataset\nfrom tqdm import tqdm\nfrom ..utils import letterbox_for_img, clean_str\n\n# Code from current file (lib/dataset/DemoDataset.py):\n\n\n\nimg_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.dng']\nvid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']\n\nclass LoadImages: # for inference\n def __init__(self, path, img_size=640):\n p = str(Path(path)) # os-agnostic\n p = os.path.abspath(p) # absolute path\n if '*' in p:\n files = sorted(glob.glob(p, recursive=True)) # glob\n elif os.path.isdir(p):\n files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir\n elif os.path.isfile(p):\n files = [p] # files\n else:\n raise Exception('ERROR: %s does not exist' % p)\n\n images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]\n videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]\n ni, nv = len(images), len(videos)\n\n self.img_size = img_size\n self.files = images + videos\n self.nf = ni + nv # number of files\n self.video_flag = [False] * ni + [True] * nv\n self.mode = 'images'\n if any(videos):\n self.new_video(videos[0]) # new video\n else:\n self.cap = None\n assert self.nf > 0, 'No images or videos found in %s. Supported formats are:\\nimages: %s\\nvideos: %s' % \\\n (p, img_formats, vid_formats)\n\n def __iter__(self):\n self.count = 0\n return self\n\n def __next__(self):\n if self.count == self.nf:\n raise StopIteration\n path = self.files[self.count]\n\n if self.video_flag[self.count]:\n # Read video\n self.mode = 'video'\n ret_val, img0 = self.cap.read()\n if not ret_val:\n self.count += 1\n self.cap.release()\n if self.count == self.nf: # last video\n raise StopIteration\n else:\n path = self.files[self.count]\n self.new_video(path)\n ret_val, img0 = self.cap.read()\n h0, w0 = img0.shape[:2]\n\n self.frame += 1\n print('\\n video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nf, self.frame, self.nframes, path), end='')\n\n else:\n # Read image\n self.count += 1\n img0 = cv2.imread(path, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION) # BGR\n #img0 = cv2.cvtColor(img0, cv2.COLOR_BGR2RGB)\n assert img0 is not None, 'Image Not Found ' + path\n print('image %g/%g %s: \\n' % (self.count, self.nf, path), end='')\n h0, w0 = img0.shape[:2]\n\n # Padded resize # 填充尺寸,640*360*3 -> 640*384*3\n"
},
{
"from": "gpt",
"value": " img, ratio, pad = letterbox_for_img(img0, new_shape=self.img_size, auto=True)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport json\nimport argparse\nimport os\nimport os.path as osp\nimport time\nimport open3d as o3d\nimport numpy as np\nimport copy\nimport matplotlib.pyplot as plt\nfrom utils import o3dobj\nfrom utils import io\nfrom utils import tfm\n\n# Code from current file (registration.py):\n default=0.05,\n help=\"voxel size for global fast registration downsampling. default is 0.05\",\n)\nparser.add_argument(\n \"--voxel_size_icp\",\n type=float,\n default=0.05,\n help=\"voxel size for icp downsampling. default is 0.05\",\n)\nparser.add_argument(\"--skip_icp\", action=\"store_true\", help=\"skip icp and only run fgr\")\nparser.add_argument(\n \"--transformed_trajectory_out\",\n type=str,\n default=\"trajectory_1.jsonl\",\n help=\"output trajectory of the transformed trajectory 1 (to trajectory 2)\",\n)\nargs = parser.parse_args()\n\npointcloud_file_path_1 = args.pointcloud1\npointcloud_file_path_2 = args.pointcloud2\ntrajectory_file_path_1 = args.trajectory1\ntrajectory_file_path_2 = args.trajectory2\n\n\ndef preprocess_point_cloud(pcd, voxel_size):\n \"\"\"Downsamples the point cloud and computes the normals and FPFH features\"\"\"\n print(f\":: Downsample with a voxel size {voxel_size:.3f}.\")\n pcd_down = pcd.voxel_down_sample(voxel_size)\n\n radius_normal = voxel_size * 2\n print(f\":: Estimate normal with search radius {radius_normal:.3f}.\")\n pcd_down.estimate_normals(\n o3d.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30)\n )\n\n radius_feature = voxel_size * 5\n print(f\":: Compute FPFH feature with search radius {radius_feature:.3f}.\")\n pcd_fpfh = o3d.pipelines.registration.compute_fpfh_feature(\n pcd_down,\n o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100),\n )\n return pcd_down, pcd_fpfh\n\n\ndef prepare_dataset(voxel_size):\n \"\"\"Loads two point clouds and downsamples them.\"\"\"\n print(\":: Load two point clouds\")\n source = o3d.io.read_point_cloud(pointcloud_file_path_1)\n target = o3d.io.read_point_cloud(pointcloud_file_path_2)\n\n source_down, source_fpfh = preprocess_point_cloud(source, voxel_size)\n target_down, target_fpfh = preprocess_point_cloud(target, voxel_size)\n return source, target, source_down, target_down, source_fpfh, target_fpfh\n\n\ndef execute_fast_global_registration(\n source_down, target_down, source_fpfh, target_fpfh, voxel_size\n):\n \"\"\"Performs fast global registration on the downsampled point clouds\"\"\"\n distance_threshold = voxel_size * 0.5\n print(\n f\":: Apply fast global registration with distance threshold {distance_threshold:.3f}\"\n )\n result = o3d.pipelines.registration.registration_fgr_based_on_feature_matching(\n source_down,\n target_down,\n source_fpfh,\n target_fpfh,\n o3d.pipelines.registration.FastGlobalRegistrationOption(\n maximum_correspondence_distance=distance_threshold\n ),\n )\n return result\n\n\ndef execute_vanilla_icp(source, target):\n \"\"\"Performs vanilla ICP on the point clouds\"\"\"\n estimation = o3d.pipelines.registration.TransformationEstimationPointToPlane()\n\n max_correspondence_distance = 0.5\n # Convergence-Criteria for Vanilla ICP\n criteria = o3d.pipelines.registration.ICPConvergenceCriteria(\n relative_fitness=0.000001, relative_rmse=0.000001, max_iteration=50\n )\n\n result = o3d.pipelines.registration.registration_icp(\n source,\n target,\n max_correspondence_distance,\n estimation_method=estimation,\n criteria=criteria,\n )\n return result\n\n\nif __name__ == \"__main__\":\n 
voxel_size_fgr = args.voxel_size_fgr\n voxel_size_icp = args.voxel_size_icp\n\n (\n cloud_1,\n cloud_2,\n cloud_1_down,\n cloud_2_down,\n cloud_1_fpfh,\n cloud_2_fpfh,\n ) = prepare_dataset(voxel_size=voxel_size_fgr)\n\n color_1 = [0.9450980392, 0.5764705882, 0.7098039216]\n color_2 = [0.11, 0.72, 0.89]\n\n cloud_1.paint_uniform_color(color_1)\n cloud_2.paint_uniform_color(color_2)\n\n cloud_1_down.paint_uniform_color(color_1)\n cloud_2_down.paint_uniform_color(color_2)\n\n # axis\n axis = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1, origin=[0, 0, 0])\n # unit block\n"
},
{
"from": "gpt",
"value": " unit_block = o3dobj.get_o3d_unit_block_at_origin()"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport math\nimport torch\nimport torch.nn.functional as F\n import fairscale.nn.model_parallel.initialize as fs_init\nfrom typing import Optional, Tuple\nfrom torch import nn\nfrom .utils import ModelArgs, repeat_kv\nfrom .position_embeding import apply_rotary_emb\n from fairscale.nn.model_parallel.layers import (\n ColumnParallelLinear,\n RowParallelLinear,\n )\n\n# Code from current file (mixtralkit/layers/attention.py):\n# Copyright (c) OpenMMLab. and affiliates.\n# Copyright (c) Meta Platforms, Inc. and affiliates.\n\n\n \nclass TorchAttention(nn.Module):\n \"\"\"Multi-head attention module.\"\"\"\n def __init__(self, args: ModelArgs):\n \"\"\"\n Initialize the Attention module.\n\n Args:\n args (ModelArgs): Model configuration parameters.\n\n Attributes:\n n_kv_heads (int): Number of key and value heads.\n n_local_heads (int): Number of local query heads.\n n_local_kv_heads (int): Number of local key and value heads.\n n_rep (int): Number of repetitions for local heads.\n head_dim (int): Dimension size of each attention head.\n wq (ColumnParallelLinear): Linear transformation for queries.\n wk (ColumnParallelLinear): Linear transformation for keys.\n wv (ColumnParallelLinear): Linear transformation for values.\n wo (RowParallelLinear): Linear transformation for output.\n cache_k (torch.Tensor): Cached keys for attention.\n cache_v (torch.Tensor): Cached values for attention.\n\n \"\"\"\n \n\n super().__init__()\n self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads\n model_parallel_size = 1\n self.n_local_heads = args.n_heads // model_parallel_size\n self.n_local_kv_heads = self.n_kv_heads // model_parallel_size\n self.n_rep = self.n_local_heads // self.n_local_kv_heads\n self.head_dim = args.dim // args.n_heads\n\n self.wq = nn.Linear(\n args.dim,\n args.n_heads * self.head_dim,\n bias=False,\n )\n self.wk = nn.Linear(\n args.dim,\n self.n_kv_heads * self.head_dim,\n bias=False,\n )\n self.wv = nn.Linear(\n args.dim,\n self.n_kv_heads * self.head_dim,\n bias=False,\n )\n self.wo = nn.Linear(\n args.n_heads * self.head_dim,\n args.dim,\n bias=False,\n )\n\n self.cache_k = torch.zeros(\n (\n args.max_batch_size,\n args.max_seq_len,\n self.n_local_kv_heads,\n self.head_dim,\n )\n ).cuda()\n self.cache_v = torch.zeros(\n (\n args.max_batch_size,\n args.max_seq_len,\n self.n_local_kv_heads,\n self.head_dim,\n )\n ).cuda()\n\n def forward(\n self,\n x: torch.Tensor,\n start_pos: int,\n freqs_cis: torch.Tensor,\n mask: Optional[torch.Tensor],\n ):\n \"\"\"\n Forward pass of the attention module.\n\n Args:\n x (torch.Tensor): Input tensor.\n start_pos (int): Starting position for caching.\n freqs_cis (torch.Tensor): Precomputed frequency tensor.\n mask (torch.Tensor, optional): Attention mask tensor.\n\n Returns:\n torch.Tensor: Output tensor after attention.\n\n \"\"\"\n bsz, seqlen, _ = x.shape\n xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)\n\n xq = xq.view(bsz, seqlen, self.n_local_heads, self.head_dim)\n xk = xk.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)\n xv = xv.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)\n\n"
},
{
"from": "gpt",
"value": " xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport logging\nfrom homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_NAME\nfrom homeassistant.components.device_tracker.config_entry import (\n BaseTrackerEntity,\n SourceType,\n ATTR_SOURCE_TYPE,\n ATTR_LATITUDE,\n ATTR_LONGITUDE,\n)\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.entity_platform import AddEntitiesCallback\nfrom homeassistant.helpers.typing import StateType\nfrom pyfuelprices.const import PROP_FUEL_LOCATION_SOURCE\nfrom .const import CONF_AREAS, DOMAIN\nfrom .entity import FeulStationEntity\nfrom .coordinator import FuelPricesCoordinator\n\n# Code from current file (custom_components/fuel_prices/device_tracker.py):\n\"\"\"Device tracker for fuel prices.\"\"\"\nfrom __future__ import annotations\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup_entry(\n hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback\n) -> None:\n \"\"\"Integration platform creation.\"\"\"\n"
},
{
"from": "gpt",
"value": " cooridinator: FuelPricesCoordinator = hass.data[DOMAIN][entry.entry_id]"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport os\nimport json\nimport sys\nimport math\nimport torch\nimport argparse\nimport transformers\nfrom peft import PeftModel\nfrom transformers import GenerationConfig\nfrom llama_attn_replace import replace_llama_attn\nfrom supervised_fine_tune import PROMPT_DICT\nfrom tqdm import tqdm\n\n# Code from current file (inference_row_pop.py):\n# import textwrap\n# from queue import Queue\n# from threading import Thread\n# import gradio as gr\n\ndef parse_config():\n parser = argparse.ArgumentParser(description='arg parser')\n parser.add_argument('--base_model', type=str, default=\"/data1/pretrained-models/llama-7b-hf\")\n parser.add_argument('--cache_dir', type=str, default=\"./cache\")\n parser.add_argument('--context_size', type=int, default=-1, help='context size during fine-tuning')\n parser.add_argument('--flash_attn', type=bool, default=False, help='')\n parser.add_argument('--temperature', type=float, default=0.6, help='')\n parser.add_argument('--top_p', type=float, default=0.9, help='')\n parser.add_argument('--max_gen_len', type=int, default=512, help='')\n parser.add_argument('--input_data_file', type=str, default='input_data/', help='')\n parser.add_argument('--output_data_file', type=str, default='output_data/', help='')\n args = parser.parse_args()\n return args\n\ndef generate_prompt(instruction, question, input_seg=None):\n if input:\n"
},
{
"from": "gpt",
"value": " return PROMPT_DICT[\"prompt_input\"].format(instruction=instruction, input_seg=input_seg, question=question)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom homeassistant.components.sensor import (\n SensorStateClass,\n SensorDeviceClass,\n)\nfrom homeassistant.const import UnitOfPower, UnitOfEnergy, UnitOfTemperature, PERCENTAGE, CONF_NAME\nfrom custom_components.little_monkey.entity import EcojokoEntity, EcojokoSensor\nfrom .const import (\n DOMAIN,\n CONF_USE_HCHP_FEATURE,\n CONF_USE_TEMPO_FEATURE,\n CONF_USE_TEMPHUM_FEATURE,\n CONF_USE_PROD_FEATURE\n)\n\n# Code from current file (custom_components/little_monkey/sensor.py):\n\"\"\"Sensor platform for mon_ecojoko.\"\"\"\nfrom __future__ import annotations\n\n\n\nasync def async_setup_entry(hass, config_entry, async_add_entities):\n \"\"\"Set up the custom component sensors.\"\"\"\n # Fetch data or configure your sensors here\n coordinator = hass.data[DOMAIN][config_entry.entry_id]\n\n # Create the main device entity\n firmware = coordinator.data[\"gateway_firmware_version\"]\n main_device = EcojokoEntity(coordinator, config_entry.data.get(CONF_NAME), firmware)\n\n # Create child entities and link them to the main device\n\n # Real time sensor\n"
},
{
"from": "gpt",
"value": " main_device.add_child_entity(EcojokoSensor("
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport numpy as np\nimport scipy.linalg as la\nfrom polar.utils.misc import (mdot, kdot, get_spin_dim, add_spin_dim)\n\n# Code from current file (polar/basis/trans_1e.py):\n#!/usr/bin/env python\n\n\"\"\"\nTransform 1e quantities.\n\nAuthors:\n Zhi-Hao Cui\n Tianyu Zhu\n Shunyue Yuan\n\"\"\"\n\n\n\n\n\n# *****************************************************************************\n# Transform functions AO -> LO and LO -> AO\n# for h1 and rdm1\n# *****************************************************************************\n\ndef trans_h1_to_lo(h_ao_ao, C_ao_lo):\n r\"\"\"\n Transform h1 to lo basis, with kpts.\n h^{LO} = C^{\\dagger} h^{AO} C\n \"\"\"\n h_ao_ao = np.asarray(h_ao_ao)\n C_ao_lo = np.asarray(C_ao_lo)\n nkpts = C_ao_lo.shape[-3]\n nlo = C_ao_lo.shape[-1]\n res_type = np.result_type(h_ao_ao.dtype, C_ao_lo.dtype)\n\n # treat the special case where h is 0 or [0, 0]\n if h_ao_ao.ndim == 0: # scalar\n return np.ones((nkpts, nlo, nlo), dtype=res_type) * h_ao_ao\n elif h_ao_ao.ndim == 1: # [0, 0]\n spin = len(h_ao_ao)\n h_lo_lo = np.ones((spin, nkpts, nlo, nlo), dtype=res_type)\n for s in range(spin):\n h_lo_lo[s] *= h_ao_ao[s]\n return h_lo_lo\n\n if C_ao_lo.ndim == 3 and h_ao_ao.ndim == 3:\n h_lo_lo = np.zeros((nkpts, nlo, nlo), dtype=res_type)\n for k in range(nkpts):\n"
},
{
"from": "gpt",
"value": " h_lo_lo[k] = mdot(C_ao_lo[k].conj().T, h_ao_ao[k], C_ao_lo[k])"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport json\nimport os\nimport time\nfrom stravalib.util.limiter import RateLimiter, XRateLimitRule\nfrom stravalib.client import Client, exc\nfrom utils.exceptions import RateLimitException, NoInternetException\n\n# Code from current file (src/strava_gpx_uploader.py):\n\n\n\ndef get_strava_client(access_token):\n token = access_token\n rate_limiter = RateLimiter()\n rate_limiter.rules.append(XRateLimitRule(\n {'short': {'usageFieldIndex': 0, 'usage': 0,\n # 60s * 15 = 15 min\n 'limit': 100, 'time': (60 * 15),\n 'lastExceeded': None, },\n 'long': {'usageFieldIndex': 1, 'usage': 0,\n # 60s * 60m * 24 = 1 day\n 'limit': 1000, 'time': (60 * 60 * 24),\n 'lastExceeded': None}}))\n client = Client(rate_limiter=rate_limiter)\n client.access_token = token\n return client\n\n\nclass StravaGpxUploader:\n def __init__(self, file_path: str, activity_type):\n with open(\"config/strava_config.json\", 'r') as f:\n strava_config = json.load(f)\n\n # Edit access_token in the strava_config.json or edit here\n # like access_token = '***'\n self.file_path = file_path\n self.access_token = strava_config[\"access_token\"]\n self.activity_type = activity_type\n self.client = get_strava_client(self.access_token)\n\n def get_athlete_name(self):\n athlete = None\n for i in range(2):\n try:\n athlete = self.client.get_athlete()\n except exc.RateLimitExceeded as err:\n if i > 0:\n raise RateLimitException(\"Daily Rate limit exceeded\")\n print(\"Rate limit exceeded in connecting - Retrying strava connection in 15 minutes\")\n time.sleep(900)\n continue\n break\n\n print(\"Now authenticated for \" + athlete.firstname + \" \" + athlete.lastname)\n\n # client, gpxfile, strava_activity_type, notes\n def upload_gpx(self):\n gpxfile = self.file_path\n if not os.path.isfile(gpxfile):\n print(\"No file found for \" + gpxfile + \"!\")\n return False\n\n print(\"Uploading \" + gpxfile)\n\n for i in range(2):\n try:\n # 如果上传成功,则会直接到底下break\n upload = self.client.upload_activity(\n activity_file=open(gpxfile, 'r'),\n data_type='gpx',\n description='',\n activity_type=self.activity_type\n )\n except exc.RateLimitExceeded as err:\n # 第二次循环才会直接到这里\n # 这里是说今天已经超过了限制,退出程序\n if i > 0:\n raise RateLimitException(\"Daily Rate limit exceeded, please try tomorrow\")\n\n # 第一次循环会直接到这里\n # 这里是说这一次超过了限制,等待15分钟\n print(\"Rate limit exceeded in uploading - auto pausing uploads for 15 minutes to avoid rate-limit\")\n time.sleep(900)\n continue\n except ConnectionError as err:\n"
},
{
"from": "gpt",
"value": " raise NoInternetException(\"No Internet connection: {}\".format(err))"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport yaml\nimport os\nimport sys\nimport subprocess\nimport signal\nfrom pprint import pprint\nfrom multiprocessing import Process\nfrom gpt_server.utils import get_free_tcp_port, start_server, run_cmd, stop_server,delete_log\n\n# Code from current file (gpt_server/serving/main.py):\n\n# 配置根目录\nroot_dir = os.path.join(os.path.dirname(__file__), \"..\")\nroot_dir = os.path.abspath(root_dir)\nsys.path.append(root_dir)\n# 删除日志\ndelete_log(root_dir)\ndef signal_handler(signum, frame):\n stop_server()\n raise KeyboardInterrupt\n\n\nsignal.signal(signal.SIGINT, signal_handler)\n\nwith open(\"./config.yaml\", \"r\") as f:\n config = yaml.safe_load(f)\nprint(config)\n# ----------------------------启动 Controller 和 Openai API 服务----------------------------------------------------\nhost = config[\"serve_args\"][\"host\"]\nport = config[\"serve_args\"][\"port\"]\nstart_server(host, port)\n# ----------------------------启动 Controller 和 Openai API 服务----------------------------------------------------\nfor model_name, model_config in config[\"models\"].items():\n # 启用的模型\n if model_config[\"enable\"]:\n pprint(model_config)\n print()\n # 模型地址\n model_name_or_path = model_config[\"model_name_or_path\"]\n # 模型类型\n model_type = model_config[\"model_type\"]\n # model type 校验\n py_path = f\"{root_dir}/model_worker/{model_type}.py\"\n\n model_names = model_name\n if model_config[\"alias\"]:\n model_names = model_name + \",\" + model_config[\"alias\"]\n\n # 获取 worker 数目 并获取每个 worker 的资源\n workers = model_config[\"workers\"]\n # if model_config[\"work_mode\"] == \"deepspeed\":\n # 设置使用 deepspeed\n\n process = []\n for worker in workers:\n gpus = worker[\"gpus\"]\n # 将gpus int ---> str\n gpus = [str(i) for i in gpus]\n gpus_str = \",\".join(gpus)\n num_gpus = len(gpus)\n if model_config[\"work_mode\"] == \"deepspeed\":\n os.environ[\"USE_DS\"] = \"1\"\n run_mode = f\"deepspeed --num_gpus {num_gpus} \"\n pass\n elif model_config[\"work_mode\"] == \"accelerate\":\n os.environ[\"USE_ACC\"] = \"1\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpus_str\n run_mode = \"python \"\n pass\n elif model_config[\"work_mode\"] == \"hf\":\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpus_str\n run_mode = \"python \"\n pass\n\n # DS 只能在代码内部起效\n # os.environ[\"CUDA_VISIBLE_DEVICES\"] = gpus_str\n cmd = (\n run_mode\n + py_path\n + f\" --gpus {gpus_str}\"\n"
},
{
"from": "gpt",
"value": " + f\" --master_port {get_free_tcp_port()}\""
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport logging\nfrom typing import List\nfrom tonic_validate.classes.llm_response import LLMResponse\nfrom tonic_validate.metrics.augmentation_accuracy_metric import (\n AugmentationAccuracyMetric\n)\nfrom tonic_validate.metrics.metric import Metric\nfrom tonic_validate.metrics.retrieval_precision_metric import RetrievalPrecisionMetric\nfrom tonic_validate.services.openai_service import OpenAIService\n\n# Code from current file (tonic_validate/metrics/augmentation_precision_metric.py):\n\nlogger = logging.getLogger()\n\n\nclass AugmentationPrecisionMetric(Metric):\n name = \"augmentation_precision\"\n\n def __init__(self) -> None:\n self.augmentation_accuracy = AugmentationAccuracyMetric()\n self.retrieval_precision = RetrievalPrecisionMetric()\n\n"
},
{
"from": "gpt",
"value": " def score(self, llm_response: LLMResponse, openai_service: OpenAIService) -> float:"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom collections.abc import Mapping\nfrom typing import Any\nfrom h3ronpy.arrow.vector import cells_to_coordinates, coordinates_to_cells\nfrom xarray.indexes import PandasIndex\nfrom xdggs.index import DGGSIndex\nfrom xdggs.utils import _extract_cell_id_variable, register_dggs\nimport numpy as np\nimport xarray as xr\n\n# Code from current file (xdggs/h3.py):\n\n\n\n\n@register_dggs(\"h3\")\nclass H3Index(DGGSIndex):\n _resolution: int\n\n def __init__(\n self,\n cell_ids: Any | PandasIndex,\n dim: str,\n resolution: int,\n ):\n super().__init__(cell_ids, dim)\n self._resolution = int(resolution)\n\n @classmethod\n def from_variables(\n cls: type[\"H3Index\"],\n variables: Mapping[Any, xr.Variable],\n *,\n options: Mapping[str, Any],\n ) -> \"H3Index\":\n"
},
{
"from": "gpt",
"value": " _, var, dim = _extract_cell_id_variable(variables)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport numpy as np\nfrom civrealm.envs import FreecivBaseEnv\nfrom civrealm.envs.freeciv_wrapper.config import default_tensor_config\nfrom .core import Wrapper\nfrom .utils import onehotifier_maker\n\n# Code from current file (src/civrealm/envs/freeciv_wrapper/tensor_base_wrapper.py):\n\n\n\n\nclass TensorBase(Wrapper):\n \"\"\"\n A basic wrapper that deals with config loading and entity id recording, \n required by all tensor-related wrappers.\n\n\n Parameters\n ----------\n env: FreecivBaseEnv\n config: dict\n tensor env configuration\n\n Attributes\n ---------\n config: dict\n A dict that specifies all configurations related to tensor wrapper.\n my_player_id: int\n My player id.\n unit_ids: list\n A sorted list of my unit ids.\n city_ids: list\n A sorted list of my city ids.\n others_unit_ids: list\n A sorted list of others unit ids.\n others_city_ids: list\n A sorted list of others city ids.\n dipl_ids : list\n A list of others player ids.\n units : dict\n ruleset information about units.\n unit_types :list\n A list of all unit types.\n unit_costs : list\n A list of int indicating unit costs.\n improvements : dict\n Ruleset information about city improvements.\n impr_costs :list\n A list of int indicating city improvements costs.\n\n \"\"\"\n\n def __init__(self, env: FreecivBaseEnv, config: dict = default_tensor_config):\n self.config = config\n self.my_player_id = -1\n\n # mutable ids\n self.unit_ids = []\n self.city_ids = []\n self.others_unit_ids = []\n self.others_city_ids = []\n self.dipl_ids = []\n\n # ruleset\n self.units = {}\n self.unit_types = []\n self.unit_costs = []\n self.improvements = {}\n self.impr_costs = []\n\n super().__init__(env)\n\n def update_sequence_ids(self, observation):\n \"\"\"\n Use city, unit and dipl information in observation to update ids.\n \"\"\"\n self.unit_ids = sorted(\n list(\n k\n for k in observation.get(\"unit\", {}).keys()\n if observation[\"unit\"][k][\"owner\"] == self.my_player_id\n )\n )\n self.others_unit_ids = sorted(\n list(\n k\n for k in observation.get(\"unit\", {}).keys()\n if observation[\"unit\"][k][\"owner\"] != self.my_player_id\n )\n )\n self.city_ids = sorted(\n list(\n k\n for k in observation.get(\"city\", {}).keys()\n if observation[\"city\"][k][\"owner\"] == self.my_player_id\n )\n )\n self.others_city_ids = sorted(\n list(\n k\n for k in observation.get(\"city\", {}).keys()\n if observation[\"city\"][k][\"owner\"] != self.my_player_id\n )\n )\n self.dipl_ids = [\n player\n for player in sorted(observation.get(\"dipl\", {}).keys())\n if player != self.my_player_id\n ]\n\n def update_config(self):\n \"\"\"\n Update config using ruleset information at the start of the turn.\n \"\"\"\n self.units = self.unwrapped.civ_controller.rule_ctrl.unit_types\n self.unit_types = [self.units[i][\"name\"] for i in range(len(self.units))]\n self.unit_costs = [self.units[i][\"build_cost\"] for i in range(len(self.units))]\n self.improvements = self.unwrapped.civ_controller.rule_ctrl.improvements\n self.impr_costs = [\n self.improvements[i][\"build_cost\"] for i in range(len(self.improvements))\n ]\n"
},
{
"from": "gpt",
"value": " self.config[\"obs_ops\"][\"unit\"][\"type_rule_name\"] = onehotifier_maker("
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport os\nfrom functools import partial, wraps\nfrom typing import Any, Generic, Hashable\nfrom pecapiku.base_cache import BaseCache, DecoratedCallable, Decorator, omnimethod\nfrom pecapiku.cache_access import CacheAccess, _initialize_cache, _resolve_filepath, update_cache\nfrom pecapiku.no_cache import NoCache\n\n# Code from current file (pecapiku/single_value_cache.py):\nfrom __future__ import annotations\n\n\n\n\nclass SingleValueCache(BaseCache, Generic[DecoratedCallable]):\n \"\"\" Decorator for caching of evaluation results.\n Creates a \"pickle\" file at disk space on a specified path.\n Wraps a function and stores its execution result in the file.\n To apply, use the method ``SingleValueCache.decorate()`` or ``SingleValueCache(...)()``.\n\n Args:\n\n file_path - a path to an existing or non-existent pickle file.\n If a relative path or a filename is given, puts it into the framework cache directory.\n\n access - cache access indicators. The string may include the following indicators:\n - ``r`` - read - grants access to read the cache file content\n - ``e`` - execute/evaluate - grants access to evaluate the decorated function (if such is present)\n - ``w`` - write - grants access to modify the cache file content\n\n Example\n -------\n >>> import time\n >>> from timeit import timeit\n >>> def a_heavy_function():\n ... time.sleep(1)\n ...\n ... @SingleValueCache('a_heavy_function.pkl') # or @SingleValueCache.decorate(file_path='a_heavy_function.pkl')\n >>> def a_heavy_function_cached():\n ... time.sleep(1)\n >>> print(timeit(a_heavy_function, number=10)) # 10.070\n >>> print(timeit(a_heavy_function_cached, number=10)) # 1.015\n \"\"\"\n\n @classmethod\n def _get_default_file_path(cls) -> None:\n return None\n\n def __init__(self, file_path: os.PathLike | str | None = None, access: CacheAccess = 'rew'):\n super().__init__(file_path, access)\n self.cache_dict = None\n\n def __call__(self,\n func: DecoratedCallable | None = None, *, file_path: os.PathLike | str | None = None,\n"
},
{
"from": "gpt",
"value": " access: CacheAccess | None = None) -> DecoratedCallable | Decorator:"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom threading import Thread\nfrom queue import Queue\nfrom openai import OpenAI\nfrom .parsers import extract_json, extract_list, rectangle_corners\nfrom .wrappers import (\n job_easy_ocr,\n job_easy_ocr_boxes,\n job_tesseract,\n job_tesseract_boxes,\n)\n from .wrappers.easy_pororo_ocr import job_easy_pororo_ocr\n from .wrappers.easy_pororo_ocr import job_easy_pororo_ocr_boxes\nimport json\nimport os\n\n# Code from current file (betterocr/detect.py):\n\n\n\n\ndef wrapper(func, args, queue):\n queue.put(func(args))\n\n\n# custom error\nclass NoTextDetectedError(Exception):\n pass\n\n\ndef detect():\n \"\"\"Unimplemented\"\"\"\n raise NotImplementedError\n\n\ndef detect_async():\n \"\"\"Unimplemented\"\"\"\n raise NotImplementedError\n\n\ndef get_jobs(languages: list[str], boxes=False):\n jobs = [\n"
},
{
"from": "gpt",
"value": " job_easy_ocr if not boxes else job_easy_ocr_boxes,"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport datetime\nimport pytest\nfrom typing import Any\nfrom freezegun.api import FrozenDateTimeFactory\nfrom homeassistant.const import CONF_NAME, Platform, STATE_OFF, STATE_ON\nfrom homeassistant.core import HomeAssistant\nfrom pytest_homeassistant_custom_component.common import (\n MockConfigEntry,\n async_fire_time_changed,\n)\nfrom pytest_homeassistant_custom_component.test_util.aiohttp import AiohttpClientMocker\nfrom custom_components.oref_alert.const import (\n ADD_SENSOR_SERVICE,\n ATTR_COUNTRY_ALERTS,\n ATTR_COUNTRY_ACTIVE_ALERTS,\n ATTR_SELECTED_AREAS_ALERTS,\n ATTR_SELECTED_AREAS_ACTIVE_ALERTS,\n CONF_ALERT_MAX_AGE,\n CONF_AREAS,\n CONF_OFF_ICON,\n CONF_ON_ICON,\n CONF_POLL_INTERVAL,\n DOMAIN,\n OREF_ALERT_UNIQUE_ID,\n ALL_AREAS_ID_SUFFIX,\n)\nfrom .utils import load_json_fixture, mock_urls\n\n# Code from current file (tests/test_binary_sensor.py):\n\"\"\"The tests for the binary_sensor file.\"\"\"\nfrom __future__ import annotations\n\n\n\n\n\n\n\nDEFAULT_OPTIONS = {CONF_AREAS: [\"בארי\"], CONF_ALERT_MAX_AGE: 10}\nENTITY_ID = f\"{Platform.BINARY_SENSOR}.{OREF_ALERT_UNIQUE_ID}\"\n\n\nasync def async_setup(\n hass: HomeAssistant, options: dict[str, Any] | None = None\n) -> str:\n \"\"\"Integration setup.\"\"\"\n options = options or {}\n config_entry = MockConfigEntry(\n domain=DOMAIN, options={**DEFAULT_OPTIONS, **options}\n )\n config_entry.add_to_hass(hass)\n assert await hass.config_entries.async_setup(config_entry.entry_id)\n await hass.async_block_till_done()\n return config_entry.entry_id\n\n\nasync def async_shutdown(hass: HomeAssistant, config_id: str) -> None:\n \"\"\"Shutdown by removing the integration.\"\"\"\n assert await hass.config_entries.async_remove(config_id)\n await hass.async_block_till_done()\n\n\nasync def test_state(\n hass: HomeAssistant,\n aioclient_mock: AiohttpClientMocker,\n freezer: FrozenDateTimeFactory,\n) -> None:\n \"\"\"Test entity state.\"\"\"\n freezer.move_to(\"2023-10-07 06:30:00+03:00\")\n mock_urls(aioclient_mock, None, \"single_alert_history.json\")\n\n config_id = await async_setup(hass)\n assert hass.states.get(ENTITY_ID).state == STATE_ON\n\n freezer.move_to(\"2023-10-07 06:39:50+03:00\")\n async_fire_time_changed(hass)\n await hass.async_block_till_done()\n assert hass.states.get(ENTITY_ID).state == STATE_ON\n\n freezer.move_to(\"2023-10-07 06:40:01+03:00\")\n async_fire_time_changed(hass)\n await hass.async_block_till_done()\n assert hass.states.get(ENTITY_ID).state == STATE_OFF\n\n await async_shutdown(hass, config_id)\n\n\[email protected](\n (\"areas\",),\n (([\"תל אביב - כל האזורים\"],), ([\"מחוז דן\"],)),\n ids=(\"City all areas\", \"District\"),\n)\nasync def test_real_time_alert_area_expansion(\n hass: HomeAssistant, aioclient_mock: AiohttpClientMocker, areas: list[str]\n) -> None:\n \"\"\"Test real time alert and city expansion.\"\"\"\n mock_urls(aioclient_mock, \"single_alert_real_time.json\", None)\n config_id = await async_setup(hass, {CONF_AREAS: areas})\n assert hass.states.get(ENTITY_ID).state == STATE_ON\n await async_shutdown(hass, config_id)\n\n\nasync def test_state_attributes(\n hass: HomeAssistant,\n aioclient_mock: AiohttpClientMocker,\n freezer: FrozenDateTimeFactory,\n) -> None:\n \"\"\"Test state attributes.\"\"\"\n freezer.move_to(\"2023-10-07 06:30:00+03:00\")\n mock_urls(\n aioclient_mock, \"multi_alerts_real_time.json\", \"multi_alerts_history.json\"\n )\n config_id = await async_setup(hass)\n state = hass.states.get(ENTITY_ID)\n"
},
{
"from": "gpt",
"value": " active_area_alert = load_json_fixture(\"single_alert_history.json\")"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport concurrent.futures\nimport json\nfrom pathlib import Path\nfrom time import sleep\nfrom urllib.parse import urlparse\nfrom loguru import logger\nfrom rich.prompt import Confirm\nfrom selenium.webdriver.chrome.webdriver import WebDriver\nfrom selenium.webdriver.common.by import By\nfrom .browser import get_browser, get_multiple_browsers\nfrom .pool import ThreadPool\nfrom .util.config import config, work_directory, set_work_directory\n from .tweet import Tweet\n\n# Code from current file (twitter_user_tweet_crawler/__main__.py):\n\n\n\n\ndef main():\n cookie: list[dict]\n work_list: list[WebDriver]\n driver: WebDriver\n\n def read_config() -> list[dict]:\n with open(work_directory / 'cookie.json', 'r') as f:\n return json.load(f)\n\n def write_config(data: list[dict]):\n with open(work_directory / 'cookie.json', 'w') as f:\n json.dump(data, f)\n\n def set_cookie(browser: WebDriver):\n for i in cookie:\n browser.add_cookie(i)\n\n def get_executor(count: int | None = None):\n return concurrent.futures.ThreadPoolExecutor(max_workers=count)\n\n def get_items_need_handle():\n return driver.find_elements(*selector)\n\n selector = (By.XPATH, '//*/div[2]/div/div[3]/a[@role=\"link\"]')\n"
},
{
"from": "gpt",
"value": " (Path(config.save) / 'res').mkdir(exist_ok=True, parents=True)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport torch\nfrom com_hom_emg.data import get_datasets, get_per_subj_data\n\n# Code from current file (tests/test_data.py):\n\n\n\ndef test_get_datasets_disjoint_val_test():\n # The subject used for val should be different each time\n # Likewise for test\n per_subj_data = get_per_subj_data()\n all_val_subj = []\n all_test_subj = []\n\n n_train = 8\n n_val = 1\n n_test = 1\n\n expected_train_size = 8 * 1224 # 1224 gestures per subject\n expected_val_size = n_val * 1224\n expected_test_size = n_test * 1224\n\n def check_contents(dataset, N):\n ## Check shapes\n # data = 8-channel EMG, 962 timesteps (= 500ms at 1926 Hz)\n assert dataset.tensors[0].shape == torch.Size([N, 8, 962])\n # labels = 2D labels\n assert dataset.tensors[1].shape == torch.Size([N, 2])\n # is_single = bool labels\n assert dataset.tensors[2].shape == torch.Size([N])\n # subj_ids = 1d labels\n assert dataset.tensors[3].shape == torch.Size([N])\n\n ## Check dtypes\n assert dataset.tensors[0].dtype == torch.float32\n assert dataset.tensors[1].dtype == torch.int64\n assert dataset.tensors[2].dtype == torch.bool\n assert dataset.tensors[3].dtype == torch.int64\n\n for i in range(10):\n"
},
{
"from": "gpt",
"value": " train_set, val_set, test_set, train_subj, val_subj, test_subj = get_datasets("
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport torch\nimport torch.nn.functional as F\nimport torchao.quantization.quant_api as quant_api\nfrom .quant_primitives import (\n dynamically_quantize_per_channel,\n quant_int8_dynamic_per_token_linear,\n)\n\n# Code from current file (torchao/quantization/smoothquant.py):\n# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"\nTesting out accuracy-only implementation of SmoothQuant\n(https://arxiv.org/pdf/2211.10438.pdf)\nNote: this is an application of input-weight equalization, with the addition that the\nmultiplication by scale is fused into the preceding layer, specifically for relevant\nparts of transformer blocks.\n\"\"\"\n\n\n\n__all__ = [\n \"get_scale\",\n \"SmoothFakeDynQuantMixin\",\n \"SmoothFakeDynamicallyQuantizedLinear\",\n \"swap_linear_with_smooth_fq_linear\",\n \"smooth_fq_linear_to_inference\",\n \"set_smooth_fq_attribute\",\n]\n\ndef get_scale(X_absmax, W_absmax, alpha=0.5):\n \"\"\"\n Calculate the scale based on abs(max(X)), abs(max(W)) and alpha\n If X is of dimension `b*n*k` and W is dimension `k*m`, the returned\n scale is of dimension `k`.\n Note: X_absmax is calculated outside of this function because we\n need to keep a running version of it during calibration. W_absmax\n is calculated outside of this function for consistency with X_absmax.\n \"\"\"\n X_pow = torch.pow(X_absmax, alpha)\n W_pow = torch.pow(W_absmax, 1.0 - alpha)\n div = X_pow / W_pow\n return div.reshape(-1)\n\n\nclass SmoothFakeDynQuantMixin(torch.nn.Module):\n def init_smoothquant_variables(self, alpha):\n self.calibrating = True\n self.x_running_abs_max = None\n self.register_buffer(\"smooth_scale\", None)\n self.alpha = alpha\n # debug only\n self.debug_skip_scaling = False\n # self.debug_skip_scaling = True\n\n # Currently torch._int_mm cuBLAS underlying kernel does not work with\n # non-contiguous weight. However, torch.compil'ing through\n # torch._int_mm leads to triton code which is ~2x faster if the weight\n # is transposed. 
So, for now we have a debug flag to toggle whether\n # we store the quantized weight transposed, so that we can get correct\n # numerics both in eager mode and after torch.compile.\n # The default is True for cuBLAS / eager mode, set to False for\n # torch.compile.\n # self.store_w_int_repr_t = True\n self.store_w_int_repr_t = False\n\n def update_x_running_abs_max(self, X):\n # update the running max of incoming activations\n all_dims_except_last = tuple(range(len(X.shape) - 1))\n cur_abs_max = torch.amax(torch.abs(X), dim=all_dims_except_last)\n if self.x_running_abs_max is None:\n self.x_running_abs_max = cur_abs_max\n else:\n self.x_running_abs_max = torch.max(cur_abs_max, self.x_running_abs_max)\n\n def get_scaled_quantized_w(self):\n # inference\n assert (\n self.smooth_scale is not None\n ), \"self.smooth_scale is None, did you turn on inference?\"\n W = self.weight\n\n # scale weight\n # in the future, this can be done ahead of time instead of\n # during inference\n if not self.debug_skip_scaling:\n # TODO(future): do below in `to_inference` instead of here\n W = torch.matmul(\n torch.diag(self.smooth_scale), W.transpose(0, 1)\n ).transpose(0, 1)\n\n # fake quantize input and weight, and then do matmul in fp32/fp16\n # in the future, this should be replaced with quantized kernels which\n # work on NVIDIA GPUs (such as protoquant's implementation)\n W_dq_dtype = W.dtype\n"
},
{
"from": "gpt",
"value": " W_int_repr, W_scales, W_zps = dynamically_quantize_per_channel("
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom .torch_nn import BasicConv, batched_index_select, act_layer\nfrom .torch_edge import DenseDilatedKnnGraph\nfrom .pos_embed import get_2d_relative_pos_embed\nfrom timm.models.layers import DropPath\n\n# Code from current file (lib/gcn_lib/torch_vertex.py):\n# 2022.06.17-Changed for building ViG model\n# Huawei Technologies Co., Ltd. <[email protected]>\n\n\nclass MRConv2d(nn.Module):\n \"\"\"\n Max-Relative Graph Convolution (Paper: https://arxiv.org/abs/1904.03751) for dense data type\n \"\"\"\n def __init__(self, in_channels, out_channels, act='relu', norm=None, bias=True, kernel_size=1, padding=0, groups=4):\n super(MRConv2d, self).__init__()\n"
},
{
"from": "gpt",
"value": " self.nn = BasicConv([in_channels*2, out_channels], act, norm, bias, kernel_size=1, padding=0, groups=4)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport pytest\nfrom requests_mock import Mocker\nfrom ..utils import group_by_url, MockSet\n\n# Code from current file (multi_api_mocker/contrib/pytest_plugin.py):\n\n\n\[email protected](scope=\"function\")\ndef setup_api_mocks(requests_mock: Mocker, request) -> MockSet:\n \"\"\"\n A pytest fixture for configuring mock API responses in a test environment.\n It takes subclasses of MockAPIResponse, each representing a unique API call\n configuration. These subclasses facilitate the creation of simple or complex\n response flows, simulating real-world API interactions.\n\n Parameters:\n requests_mock (Mocker): The pytest requests_mock fixture.\n request: The pytest request object containing parametrized test data.\n\n Returns:\n MockSet: An instance of MockSet containing the organized MockAPIResponse\n objects, ready for use in tests.\n\n The fixture supports multiple test scenarios, allowing for thorough\n testing of varying API response conditions. This is especially useful\n for simulating sequences of API calls like Fork, Commit, and Push\n in a version control system context.\n\n Example Usage:\n - Single API Call Test:\n @pytest.mark.parametrize(\"setup_api_mocks\", [([Fork()])], indirect=True)\n\n - Multi-call Sequence Test:\n @pytest.mark.parametrize(\n \"setup_api_mocks\", [([Fork(), Commit(), Push()])], indirect=True\n )\n\n - Testing Multiple Scenarios:\n @pytest.mark.parametrize(\n \"setup_api_mocks\",\n [([Fork(), Commit(), Push()]), ([Fork(), Commit(), ForcePush()])],\n indirect=True\n )\n\n\n This fixture converts the list of MockAPIResponse subclasses into MockConfiguration\n instances, registers them with requests_mock, and returns a MockSet object, which\n allows querying each mock by its endpoint name.\n \"\"\"\n # Convert the incoming parameter to a list of MockConfiguration instances\n"
},
{
"from": "gpt",
"value": " api_mocks_configurations = group_by_url(request.param)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom continuous_eval.llm_factory import DefaultLLM, LLMInterface\nfrom continuous_eval.metrics.base import LLMBasedMetric\nfrom continuous_eval.metrics.retrieval_LLM_based_metrics import LLMBasedContextCoverage\n\n# Code from current file (continuous_eval/metrics/generation_LLM_based_metrics.py):\n\n\nclass LLMBasedFaithfulness(LLMBasedMetric):\n \"\"\"\n The LLM based faithfulness metric.\n Measures whether the generated answer is faithful to the retrieved context.\n \"\"\"\n\n def __init__(\n self,\n model: LLMInterface = DefaultLLM,\n use_few_shot: bool = True,\n classify_by_statement: bool = False,\n ):\n super().__init__(model)\n self.use_few_shot = use_few_shot\n self.classify_by_statement = classify_by_statement\n\n def __str__(self):\n return f\"LLMBasedFaithfulness(model={self.model}, use_few_shot={self.use_few_shot}, classify_by_statement={self.classify_by_statement})\"\n\n def calculate(self, question, retrieved_contexts, answer, **kwargs):\n \"\"\"\n Calculate the faithfulness score for the given datapoint.\n \"\"\"\n if self.classify_by_statement:\n # Context coverage uses the same prompt as faithfulness because it calculates how what proportion statements in the answer can be attributed to the context.\n # The difference is that faithfulness uses the generated answer, while context coverage uses ground truth answer (to evaluate context).\n"
},
{
"from": "gpt",
"value": " context_coverage = LLMBasedContextCoverage(use_few_shot=self.use_few_shot)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport pandas as pd\nimport torch\nimport numpy as np\nimport json\nfrom objaverse_utils.utils import text_to_uid_dict\nfrom gpt_4.verification import check_text_similarity\nfrom gpt_4.bard_verify import verify_objaverse_object\n\n# Code from current file (objaverse_utils/find_uid_utils.py):\n\nobjaverse_csv = pd.read_csv('objaverse_utils/Cap3D_automated_Objaverse.csv')\nobjaverse_csv = objaverse_csv.dropna()\nobjaverse_csv_uids = list(objaverse_csv.iloc[:, 0].values)\nobjaverse_csv_annotations = list(objaverse_csv.iloc[:, 1].values)\nobjaverse_csv_annotations_embeddings = torch.load(\"objaverse_utils/data/cap3d_sentence_bert_embeddings.pt\")\ntag_uids = []\ntag_embeddings = []\ntag_descriptions = [] \nnum_chunks = 31\nfor idx in range(num_chunks):\n uids = torch.load(\"objaverse_utils/data/default_tag_uids_{}.pt\".format(idx))\n embeddings = torch.load(\"objaverse_utils/data/default_tag_embeddings_{}.pt\".format(idx))\n descriptions = torch.load(\"objaverse_utils/data/default_tag_names_{}.pt\".format(idx))\n tag_uids = tag_uids + uids\n tag_descriptions = tag_descriptions + descriptions\n tag_embeddings.append(embeddings)\n\ndef find_uid(obj_descrption, candidate_num=10, debug=False, task_name=None, task_description=None):\n"
},
{
"from": "gpt",
"value": " uids = text_to_uid_dict.get(obj_descrption, None)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom collections import OrderedDict\nfrom icecream import ic # pylint: disable=E0401\nfrom .elem import Edge, Node, NodeEnum, RelEnum\nimport json\nimport typing\nimport networkx as nx # pylint: disable=E0401\nimport spacy # pylint: disable=E0401\n\n# Code from current file (textgraphs/graph.py):\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nThis class implements a generic, in-memory graph data structure used\nto represent the _lemma graph_.\n\nsee copyright/license https://huggingface.co/spaces/DerwenAI/textgraphs/blob/main/README.md\n\"\"\"\n\n\n\n\n\n######################################################################\n## class definitions\n\nclass SimpleGraph:\n \"\"\"\nAn in-memory graph used to build a `MultiDiGraph` in NetworkX.\n \"\"\"\n\n def __init__ (\n self\n ) -> None:\n \"\"\"\nConstructor.\n \"\"\"\n self.nodes: typing.Dict[ str, Node ] = OrderedDict()\n"
},
{
"from": "gpt",
"value": " self.edges: typing.Dict[ str, Edge ] = {}"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom jinja2 import Environment, FileSystemLoader\nfrom epg import utils\nfrom epg.generator import xmltv\nfrom epg.generator import diyp\nfrom epg.scraper import __xmltv\nfrom lxml import etree\nfrom datetime import datetime, timezone\nfrom croniter import croniter\nimport os\nimport shutil\n\n# Code from current file (main.py):\n\nCF_PAGES = os.getenv(\"CF_PAGES\")\nCF_PAGES_URL = os.getenv(\"CF_PAGES_URL\")\nDEPLOY_HOOK = os.getenv(\"DEPLOY_HOOK\")\nCLOUDFLARE_API_TOKEN = os.getenv(\"CLOUDFLARE_API_TOKEN\")\nXMLTV_URL = os.getenv(\"XMLTV_URL\", \"\")\nTZ = os.getenv(\"TZ\")\nif TZ == None:\n print(\n \"!!!Please set TZ environment variables to define timezone or it will use system timezone by default!!!\"\n )\nCRON_TRIGGER = os.getenv(\"CRON_TRIGGER\", \"0 0 * * *\")\nnext_cron_time = (\n croniter(CRON_TRIGGER, datetime.now(timezone.utc))\n .get_next(datetime)\n .replace(tzinfo=timezone.utc)\n .astimezone()\n)\n\ndtd = etree.DTD(open(\"xmltv.dtd\", \"r\"))\n\nnow = datetime.now()\ncurrent_timezone = now.astimezone().tzinfo\ntimezone_name = current_timezone.tzname(now)\ntimezone_offset = now.astimezone().strftime(\"%z\")\nprint(\"use timezone:\", timezone_name, f\"UTC{timezone_offset}\", flush=True)\n\nconfig_path = os.path.join(os.getcwd(), \"config\", \"channels.yaml\")\nepg_path = os.path.join(os.getcwd(), \"web\", \"epg.xml\")\nif not os.path.exists(os.path.join(os.getcwd(), \"web\")):\n os.mkdir(os.path.join(os.getcwd(), \"web\"))\n\nchannels = utils.load_config(config_path)\n\nif XMLTV_URL == \"\":\n xml_channels = []\n print(\"!!!Please set XMLTV_URL environment variables to reuse XML!!!\")\nelse:\n print(\"reuse XML:\", XMLTV_URL, flush=True)\n"
},
{
"from": "gpt",
"value": " xml_channels = __xmltv.get_channels(XMLTV_URL, dtd)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport os\nimport urllib\nimport chess\nfrom typing import Iterator\nfrom fastapi import WebSocket, WebSocketDisconnect\nfrom langserve import RemoteRunnable\nfrom chesster.app.utils import (\n display_board,\n get_engine_score,\n serialize_board_state_with_last_move,\n)\n\n# Code from current file (chesster/app/board_manager.py):\n\n\n\n\nLANGSERVE_HOST = os.getenv(\"LANGSERVE_HOST\", \"localhost\")\nLANGSERVE_SECRET = os.getenv(\"LANGSERVE_SECRET\", \"secret\")\nCHAT_HISTORY_LENGTH = 50 # Number of most recent (human, ai) exchanges to retain.\n\n\nclass BoardManager:\n def __init__(self):\n self.active_websockets: list[WebSocket] = []\n self.last_updated_image = None\n self.board = chess.Board()\n self.player_side = chess.WHITE\n self.interesting_move_iterator = None\n self.chat_history = []\n self.remote_runnable = RemoteRunnable(\n f\"http://{LANGSERVE_HOST}:8001/chesster\", headers={\"x-token\": LANGSERVE_SECRET}\n )\n\n async def set_board(self, board: chess.Board) -> None:\n \"\"\"Set board.\"\"\"\n self.board = board\n await self.update_board(self.board)\n\n async def set_player_side(self, player_side: chess.Color) -> None:\n \"\"\"Set player side.\"\"\"\n self.player_side = player_side\n await self.update_board(self.board)\n\n async def set_interesting_move_iterator(self) -> None:\n \"\"\"Calculate interesting moves in board's move stack.\"\"\"\n self.interesting_move_iterator = self._interesting_move_iterator()\n\n async def make_move(self, move: chess.Move) -> None:\n \"\"\"Parse move and update board.\"\"\"\n self.board.push(move)\n await self.update_board(self.board)\n\n async def _interesting_move_iterator(\n self, centipawn_threshold: int = 100\n ) -> Iterator[chess.Board]:\n \"\"\"Make iterator over interesting moves according to Chess engine.\"\"\"\n new_board = chess.Board()\n centipawns = 0\n for move in self.board.move_stack:\n new_board.push(move)\n new_centipawns = get_engine_score(new_board, self.player_side)\n if new_centipawns is None:\n continue\n delta = new_centipawns - centipawns\n if new_board.turn != self.player_side: # player just moved\n if abs(delta) > centipawn_threshold:\n await self.update_board(new_board)\n yield {\n"
},
{
"from": "gpt",
"value": " \"board\": serialize_board_state_with_last_move("
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport base64\nimport os\nimport shutil\nimport logging\nfrom pathlib import Path\nfrom typing import Any, Dict, List\nfrom fastapi import FastAPI, File, UploadFile\nfrom fastapi.responses import JSONResponse, StreamingResponse\nfrom pydantic import BaseModel, Field\nfrom pymilvus.exceptions import MilvusException, MilvusUnavailableException\nfrom RetrievalAugmentedGeneration.common import utils\nfrom RetrievalAugmentedGeneration.examples.developer_rag import chains\n\n# Code from current file (RetrievalAugmentedGeneration/common/server.py):\n# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n# SPDX-License-Identifier: Apache-2.0\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"The definition of the Llama Index chain server.\"\"\"\n\n\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n# create the FastAPI server\napp = FastAPI()\n# prestage the embedding model\n_ = utils.get_embedding_model()\n# set the global service context for Llama Index\nutils.set_service_context()\n\n\nclass Prompt(BaseModel):\n \"\"\"Definition of the Prompt API data type.\"\"\"\n\n question: str = Field(description=\"The input query/prompt to the pipeline.\")\n context: str = Field(description=\"Additional context for the question (optional)\")\n use_knowledge_base: bool = Field(description=\"Whether to use a knowledge base\", default=True)\n num_tokens: int = Field(description=\"The maximum number of tokens in the response.\", default=50)\n\n\nclass DocumentSearch(BaseModel):\n \"\"\"Definition of the DocumentSearch API data type.\"\"\"\n\n content: str = Field(description=\"The content or keywords to search for within documents.\")\n num_docs: int = Field(description=\"The maximum number of documents to return in the response.\", default=4)\n\n\[email protected](\"/uploadDocument\")\nasync def upload_document(file: UploadFile = File(...)) -> JSONResponse:\n \"\"\"Upload a document to the vector store.\"\"\"\n if not file.filename:\n return JSONResponse(content={\"message\": \"No files provided\"}, status_code=200)\n\n try:\n\n upload_folder = \"uploaded_files\"\n upload_file = os.path.basename(file.filename)\n if not upload_file:\n raise RuntimeError(\"Error parsing uploaded filename.\")\n file_path = os.path.join(upload_folder, upload_file)\n uploads_dir = Path(upload_folder)\n uploads_dir.mkdir(parents=True, exist_ok=True)\n\n with open(file_path, \"wb\") as f:\n shutil.copyfileobj(file.file, f)\n\n"
},
{
"from": "gpt",
"value": " chains.ingest_docs(file_path, upload_file)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport argparse\nimport datetime\nimport json\nimport os\nimport time\nfrom pytz import timezone\nfrom tqdm import tqdm\nfrom chat.server.monitor.basic_stats import NUM_SERVERS\nfrom chat.server.monitor.clean_battle_data import (\n to_openai_format,\n replace_model_name,\n)\nfrom chat.utils import detect_language\n\n# Code from current file (chat/server/monitor/clean_chat_data.py):\n\"\"\"\nClean chatbot arena chat log.\n\nUsage:\npython3 clean_chat_data.py --mode conv_release\n\"\"\"\n\n\n\n\nNETWORK_ERROR_MSG = (\n \"NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.\".lower()\n)\n\n\ndef get_log_files(max_num_files=None):\n dates = []\n for month in [4, 5, 6, 7]:\n for day in range(1, 32):\n dates.append(f\"2023-{month:02d}-{day:02d}\")\n\n for month in [8]:\n for day in range(1, 32):\n dates.append(f\"2023-{month:02d}-{day:02d}\")\n\n filenames = []\n for d in dates:\n for i in range(NUM_SERVERS):\n name = os.path.expanduser(f\"~/fastchat_logs/server{i}/{d}-conv.json\")\n if os.path.exists(name):\n filenames.append(name)\n max_num_files = max_num_files or len(filenames)\n # filenames = list(reversed(filenames))\n filenames = filenames[-max_num_files:]\n return filenames\n\n\ndef clean_chat_data(log_files):\n raw_data = []\n for filename in tqdm(log_files, desc=\"read files\"):\n for retry in range(5):\n try:\n lines = open(filename).readlines()\n break\n except FileNotFoundError:\n time.sleep(2)\n\n for l in lines:\n row = json.loads(l)\n if row[\"type\"] == \"chat\":\n raw_data.append(row)\n\n all_models = set()\n all_ips = dict()\n chats = []\n ct_invalid_conv_id = 0\n ct_invalid = 0\n ct_network_error = 0\n for row in raw_data:\n if \"conv_id\" not in row[\"state\"]:\n ct_invalid_conv_id += 1\n continue\n\n conversation_id = row[\"state\"][\"conv_id\"]\n if conversation_id is None:\n ct_invalid_conv_id += 1\n continue\n\n state = row[\"state\"]\n"
},
{
"from": "gpt",
"value": " conversation = to_openai_format(state[\"messages\"][state[\"offset\"] :])"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport requests\nimport re\nimport json\nimport time\nimport os\nimport logging\nfrom pathlib import Path\nfrom datetime import datetime, timedelta\nfrom json.decoder import JSONDecodeError\nfrom finary_uapi.user_real_estates import (\n get_user_real_estates,\n delete_user_real_estates,\n update_user_real_estates,\n add_user_real_estates,\n add_user_real_estates_with_currency,\n)\nfrom finary_uapi.user_me import get_display_currency_code\nfrom .constants import (\n GNOSIS_API_TOKENLIST_URI,\n REALT_API_TOKENLIST_URI,\n REALT_OFFLINE_TOKENS_LIST,\n)\nfrom .utils import convert_currency\n\n# Code from current file (finasync/realt.py):\n\n\n\n\n\ndef get_realt_token_details(realt_token_contractAdress):\n Now_Time = datetime.today()\n RealT_OfflineTokensList_Path = Path(REALT_OFFLINE_TOKENS_LIST)\n RealT_OfflineTokensList_Path.touch(exist_ok=True)\n with open(RealT_OfflineTokensList_Path) as json_file:\n try:\n RealT_OfflineTokensList = json.load(json_file)\n except JSONDecodeError:\n RealT_OfflineTokensList = {\n \"info\": {\n \"last_sync\": str(datetime.timestamp(Now_Time - timedelta(weeks=2)))\n },\n \"data\": {},\n }\n\n # Update offlineTokensList from RealT API only if more than 1 week old\n if float(RealT_OfflineTokensList[\"info\"][\"last_sync\"]) < datetime.timestamp(\n Now_Time - timedelta(weeks=1)\n ):\n MyRealT_API_Header = {\n \"Accept\": \"*/*\",\n \"X-AUTH-REALT-TOKEN\": os.environ[\"MYREALT_API_KEY\"],\n }\n\n TokensListReq = requests.get(\n"
},
{
"from": "gpt",
"value": " REALT_API_TOKENLIST_URI, headers=MyRealT_API_Header"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport paho.mqtt.client as mqtt\nimport json\nimport logging\nfrom datetime import datetime\nfrom rtl_433_discoverandsubmit import config\nfrom rtl_433_discoverandsubmit.modules.device_manager import save_devices_to_file\n\n# Code from current file (rtl_433_discoverandsubmit/modules/mqtt_client.py):\nlog_level = getattr(logging, config.configuration['log_level'])\nlogging.basicConfig(filename=config.configuration['log_filename'], level=log_level)\n\n# List to store detected devices\ndetected_devices = []\n\n\ndef reset_message_counters():\n global detected_devices\n for device in detected_devices:\n if 'message_count' in device:\n device['message_count'] = 0\n\ndef sort_detected_devices():\n global detected_devices\n criteria = config.configuration['current_sort_criteria']\n reverse = True if criteria in [\"last_detected_time\", \"message_count\"] else False\n\n if criteria in [\"last_detected_time\", \"message_count\"]:\n detected_devices.sort(key=lambda x: x[criteria], reverse=reverse)\n else:\n detected_devices.sort(key=lambda x: x.get('model', '').lower())\n\n return detected_devices\n\n\ndef on_connect(client, userdata, flags, rc):\n \"\"\"Callback for when the client receives a CONNACK response from the server.\"\"\"\n client.subscribe(config.configuration['topic'])\n\ndef on_message(client, userdata, msg):\n global detected_devices\n payload = json.loads(msg.payload.decode())\n\n # Safely get 'id' from payload or default to 'unknown' (fixes https://github.com/dewgenenny/rtl_433_discoverandsubmit/issues/1)\n device_id_value = payload.get('id', 'unknown')\n\n topicprefix = \"/\".join(msg.topic.split(\"/\", 2)[:2])\n\n # Construct a unique device identifier from model and id\n device_id = f\"{payload['model']}_{device_id_value}\"\n\n # Check if the device is already in the detected devices list\n existing_device = next((device for device in detected_devices if device['id'] == device_id), None)\n\n # If the device is new, add it to the list\n if not existing_device:\n device_data = {\n 'first_detected_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'last_detected_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'message_count': 1 # Initialize message count\n }\n device_data.update(payload) # Add all the payload attributes to the dictionary\n device_data['topicprefix']= topicprefix\n old_id = payload.get('id')\n logging.debug(\"Old ID = \"+ str(old_id))\n device_data['original_id'] = old_id\n device_data['id'] = device_id # Set the id field after updating from payload\n detected_devices.append(device_data)\n\n #save new device to file, so that it is remembered on startup\n"
},
{
"from": "gpt",
"value": " save_devices_to_file(detected_devices)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom re import T\nfrom sys import implementation\nfrom neuroglia.dependency_injection.service_provider import IServiceProvider, ServiceCollection, ServiceProvider\nfrom tests.services import FileLogger, LoggerBase, NullLogger, PrintLogger\nimport pytest\n\n# Code from current file (tests/cases/test_service_provider.py):\n\nclass TestServiceProvider:\n \n def test_build_should_work(self):\n #arrange\n services = ServiceCollection()\n services.add_singleton(LoggerBase, PrintLogger)\n services.add_singleton(LoggerBase, singleton = FileLogger())\n services.add_singleton(LoggerBase, implementation_factory = self._build_null_logger)\n \n #act\n service_provider = services.build()\n \n #assert\n assert service_provider is not None, 'service_provider is none'\n \n def test_get_service_should_work(self):\n #arrange\n services = ServiceCollection()\n implementation_type = PrintLogger\n services.add_singleton(LoggerBase, implementation_type)\n service_provider = services.build()\n \n #act\n logger = service_provider.get_service(LoggerBase)\n \n #assert\n assert logger is not None, 'logger is none'\n assert isinstance(logger, implementation_type), f\"logger is not of expected type '{implementation_type.__name__}'\"\n \n def test_get_unregistered_service_should_work(self):\n #arrange\n services = ServiceCollection()\n service_provider = services.build()\n\n #act\n logger = service_provider.get_service(LoggerBase)\n \n #assert\n assert logger is None, 'logger is not none'\n\n def test_get_required_service_should_work(self):\n #arrange\n services = ServiceCollection()\n implementation_type = PrintLogger\n services.add_singleton(LoggerBase, implementation_type)\n service_provider = services.build()\n \n #act\n logger = service_provider.get_required_service(LoggerBase)\n \n #assert\n assert logger is not None, 'logger is none'\n assert isinstance(logger, implementation_type), f\"logger is not of expected type '{implementation_type.__name__}'\"\n\n def test_get_required_unregistered_service_should_raise_error(self):\n #arrange\n services = ServiceCollection()\n service_provider = services.build()\n \n #assert\n with pytest.raises(Exception): \n service_provider.get_required_service(LoggerBase)()\n \n def test_get_scoped_service_from_root_should_raise_error(self):\n #arrange\n services = ServiceCollection()\n implementation_type = PrintLogger\n services.add_scoped(LoggerBase, implementation_type)\n service_provider = services.build()\n \n #assert\n with pytest.raises(Exception): \n service_provider.get_required_service(LoggerBase)()\n\n def test_get_services_should_work(self):\n #arrange\n services = ServiceCollection()\n services.add_singleton(LoggerBase, PrintLogger)\n services.add_singleton(LoggerBase, singleton = FileLogger())\n services.add_singleton(LoggerBase, implementation_factory = self._build_null_logger)\n service_provider = services.build()\n \n #act\n loggers = service_provider.get_services(LoggerBase)\n \n #assert\n assert len(loggers) == 3, f'expected 3 loggers, got {len(loggers)}'\n \n def test_create_scope_should_work(self):\n pass\n \n def test_get_scoped_service_should_work(self):\n pass\n\n"
},
{
"from": "gpt",
"value": " def _build_null_logger(self, provider : IServiceProvider) -> NullLogger: return NullLogger()"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport re\nimport datetime\nfrom monzo_utils.lib.config import Config\nfrom monzo_utils.model.transaction import Transaction\nfrom monzo_utils.lib.transactions import Transactions\n\n# Code from current file (monzo_utils/model/payment.py):\n self.today = datetime.datetime.now()\n\n self.cache = {}\n\n\n def data(self, abbreviate=False):\n if self.num_paid is not None:\n suffix = '%d/%d' % (\n self.num_paid,\n self.num_total\n )\n else:\n suffix = ''\n\n if self.remaining is not None:\n remaining = self.remaining\n else:\n remaining = None\n\n return {\n 'status': self.status,\n 'payment_type': self.payment_type if abbreviate is False else self.abbreviate(self.payment_type),\n 'name': self.name,\n 'suffix': suffix,\n 'amount': self.display_amount,\n 'remaining': remaining,\n 'last_date': self.short_date(self.last_date) if abbreviate else self.last_date,\n 'due_date': self.short_date(self.due_date) if abbreviate else self.due_date\n }\n\n\n def abbreviate(self, string):\n abbreviated = ''\n\n for i in range(0, len(string)):\n if string[i].isupper():\n abbreviated += string[i]\n\n return abbreviated\n\n\n def short_date(self, date):\n if not date:\n return None\n\n return date.strftime('%d/%m/%y')\n\n\n def display(self):\n data = self.data()\n\n print(\"%s: %s %s %s %s %s %s %s\" % (\n data['status'].rjust(7),\n data['payment_type'].ljust(15),\n data['name'].ljust(25),\n data['suffix'].ljust(5),\n ('£%.2f' % (data['amount'])).ljust(8),\n ('£%.2f' % (data['remaining'])).ljust(8) if data['remaining'] else ''.ljust(8),\n data['last_date'].strftime('%Y-%m-%d').ljust(12) if data['last_date'] else ''.ljust(12),\n data['due_date'].strftime('%Y-%m-%d').ljust(10) if data['due_date'] else ''\n ))\n\n\n @property\n def name(self):\n return self.payment_config['name']\n\n\n @property\n def status(self):\n if 'start_date' in self.payment_config and self.payment_config['start_date'] >= self.next_salary_date:\n return 'SKIPPED'\n\n if 'yearly_month' in self.payment_config:\n if self.yearly_payment_due_this_month(self.payment_config, self.last_salary_date) is False:\n return 'SKIPPED'\n\n if 'renew_date' in self.payment_config and self.payment_config['renew_date'] >= self.next_salary_date:\n return 'SKIPPED'\n\n if 'exclude_months' in self.payment_config and self.today.month in self.payment_config['exclude_months']:\n return 'SKIPPED'\n\n if self.last_date and self.last_date >= self.last_salary_date:\n return 'PAID'\n\n if self.due_date and self.due_date >= self.next_salary_date:\n return 'SKIPPED'\n\n return 'DUE'\n\n\n @property\n def payment_type(self):\n return re.sub(r'(?<!^)(?=[A-Z])', '_', type(self).__name__).replace('_',' ')\n\n\n @property\n def num_paid(self):\n return None\n\n\n @property\n def num_total(self):\n if 'months' in self.payment_config:\n return self.payment_config['months']\n\n return None\n\n\n @property\n def remaining(self):\n pass\n\n\n @property\n def display_amount(self):\n today = datetime.datetime.now()\n today = datetime.date(today.year, today.month, today.day)\n\n"
},
{
"from": "gpt",
"value": " if 'last_amount_overrides' in Config().keys and \\"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom tqdm import tqdm\nfrom torch_mgdcf.metrics.ranking import ndcg_score, precision_score, recall_score\nfrom torch_mgdcf.vector_search.vector_search import VectorSearchEngine\nimport numpy as np\nimport torch\n\n# Code from current file (torch_mgdcf/evaluation/ranking.py):\n# coding=utf-8\n# The code is from our another project GRecX: https://github.com/maenzhier/grecx_datasets\n\n\n\n\ndef score(ground_truth, pred_items, k_list, metrics):\n pred_match = [1 if item in ground_truth else 0 for item in pred_items]\n\n max_k = k_list[-1]\n if len(ground_truth) > max_k:\n ndcg_gold = [1] * max_k\n else:\n ndcg_gold = [1] * len(ground_truth) + [0] * (max_k - len(ground_truth))\n\n res_score = []\n for metric in metrics:\n if metric == \"ndcg\":\n score_func = ndcg_score\n elif metric == \"precision\":\n score_func = precision_score\n elif metric == \"recall\":\n score_func = recall_score\n else:\n raise Exception(\"Not Found Metric : {}\".format(metric))\n\n for k in k_list:\n if metric == \"ndcg\":\n res_score.append(score_func(ndcg_gold[:k], pred_match[:k]))\n else:\n res_score.append(score_func(ground_truth, pred_match[:k]))\n\n return res_score\n\n\ndef evaluate_mean_global_metrics(user_items_dict, user_mask_items_dict,\n user_embedding, item_embedding,\n k_list=[10, 20], metrics=[\"ndcg\"]):\n\n"
},
{
"from": "gpt",
"value": " v_search = VectorSearchEngine(item_embedding)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport argparse\nimport joblib\nimport pandas as pd\nfrom src.utils.common import read_yaml\nfrom src.utils.sys_logging import get_logger\nfrom sklearn.preprocessing import LabelEncoder\nfrom src.utils.common import write_dict_to_yaml\nfrom src.utils.data_ops import gzip_np_arr\nfrom sklearn.model_selection import train_test_split\nfrom src.utils.data_ops import get_fitted_pipeline\nfrom pathlib import Path\n\n# Code from current file (src/stage_03_preprocess_data.py):\n\nSTAGE = \"Preprocess Data\"\n\n\ndef preprocess_data():\n complete_df = pd.read_parquet(RAW_DATA_FILE_PATH)\n logger.info(\n f'The raw data file has been loaded from \"{RAW_DATA_FILE_PATH}\" with the shape \"{complete_df.shape}\"'\n )\n duplicate_rows = complete_df.duplicated().sum()\n if duplicate_rows > 0:\n logger.warning(\n f\"Found {duplicate_rows} duplicate rows, removing duplicate rows...\"\n )\n complete_df = complete_df.drop_duplicates(keep=\"first\")\n X = complete_df.drop([TARGET_COLUMN_NAME], axis=1)\n y = complete_df[TARGET_COLUMN_NAME]\n feature_cols = params[\"input_features_schema\"]\n feature_cols = list(feature_cols.keys())\n logger.info(f\"Read {len(feature_cols)} feature columns from params\")\n data_processing_pipeline = get_fitted_pipeline(\n X, feature_cols, KNN_IMPUTER_NEIGHBORS=KNN_IMPUTER_NEIGHBORS\n )\n Path(DATA_PREPROCESSING_PIPELINE_FILE_PATH).parent.absolute().mkdir(parents=True, exist_ok=True)\n joblib.dump(data_processing_pipeline, DATA_PREPROCESSING_PIPELINE_FILE_PATH, compress=1)\n logger.info(f\"Saved the preprocessing pipeline to {DATA_PREPROCESSING_PIPELINE_FILE_PATH}\")\n data_processing_pipeline = joblib.load(DATA_PREPROCESSING_PIPELINE_FILE_PATH)\n data_processing_pipeline\n data_processing_pipeline = joblib.load(DATA_PREPROCESSING_PIPELINE_FILE_PATH)\n logger.info(\n f'Loaded sklearn data preprocessing pipeline from \"{DATA_PREPROCESSING_PIPELINE_FILE_PATH}\"'\n )\n X_transformed = data_processing_pipeline.transform(X)\n logger.info(f'Dataframe shape after transformation is \"{X_transformed.shape}\"')\n\n le = LabelEncoder()\n le.fit(y)\n labels_mapping_dict = {\"labels_mapping\": \"\"}\n le_dict = dict(zip(le.transform(le.classes_), le.classes_))\n le_dict = {int(k): v for k, v in le_dict.items()}\n\n labels_mapping_dict[\"labels_mapping\"] = le_dict\n logger.info(f\"Label encoding map has the dictionary: {le_dict}\")\n write_dict_to_yaml(labels_mapping_dict, parsed_args.params)\n logger.info(f'Updated the label encoding map in the file at \"{parsed_args.params}\"')\n"
},
{
"from": "gpt",
"value": " labels_dict = read_yaml(parsed_args.params)[\"labels_mapping\"]"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport multiprocessing as mp\n import time\n import traceback\n import traceback\nfrom dynapipe.pipe.data_loader import (\n _get_from_shared_kv_store,\n _init_kv_store,\n _put_to_shared_kv_store,\n)\n\n# Code from current file (tests/test_kv_store.py):\n# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\n# SPDX-License-Identifier: Apache-2.0\n\n# Note: this test requires torch\n# to run this test, exec:\n# DYNAPIPE_DEBUG=DEBUG DYNAPIPE_LOGGING_DEBUG_DIR=./test_debug \\\n# torchrun --standalone --nnodes=1 --nproc_per_node=1 test_kv_store.py\n\n\n\n\ndef _producer_process(max_iters, buffer_size=32):\n try:\n kv_store, _, _ = _init_kv_store(is_master=True)\n # set all ack keys\n for i in range(buffer_size):\n kv_store.set(f\"key_{i}_ack\".format(i), \"1\")\n kv_store.set(f\"key_{i}_r0_ack\".format(i), \"1\")\n for i in range(max_iters):\n key = \"key_{}\".format(i % buffer_size)\n payload = str(i)\n _put_to_shared_kv_store(kv_store, key, payload)\n print(\"[producer] put key: {}\".format(key), flush=True)\n\n time.sleep(2)\n except Exception as e:\n\n traceback.print_exc()\n raise e\n\n\ndef _consumer_process(max_iters, buffer_size=32):\n try:\n kv_store, _, _ = _init_kv_store(is_master=False)\n for i in range(max_iters):\n key = \"key_{}\".format(i % buffer_size)\n"
},
{
"from": "gpt",
"value": " payload = _get_from_shared_kv_store("
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom stable_diffusion.config import PathConfig\nfrom stable_diffusion.model_io import preload_models_from_safetensor_weights\nfrom utils import _state_dict\nfrom utils import get_state_dict_from_safetensor\n\n# Code from current file (model_inspector.py):\n\n\nINSPECTION_FILE = \"model_inspection.txt\"\nNUM_ITEMS = 100\n\nMODEL_FILE = \"./models/v2-1_512-ema-pruned.safetensors\"\nMODEL_FILE1 = \"./unet/diffusion_pytorch_model_test.safetensors\"\nMODEL_FILE2 = \"./unet/xxmix9realistic_v40.safetensors\"\n\n\n\n# Recreate the inspection file at every execution of the script\nwith open(INSPECTION_FILE, 'w') as f:\n pass\n\ndef write_to_file(*args, **kwargs):\n \"\"\"Write the text to the inspection file.\"\"\"\n # Convert the arguments to a string\n message = ' '.join(map(str, args))\n\n # Print the message to the console\n print(message, **kwargs)\n\n # Open the log file in append mode and write the message\n with open(INSPECTION_FILE, 'a') as f:\n f.write(message + '\\n')\n\n\ndef inspect_model(path_config: PathConfig, keys_only=True):\n \"\"\"Inspect the contents of the models.\"\"\"\n\n # Load the models using the provided config and weights paths\n unet_model = load_unet_local(path_config.unet_config, MODEL_FILE)\n text_encoder_model = load_text_encoder_local(MODEL_FILE)\n autoencoder_model = load_autoencoder_local(MODEL_FILE)\n diffusion_config = load_diffusion_config_local(path_config.diffusion_config)\n tokenizer = load_tokenizer_local(path_config.tokenizer_vocab, path_config.tokenizer_merges)\n\n # Convert the models' state_dict to a dictionary and iterate over it\n for model_name, model in zip([\"unet\", \"text_encoder\", \"autoencoder\"], [unet_model, text_encoder_model, autoencoder_model]):\n write_to_file(\"-\" * 50)\n write_to_file(f\"Model: {model_name}\")\n write_to_file(\"-\" * 50)\n"
},
{
"from": "gpt",
"value": " for key, value in _state_dict(model).items():"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom pydantic import Field, validator\nfrom ark_sdk_python.models.common import ArkProtocolType\nfrom ark_sdk_python.models.common.ark_workspace_type import ArkWorkspaceType\nfrom ark_sdk_python.models.services.dpa.policies.common.ark_dpa_base_authorization_rule import ArkDPABaseAuthorizationRule\nfrom ark_sdk_python.models.services.dpa.policies.common.ark_dpa_base_connection_information import ArkDPABaseConnectionInformation\nfrom ark_sdk_python.models.services.dpa.policies.vm.ark_dpa_vm_connection_data import ArkDPAVMProvidersConnectionDict\n\n# Code from current file (ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_authorization_rule.py):\n\n\n\nclass ArkDPAVMConnectionInformation(ArkDPABaseConnectionInformation):\n connect_as: ArkDPAVMProvidersConnectionDict = Field(description='In which fashion the connection is made')\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('connect_as')\n def validate_connect_as(cls, val):\n for k, v in val.items():\n"
},
{
"from": "gpt",
"value": " if ArkWorkspaceType(k) not in [ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.GCP, ArkWorkspaceType.ONPREM]:"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom pytorch_grad_cam import GradCAM\nfrom PIL import Image\nfrom tqdm import tqdm\nfrom pytorch_grad_cam.utils.image import scale_cam_image\nfrom utils import scoremap2bbox\nfrom clip_text import class_names, new_class_names_coco, BACKGROUND_CATEGORY_COCO\nfrom torch import multiprocessing\nfrom torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize, RandomHorizontalFlip\n from torchvision.transforms import InterpolationMode\nimport torch\nimport clip\nimport numpy as np\nimport cv2\nimport os\nimport argparse\nimport warnings\n\n# Code from current file (CLIP-ES/generate_cams_coco.py):\n# -*- coding:UTF-8 -*-\n\ntry:\n BICUBIC = InterpolationMode.BICUBIC\nexcept ImportError:\n BICUBIC = Image.BICUBIC\nwarnings.filterwarnings(\"ignore\")\n_CONTOUR_INDEX = 1 if cv2.__version__.split('.')[0] == '3' else 0\n\ndef reshape_transform(tensor, height=28, width=28):\n tensor = tensor.permute(1, 0, 2)\n result = tensor[:, 1:, :].reshape(tensor.size(0), height, width, tensor.size(2))\n\n # Bring the channels to the first dimension,\n # like in CNNs.\n result = result.transpose(2, 3).transpose(1, 2)\n return result\n\ndef split_dataset(dataset, all_label_list, n_splits):\n if n_splits == 1:\n return [dataset], [all_label_list]\n part = len(dataset) // n_splits\n dataset_list = []\n split_label_list = []\n for i in range(n_splits - 1):\n dataset_list.append(dataset[i*part:(i+1)*part])\n split_label_list.append(all_label_list[i*part:(i+1)*part])\n dataset_list.append(dataset[(i+1)*part:])\n split_label_list.append(all_label_list[(i+1)*part:])\n\n return dataset_list, split_label_list\n\ndef zeroshot_classifier(classnames, templates, model):\n with torch.no_grad():\n zeroshot_weights = []\n for classname in classnames:\n texts = [template.format(classname) for template in templates] #format with class\n texts = clip.tokenize(texts).to(device) #tokenize\n class_embeddings = model.encode_text(texts) #embed with text encoder\n class_embeddings /= class_embeddings.norm(dim=-1, keepdim=True)\n class_embedding = class_embeddings.mean(dim=0)\n class_embedding /= class_embedding.norm()\n zeroshot_weights.append(class_embedding)\n zeroshot_weights = torch.stack(zeroshot_weights, dim=1).to(device)\n return zeroshot_weights.t()\n\n\nclass ClipOutputTarget:\n def __init__(self, category):\n self.category = category\n def __call__(self, model_output):\n if len(model_output.shape) == 1:\n return model_output[self.category]\n return model_output[:, self.category]\n\n\ndef _convert_image_to_rgb(image):\n return image.convert(\"RGB\")\n\ndef _transform_resize(h, w):\n return Compose([\n Resize((h,w), interpolation=BICUBIC),\n _convert_image_to_rgb,\n ToTensor(),\n Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),\n ])\n\n\ndef img_ms_and_flip(img_path, ori_height, ori_width, scales=[1.0], patch_size=16):\n all_imgs = []\n for scale in scales:\n preprocess = _transform_resize(int(np.ceil(scale * int(ori_height) / patch_size) * patch_size), int(np.ceil(scale * int(ori_width) / patch_size) * patch_size))\n image = preprocess(Image.open(img_path))\n image_ori = image\n image_flip = torch.flip(image, [-1])\n all_imgs.append(image_ori)\n all_imgs.append(image_flip)\n return all_imgs\n\n\ndef perform(process_id, dataset_list, args, model, bg_text_features, fg_text_features, cam, split_label_list):\n n_gpus = torch.cuda.device_count()\n device_id = \"cuda:{}\".format(process_id % n_gpus)\n databin 
= dataset_list[process_id]\n all_label_list = split_label_list[process_id]\n model = model.to(device_id)\n bg_text_features = bg_text_features.to(device_id)\n fg_text_features = fg_text_features.to(device_id)\n for im_idx, im in enumerate(tqdm(databin)):\n img_path = os.path.join(args.img_root, im)\n ori_image = Image.open(img_path)\n ori_height, ori_width = np.asarray(ori_image).shape[:2]\n label_id_list = all_label_list[im_idx]\n label_list = []\n for lid in label_id_list:\n"
},
{
"from": "gpt",
"value": " label_list.append(new_class_names_coco[int(lid)])"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom os.path import join\nfrom easyvolcap.utils.console_utils import *\nfrom easyvolcap.utils.data_utils import to_numpy\nfrom easyvolcap.utils.net_utils import save_npz\n from easyvolcap.scripts.main import test # will do everything a normal user would do\n from easyvolcap.engine import cfg\n from easyvolcap.engine import SAMPLERS\n from easyvolcap.runners.volumetric_video_runner import VolumetricVideoRunner\n import sys\n import torch\n import argparse\n\n# Code from current file (scripts/realtime4dv/charger.py):\n# This function will try to invoke evc programmatically\n\n\n@catch_throw\ndef main():\n # fmt: off\n sys.path.append('.')\n\n sep_ind = sys.argv.index('--')\n our_args = sys.argv[1:sep_ind]\n evv_args = sys.argv[sep_ind + 1:]\n sys.argv = [sys.argv[0]] + ['-t','test'] + evv_args\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--sampler', type=str, default='SuperChargedR4DVB')\n parser.add_argument('--sub_sampler', type=str, default='SuperChargedR4DV')\n parser.add_argument('--exp_name', type=str, default='scr4dvb_dance3')\n parser.add_argument('--save_fp32', action='store_true')\n parser.add_argument('--save_pt', action='store_true')\n parser.add_argument('--no_save_npz', action='store_false', dest='save_npz')\n args = parser.parse_args(our_args)\n\n # You have to save at least one type of model\n"
},
{
"from": "gpt",
"value": " assert args.save_pt or args.save_npz"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport logging\nfrom typing import Any\nfrom homeassistant.components.climate import (\n ClimateEntity,\n ClimateEntityFeature,\n HVACAction,\n HVACMode,\n)\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.const import ATTR_TEMPERATURE, UnitOfTemperature\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.entity_platform import AddEntitiesCallback\nfrom homeassistant.util.unit_conversion import TemperatureConverter\nfrom .const import (\n DEFAULT_MAX_TEMP,\n DEFAULT_MIN_TEMP,\n DOMAIN,\n HA_PRESET_MODES,\n VOLTALIS_CONTROLLER,\n VOLTALIS_PRESET_MODES,\n VOLTALIS_HEATER_TYPE,\n)\nfrom .entity import VoltalisEntity\n\n# Code from current file (custom_components/voltalis/climate.py):\n\"\"\"Platform for climate integration.\"\"\"\nfrom __future__ import annotations\n\n\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup_entry(\n hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback\n) -> None:\n \"\"\"Set up climate entity for Voltalis Appliance.\"\"\"\n"
},
{
"from": "gpt",
"value": " controller = hass.data[DOMAIN][entry.entry_id][VOLTALIS_CONTROLLER]"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport logging\nimport os\nimport pathspec\nfrom .models import File, Project\nfrom .settings import Settings\nfrom .output_builder import OutputBuilder\n\n# Code from current file (gptize/gptizer.py):\n\n\nclass GPTizer:\n def __init__(self):\n self._project = None\n self._gitignore = None\n\n def process_directory(self, root_path: str):\n \"\"\"\n Processes all the files within a given directory. This method initializes\n the Project object for the specified directory, loads the .gitignore patterns,\n and populates the project with files that are not ignored by .gitignore.\n\n The method traverses through the directory recursively and adds all relevant\n files to the project's file list, ensuring that binary files and files\n specified in .gitignore are not included.\n\n Parameters:\n root_path (str): The path to the root of the directory to be processed.\n\n Raises:\n FileNotFoundError: If the specified directory does not exist.\n Exception: For any other issues encountered during the directory processing.\n \"\"\"\n project_name = os.path.basename(root_path)\n self._project = Project(project_name, root_path)\n self._gitignore = self.load_gitignore(root_path)\n self.populate_files()\n\n def process_file(self, file_path: str):\n \"\"\"\n Processes a single file. This method creates a Project object for the file,\n treating the file as an individual project. It bypasses .gitignore processing,\n as it is assumed that the specific file is intentionally selected for processing.\n\n The method creates a File object for the specified file, reads its content,\n and adds it to the project's file list. It handles binary and text files\n accordingly.\n\n Parameters:\n file_path (str): The path to the file to be processed. This includes both\n the directory path and file name.\n\n Raises:\n FileNotFoundError: If the specified file does not exist.\n IOError: If there is an issue reading the file.\n Exception: For any other unexpected issues encountered during file processing.\n \"\"\"\n root_path, file_name = os.path.split(file_path)\n project_name = os.path.basename(root_path) if root_path else 'SingleFileProject'\n self._project = Project(project_name, root_path or '.')\n self._gitignore = pathspec.PathSpec.from_lines('gitwildmatch', [])\n\n"
},
{
"from": "gpt",
"value": " file_obj = File(file_name, file_path)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom .._ffi import register_func\nfrom .codegen import target_has_features\n\n# Code from current file (python/tvm/target/x86.py):\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Common x86 related utilities\"\"\"\n\n\n@register_func(\"tvm.topi.x86.utils.get_simd_32bit_lanes\")\ndef get_simd_32bit_lanes():\n \"\"\"X86 SIMD optimal vector length lookup.\n Parameters\n ----------\n Returns\n -------\n vec_len : int\n The optimal vector length of CPU from the global context target.\n \"\"\"\n vec_len = 4\n"
},
{
"from": "gpt",
"value": " if target_has_features([\"avx512bw\", \"avx512f\"]):"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom pond.conventions import (\n METADATA_DIRNAME,\n MANIFEST_FILENAME,\n version_data_location,\n version_manifest_location,\n version_uri,\n urijoinpath,\n)\nfrom pond.version_name import SimpleVersionName\n\n# Code from current file (tests/test_conventions.py):\n\n\ndef test_urijoinpath():\n joined = urijoinpath('a', 'b/', 'c/')\n expected = 'a/b/c'\n assert joined == expected\n\n\ndef test_data_location():\n"
},
{
"from": "gpt",
"value": " location = version_data_location('abc/', 'blah.bin')"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport argparse\nimport datetime\nimport glob\nimport os\nimport urllib.parse\nfrom charset_normalizer import from_bytes\nfrom licensed_pile.licenses import PermissiveLicenses\nfrom licensed_pile.write import to_dolma\n\n# Code from current file (ubuntu/to-dolma.py):\n\"\"\"Convert the raw ubuntu data to the dolma format.\"\"\"\n\n\n\n\nSOURCE_NAME = \"ubuntu-chat\"\nBASE_URL = \"https://irclogs.ubuntu.com\"\n\nparser = argparse.ArgumentParser(description=\"Convert data to dolma.\")\nparser.add_argument(\n \"--data\",\n default=\"data/irclogs.ubuntu.com/\",\n help=\"Path to the directory containing ubuntu chat data.\",\n)\nparser.add_argument(\n \"--output_dir\",\n default=f\"data/{SOURCE_NAME}/raw/documents/\",\n help=\"Where the dolma formatted data goes.\",\n)\nparser.add_argument(\n \"--filename\", default=\"ubuntu.jsonl.gz\", help=\"The base filename for our chat data.\"\n)\nparser.add_argument(\n \"--shard_size\", type=int, default=1, help=\"Size, in GB, for each shard.\"\n)\n\n\ndef format_dolma(chat: str, source_name: str = SOURCE_NAME, base_url: str = BASE_URL):\n # Manually split because os.path.split only give (head, tail)\n *_, year, month, day, channel = os.path.splitext(chat)[0].split(os.path.sep)\n created = datetime.date(int(year), int(month), int(day))\n with open(chat, \"rb\") as f:\n # There is some encoding weirdness that this seems to fix.\n text = str(from_bytes(f.read()).best())\n return {\n # We don't want each channel to be it own data source so add the date\n # to the channel to make a unique string id.\n \"id\": f\"{created.isoformat()}-{channel}\",\n \"text\": text,\n \"source\": source_name,\n \"added\": datetime.datetime.utcnow().isoformat(),\n \"created\": created.isoformat(),\n \"metadata\": {\n \"license\": str(PermissiveLicenses.PD),\n # This will be added in the next phase of preprocessing.\n \"authors\": [],\n \"url\": urllib.parse.quote(\n f\"{base_url}/{year}/{month}/{day}/{channel}.txt\", safe=\":/\"\n ),\n \"channel\": channel,\n },\n }\n\n\ndef main(args):\n # Use iterators so we don't have to load the whole dataset in memory.\n # year month day channel\n chats = map(\n format_dolma, glob.iglob(os.path.join(args.data, \"**\", \"**\", \"**\", \"*.txt\"))\n )\n"
},
{
"from": "gpt",
"value": " to_dolma(chats, args.output_dir, args.filename, args.shard_size)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom google_colab_selenium.colab_selenium_manager import ColabSeleniumManager\nfrom google_colab_selenium.spinner import Spinner\nfrom google_colab_selenium.exceptions import StartingChromeDriverError\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium import webdriver\n\n# Code from current file (google_colab_selenium/chromedriver.py):\n\n\n\nclass ChromeDriver(webdriver.Chrome):\n \"\"\"\n A thin wrapper around the Selenium Chrome Webdriver which makes it easy\n to use in Google Colab Notebooks.\n\n The ColabSeleniumManager class installs Google-Chrome-Stable and adds the\n nessasary headers to use in a Colab Notebook.\n\n The headers that are automatically added are:\n --headless\n --no-sandbox\n --disable-dev-shm-usage\n --lang=en\n \"\"\"\n def __init__(self, options: Options = None, keep_alive: bool = True):\n self.manager = ColabSeleniumManager(options)\n\n try:\n"
},
{
"from": "gpt",
"value": " with Spinner('Initializing Chromedriver', done='Initialized Chromedriver'):"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom functools import partial\nfrom flax import linen as nn\nfrom .rec_init import matrix_init, truncated_normal_matrix_init, theta_init, nu_init, gamma_log_init\nfrom flax.core.frozen_dict import unfreeze\nimport jax\nimport jax.numpy as jnp\n\n# Code from current file (online_lru/rec.py):\n A_i, b_i = q_i\n A_j, b_j = q_j\n return A_j * A_i, jax.lax.stop_gradient(A_j * b_i) + b_j\n\n\nclass LRU(nn.Module):\n \"\"\"\n LRU layer that updates internal elegibility traces to allow online learning.\n \"\"\"\n\n d_hidden: int # hidden state dimension\n d_model: int # input and output dimensions\n seq_length: int # time sequence length\n gamma_norm: bool = True # use gamma normalization\n exp_param: bool = True # exponential parametrization for lambda\n r_min: float = 0.0 # smallest eigenvalue norm\n r_max: float = 1.0 # largest eigenvalue norm\n max_phase: float = 6.28 # max phase eigenvalue\n training_mode: str = \"bptt\" # which learning algorithm that will be used\n training: bool = False # TODO remove, for debugging purposes\n\n def get_diag_lambda(self, nu=None, theta=None):\n \"\"\"\n Transform parameters nu and theta into the diagonal of the recurrent\n Lambda matrix.\n\n Args:\n nu, theta array[N]: when set to their default values, None, the\n parameters will take the values of the Module.\n NOTE: these arguments are added in order to backpropagate through this\n transformation.\n \"\"\"\n if nu is None:\n nu = self.nu\n if theta is None:\n theta = self.theta\n if self.exp_param:\n theta = jnp.exp(theta)\n nu = jnp.exp(nu)\n return jnp.exp(-nu + 1j * theta)\n\n def get_diag_gamma(self):\n \"\"\"\n Transform parameters gamma_log into the diagonal terms of the modulation matrix gamma.\n \"\"\"\n if self.gamma_norm:\n return jnp.exp(self.gamma_log)\n else:\n return jnp.ones((self.d_hidden,))\n\n def get_B(self):\n \"\"\"\n Get input to hidden matrix B.\n \"\"\"\n return self.B_re + 1j * self.B_im\n\n def get_B_norm(self):\n \"\"\"\n Get modulated input to hidden matrix gamma B.\n \"\"\"\n return self.get_B() * jnp.expand_dims(self.get_diag_gamma(), axis=-1)\n\n def to_output(self, inputs, hidden_states):\n \"\"\"\n Compute output given inputs and hidden states.\n\n Args:\n inputs array[T, H].\n hidden_states array[T, N].\n \"\"\"\n C = self.C_re + 1j * self.C_im\n D = self.D\n y = jax.vmap(lambda x, u: (C @ x).real + D * u)(hidden_states, inputs)\n return y\n\n def get_hidden_states(self, inputs):\n \"\"\"\n Compute the hidden states corresponding to inputs\n\n Return:\n hidden_states array[T, N]\n \"\"\"\n # Materializing the diagonal of Lambda and projections\n diag_lambda = self.get_diag_lambda()\n B_norm = self.get_B_norm()\n\n # Running the LRU + output projection\n # For details on parallel scan, check discussion in Smith et al (2022).\n Lambda_elements = jnp.repeat(diag_lambda[None, ...], inputs.shape[0], axis=0)\n Bu_elements = jax.vmap(lambda u: B_norm @ u)(inputs)\n elements = (Lambda_elements, Bu_elements)\n if self.training_mode == \"bptt\":\n _, hidden_states = jax.lax.associative_scan(binary_operator_diag, elements)\n else:\n _, hidden_states = jax.lax.associative_scan(binary_operator_diag_spatial, elements)\n\n return hidden_states\n\n def setup(self):\n # Check that desired approximation is handled\n if self.training_mode == \"online_snap1\":\n raise NotImplementedError(\"SnAp-1 not implemented for LRU\")\n assert self.training_mode in [\n \"bptt\",\n \"online_full\",\n \"online_full_rec\",\n 
\"online_full_rec_simpleB\",\n \"online_snap1\", # same as online_full\n \"online_spatial\",\n \"online_1truncated\",\n \"online_reservoir\",\n ]\n self.online = \"online\" in self.training_mode # whether we compute the gradient online\n if self.online:\n self.approximation_type = self.training_mode[7:]\n\n # NOTE if exp_param is true, self.theta and self.nu actually represent the log of nu and\n # theta lambda is initialized uniformly in complex plane\n self.theta = self.param(\n \"theta\",\n"
},
{
"from": "gpt",
"value": " partial(theta_init, max_phase=self.max_phase, log=self.exp_param),"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport os\nimport sys\nimport argparse\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.backends.cudnn as cudnn\nimport timm.models as timm_models\nimport util.misc as misc\nfrom torch import nn\nfrom torchvision import datasets\nfrom torchvision import transforms as pth_transforms\nfrom torchvision import models as torchvision_models\nfrom timm.models.layers import trunc_normal_\nfrom util.pos_embed import interpolate_pos_embed\nfrom models import models_vit\n\n# Code from current file (main_knn.py):\n#!/usr/bin/env python\n\n\n\n\ndef extract_feature_pipeline(args):\n ######################## preparing data ... ########################\n resize_size = 256 if args.input_size == 224 else 512\n transform = pth_transforms.Compose([\n pth_transforms.Resize(resize_size, interpolation=3),\n pth_transforms.CenterCrop(args.input_size),\n pth_transforms.ToTensor(),\n pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n ])\n\n dataset_train = ReturnIndexDataset(os.path.join(args.data_path, 'train'), transform)\n dataset_val = ReturnIndexDataset(os.path.join(args.data_path, 'val'), transform)\n\n\n train_labels = torch.tensor(dataset_train.target).long()\n test_labels = torch.tensor(dataset_val.target).long()\n\n sampler = torch.utils.data.DistributedSampler(dataset_train, shuffle=False)\n data_loader_train = torch.utils.data.DataLoader(\n dataset_train,\n sampler=sampler,\n batch_size=args.batch_size_per_gpu,\n num_workers=args.num_workers,\n pin_memory=False,\n drop_last=False,\n )\n data_loader_val = torch.utils.data.DataLoader(\n dataset_val,\n batch_size=args.batch_size_per_gpu,\n num_workers=args.num_workers,\n pin_memory=False,\n drop_last=False,\n )\n print(f\"Data loaded with {len(dataset_train)} train and {len(dataset_val)} val imgs.\")\n\n ######################## building network ... ########################\n"
},
{
"from": "gpt",
"value": " model = models_vit.__dict__[args.model]("
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport fire\nimport sys\nfrom .data import HUMAN_EVAL\nfrom .evaluation import evaluate_functional_correctness\n\n# Code from current file (Evaluation/MBPP/human_eval/evaluate_functional_correctness.py):\n\n\n\ndef entry_point(\n sample_file: str,\n k: str = \"1,10,100\",\n n_workers: int = 4,\n timeout: float = 3.0,\n problem_file: str = \"\",\n is_mbpp: bool = False,\n):\n \"\"\"\n Evaluates the functional correctness of generated samples, and writes\n results to f\"{sample_file}_results.jsonl.gz\"\n \"\"\"\n k = list(map(int, k.split(\",\")))\n"
},
{
"from": "gpt",
"value": " results = evaluate_functional_correctness(sample_file, k, n_workers, timeout, problem_file, is_mbpp)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport os\nimport cv2\nimport typing\nfrom loguru import logger\nfrom nexaflow import toolbox\nfrom nexaflow.video import VideoFrame\n\n# Code from current file (nexaflow/hook.py):\n\n\nclass BaseHook(object):\n\n def __init__(self, *_, **__):\n # logger.debug(f\"start initialing: {self.__class__.__name__} ...\")\n logger.info(f\"加载视频帧处理单元: Frame Processor {self.__class__.__name__} ...\")\n self.result = dict()\n\n def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:\n # info = f\"execute hook: {self.__class__.__name__}\"\n\n frame_id = frame.frame_id\n if frame_id != -1:\n # logger.debug(f\"{info}, frame id: {frame_id}\")\n pass\n return frame\n\n\nclass ExampleHook(BaseHook):\n\n def __init__(self, *_, **__):\n super().__init__(*_, **__)\n\n def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:\n super().do(frame, *_, **__)\n"
},
{
"from": "gpt",
"value": " frame.data = toolbox.turn_grey(frame.data)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom uhc.khrylib.utils.torch import *\nfrom torch import nn\nfrom uhc.khrylib.models.rnn import RNN\nfrom uhc.khrylib.models.mlp import MLP\n\n# Code from current file (uhc/khrylib/models/erd_net.py):\n\n\nclass ERDNet(nn.Module):\n\n def __init__(self, state_dim):\n super().__init__()\n self.state_dim = state_dim\n"
},
{
"from": "gpt",
"value": " self.encoder_mlp = MLP(state_dim, (500,), 'relu')"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom collections import OrderedDict\nfrom .layers import ConvBlock, Conv3x3, upsample, disp_to_depth, coords_to_normals\nfrom timm.models.layers import trunc_normal_\nfrom .cadepth import SPM, DEM\n\n# Code from current file (networks/depth_decoder.py):\n# Copyright Niantic 2021. Patent Pending. All rights reserved.\n#\n# This software is licensed under the terms of the ManyDepth licence\n# which allows for non-commercial use only, the full terms of which are made\n# available in the LICENSE file.\n\n\n\n\nclass DepthDecoder(nn.Module):\n def __init__(self, num_ch_enc, scales=range(4), num_output_channels=1, use_skips=True,\n opt=None, backproject_depth=None, min_depth=0.1, max_depth=100):\n super(DepthDecoder, self).__init__()\n\n self.num_output_channels = num_output_channels\n self.use_skips = use_skips\n self.upsample_mode = 'nearest'\n self.scales = scales\n self.opt = opt\n\n self.num_ch_enc = num_ch_enc\n self.num_ch_dec = np.array([16, 32, 64, 128, 256])\n\n self.backproject_depth = backproject_depth\n self.min_depth = min_depth\n self.max_depth = max_depth\n # decoder\n self.convs = OrderedDict()\n for i in range(4, -1, -1):\n # upconv_0\n num_ch_in = self.num_ch_enc[-1] if i == 4 else self.num_ch_dec[i + 1]\n if self.opt[\"use_surface_normal\"] and i != 4:\n num_ch_in += 3\n num_ch_out = self.num_ch_dec[i]\n self.convs[(\"upconv\", i, 0)] = ConvBlock(num_ch_in, num_ch_out)\n\n # upconv_1\n num_ch_in = self.num_ch_dec[i]\n if self.use_skips and i > 0:\n num_ch_in += self.num_ch_enc[i - 1]\n num_ch_out = self.num_ch_dec[i]\n self.convs[(\"upconv\", i, 1)] = ConvBlock(num_ch_in, num_ch_out)\n\n if self.opt['cadepth']:\n self.convs[(\"dem\", i)] = DEM(num_ch_in)\n\n for s in self.scales:\n"
},
{
"from": "gpt",
"value": " self.convs[(\"dispconv\", s)] = Conv3x3(self.num_ch_dec[s], self.num_output_channels)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin\nfrom django.http import HttpRequest, HttpResponse\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.views import View\nfrom posts.models import Post\nfrom .forms import CommentForm\nfrom .models import Comment\n\n# Code from current file (comments/views.py):\n\n\n\n\nclass CreateCommentView(LoginRequiredMixin, View):\n template_name = \"comments/comment_form.html\"\n login_url = \"/users/login/\"\n\n def get(self, request: HttpRequest, post_slug: str) -> HttpResponse:\n post = get_object_or_404(Post, slug=post_slug)\n form = CommentForm()\n return render(request, self.template_name, {\"form\": form, \"post\": post})\n\n def post(self, request: HttpRequest, post_slug: str) -> HttpResponse:\n form = CommentForm(request.POST)\n post = get_object_or_404(Post, slug=post_slug)\n\n if form.is_valid():\n comment = form.save(commit=False)\n comment.author = self.request.user\n comment.post_id = post.pk\n comment.save()\n return redirect(\"categories:detail\", category_slug=post.category.slug)\n\n return render(request, self.template_name, {\"form\": form, \"post\": post})\n\n\nclass UpdateCommentView(UserPassesTestMixin, View):\n template_name = \"comments/comment_update.html\"\n\n def test_func(self) -> bool:\n comment_pk = self.kwargs.get(\"comment_pk\")\n"
},
{
"from": "gpt",
"value": " comment = get_object_or_404(Comment, pk=comment_pk)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport pytest\nimport torch\nfrom torch.nn import functional as F\nfrom bitnet.bitlinear import BitLinear, absmax_quantize\nfrom bitnet.transformer import BitNetTransformer, ParallelTransformerBlock, Transformer\n\n# Code from current file (tests/tests.py):\n)\ndef test_bitlinear_shapes(in_features, out_features):\n layer = BitLinear(in_features, out_features)\n assert layer.weight.shape == (out_features, in_features)\n\n\[email protected](\"groups\", [1, 2, 5])\ndef test_bitlinear_groups(groups):\n layer = BitLinear(10, 20, groups=groups)\n assert layer.groups == groups\n\n\ndef test_bitlinear_reset_parameters():\n layer = BitLinear(10, 20)\n original_weights = layer.weight.clone()\n layer.reset_parameters()\n assert not torch.equal(original_weights, layer.weight)\n\n\[email protected](\"groups\", [1, 2, 5])\ndef test_bitlinear_forward_with_groups(random_tensor, groups):\n layer = BitLinear(10, 20, groups=groups)\n output = layer(random_tensor)\n assert output.shape == (5, 20)\n\n\ndef test_bitlinear_zero_input():\n layer = BitLinear(10, 20)\n input_tensor = torch.zeros(5, 10)\n output = layer(input_tensor)\n assert torch.allclose(output, torch.zeros(5, 20), atol=1e-2)\n\n\ndef test_bitlinear_weight_sign():\n layer = BitLinear(10, 20)\n input_tensor = torch.randn(5, 10)\n output_before = layer(input_tensor)\n layer.weight.data = torch.abs(layer.weight.data)\n output_after = layer(input_tensor)\n assert not torch.allclose(output_before, output_after)\n\n\[email protected](\"groups\", [1, 2, 5])\ndef test_bitlinear_weight_group_normalization(groups):\n layer = BitLinear(10, 20, groups=groups)\n weight = layer.weight.view(groups, -1)\n mean = weight.mean(dim=1, keepdim=True)\n assert torch.allclose(mean, torch.zeros_like(mean), atol=1e-2)\n\n\ndef test_bitlinear_weight_group_scaling():\n layer = BitLinear(10, 20, groups=5)\n weight = layer.weight.view(layer.groups, -1)\n beta = torch.abs(weight).sum(dim=1, keepdim=True) / (\n weight.shape[0] * weight.shape[1]\n )\n scaled_weight = weight * beta\n assert torch.allclose(scaled_weight, layer.weight.view(20, 10))\n\n\ndef test_bitlinear_input_quantization(random_tensor):\n layer = BitLinear(10, 20)\n quant_input, _ = absmax_quantize(random_tensor)\n output = layer(quant_input.float())\n assert output.shape == (5, 20)\n\n\n# ... 
Continue adding more tests ...\n# - Test the forward pass with extreme input values.\n# - Test with different types of input tensors (e.g., int8, float16).\n# - Test the forward pass with batch sizes other than 5.\n# - Verify that using different initializations produces different results.\n# - Test the weight and input interactions during the forward pass.\n# - And many more...\n\n# ================================ Transformer with bitlinear ================================\n\n\[email protected]\ndef random_tensor():\n \"\"\"A fixture to generate a random tensor\"\"\"\n return torch.randn(32, 512)\n\n\[email protected]\ndef bitnet_model():\n \"\"\"A fixture to create an instance of BitNetTransformer model\"\"\"\n return BitNetTransformer(\n num_tokens=20000,\n dim=512,\n depth=6,\n dim_head=64,\n heads=8,\n ff_mult=4,\n )\n\n\[email protected](\n \"dim, dim_head, heads, ff_mult\",\n [\n (512, 64, 8, 4),\n (256, 32, 4, 2),\n (128, 16, 2, 1),\n ],\n)\ndef test_parallel_transformer_block(dim, dim_head, heads, ff_mult, random_tensor):\n block = ParallelTransformerBlock(dim, dim_head, heads, ff_mult)\n output = block(random_tensor)\n assert output.shape == random_tensor.shape\n\n\[email protected](\n \"dim, depth, heads, dim_head, ff_mult\",\n [\n (512, 6, 8, 64, 4),\n (256, 3, 4, 32, 2),\n (128, 2, 2, 16, 1),\n ],\n)\ndef test_transformer(dim, depth, heads, dim_head, ff_mult, random_tensor):\n"
},
{
"from": "gpt",
"value": " transformer = Transformer(dim, depth, heads, dim_head, ff_mult)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom dataclasses import is_dataclass, fields\nfrom typing import Tuple, Callable, Any, Coroutine\nfrom pathlib import Path\nfrom aiodesa.utils.table import make_schema, TableSchema\nimport aiosqlite\n\n# Code from current file (aiodesa/database.py):\n\"\"\"\naiodesa.Database: Simple SQLite Database Interface\n\nThis module provides the `Db` class, a simple SQLite database interface that\nsupports asynchronous operations.\n\nClasses:\n\n- :class:`Db`: Represents a simple SQLite database interface.\n\nExample:\n\n.. code-block:: python\n\n from aiodesa import Db\n\n class Users:\n username: str\n id: str | None = None\n table_name: str = \"users\"\n\n async with Db(\"database.sqlite3\") as db:\n await db.read_table_schemas(Users)\n\"\"\"\n\n\n\nclass Db:\n \"\"\"\n Represents a simple SQLite database interface.\n\n Args:\n db_path : str\n The path to the SQLite database file.\n\n\n Example:\n\n .. code-block:: python\n\n class Users:\n username: str\n id: str | None = None\n table_name: str = \"users\"\n\n async with Db(\"database.sqlite3\") as db:\n await db.read_table_schemas(Users)\n ...\n\n \"\"\"\n\n _tables: dict\n db_path: Path\n _conn: Any\n\n def __init__(self, db_path: str) -> None:\n self.db_path = Path(db_path)\n self._conn = None\n self._create_db()\n self._tables = {}\n\n def _create_db(self) -> None:\n \"\"\"\n Internal method to create the database file if it does not exist.\n\n Notes:\n - This method is automatically called during the initialization of the\n Db class.\n - It ensures that the SQLite database file is created at the specified\n path if\n it does not exist.\n \"\"\"\n if not self.db_path.exists():\n self.db_path.parent.mkdir(parents=True, exist_ok=True)\n self.db_path.touch()\n\n async def _process_single_data_class(self, schema: Any) -> None:\n \"\"\"\n Process a single data class schema.\n\n Args:\n schema: The data class schema representing a table.\n\n Returns:\n This method does not return any value.\n \"\"\"\n if not is_dataclass(schema):\n raise ValueError(\"Provided schema is not a data class\")\n\n self._tables[schema.table_name] = schema\n class_fields = fields(schema)\n for field in class_fields:\n if field.name == \"table_name\":\n"
},
{
"from": "gpt",
"value": " schema_ = make_schema(str(field.default), schema)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport math\nimport torch\nimport torch.nn as nn\nfrom models.eg3d.volumetric_rendering.ray_marcher import MipRayMarcher2\nfrom models.eg3d.volumetric_rendering import math_utils\n\n# Code from current file (models/eg3d/volumetric_rendering/renderer.py):\n# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n# SPDX-License-Identifier: LicenseRef-NvidiaProprietary\n#\n# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual\n# property and proprietary rights in and to this material, related\n# documentation and any modifications thereto. Any use, reproduction,\n# disclosure or distribution of this material and related documentation\n# without an express license agreement from NVIDIA CORPORATION or\n# its affiliates is strictly prohibited.\n\n\"\"\"\nThe renderer is a module that takes in rays, decides where to sample along each\nray, and computes pixel colors using the volume rendering equation.\n\"\"\"\n\n\n\ndef generate_planes():\n \"\"\"\n Defines planes by the three vectors that form the \"axes\" of the\n plane. Should work with arbitrary number of planes and planes of\n arbitrary orientation.\n \"\"\"\n return torch.tensor([[[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1]],\n [[1, 0, 0],\n [0, 0, 1],\n [0, 1, 0]],\n [[0, 0, 1],\n [1, 0, 0],\n [0, 1, 0]]], dtype=torch.float32)\n\ndef project_onto_planes(planes, coordinates):\n \"\"\"\n Does a projection of a 3D point onto a batch of 2D planes,\n returning 2D plane coordinates.\n\n Takes plane axes of shape n_planes, 3, 3\n # Takes coordinates of shape N, M, 3\n # returns projections of shape N*n_planes, M, 2\n \"\"\"\n N, M, C = coordinates.shape\n n_planes, _, _ = planes.shape\n coordinates = coordinates.unsqueeze(1).expand(-1, n_planes, -1, -1).reshape(N*n_planes, M, 3)\n inv_planes = torch.linalg.inv(planes).unsqueeze(0).expand(N, -1, -1, -1).reshape(N*n_planes, 3, 3)\n projections = torch.bmm(coordinates, inv_planes)\n return projections[..., :2]\n\ndef sample_from_planes(plane_axes, plane_features, coordinates, mode='bilinear', padding_mode='zeros', box_warp=None):\n assert padding_mode == 'zeros'\n N, n_planes, C, H, W = plane_features.shape\n _, M, _ = coordinates.shape\n plane_features = plane_features.view(N*n_planes, C, H, W)\n\n coordinates = (2/box_warp) * coordinates # TODO: add specific box bounds\n\n projected_coordinates = project_onto_planes(plane_axes, coordinates).unsqueeze(1)\n output_features = torch.nn.functional.grid_sample(plane_features, projected_coordinates.float(), mode=mode, padding_mode=padding_mode, align_corners=False).permute(0, 3, 2, 1).reshape(N, n_planes, M, C)\n return output_features\n\ndef sample_from_3dgrid(grid, coordinates):\n \"\"\"\n Expects coordinates in shape (batch_size, num_points_per_batch, 3)\n Expects grid in shape (1, channels, H, W, D)\n (Also works if grid has batch size)\n Returns sampled features of shape (batch_size, num_points_per_batch, feature_channels)\n \"\"\"\n batch_size, n_coords, n_dims = coordinates.shape\n sampled_features = torch.nn.functional.grid_sample(grid.expand(batch_size, -1, -1, -1, -1),\n coordinates.reshape(batch_size, 1, 1, -1, n_dims),\n mode='bilinear', padding_mode='zeros', align_corners=False)\n N, C, H, W, D = sampled_features.shape\n sampled_features = sampled_features.permute(0, 4, 3, 2, 1).reshape(N, H*W*D, C)\n return sampled_features\n\nclass ImportanceRenderer(torch.nn.Module):\n def __init__(self):\n 
super().__init__()\n"
},
{
"from": "gpt",
"value": " self.ray_marcher = MipRayMarcher2()"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom torch import nn\nfrom swap_face_fine.face_vid2vid.modules.util import AntiAliasInterpolation2d, make_coordinate_grid_2d\nfrom torchvision import models\nfrom torch.autograd import grad\nfrom torchvision import transforms\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\nimport swap_face_fine.face_vid2vid.modules.hopenet as hopenet\n\n# Code from current file (swap_face_fine/face_vid2vid/modules/model.py):\n\n\nclass Vgg19(torch.nn.Module):\n \"\"\"\n Vgg19 network for perceptual loss.\n \"\"\"\n def __init__(self, requires_grad=False):\n super(Vgg19, self).__init__()\n vgg_pretrained_features = models.vgg19(pretrained=True).features\n self.slice1 = torch.nn.Sequential()\n self.slice2 = torch.nn.Sequential()\n self.slice3 = torch.nn.Sequential()\n self.slice4 = torch.nn.Sequential()\n self.slice5 = torch.nn.Sequential()\n for x in range(2):\n self.slice1.add_module(str(x), vgg_pretrained_features[x])\n for x in range(2, 7):\n self.slice2.add_module(str(x), vgg_pretrained_features[x])\n for x in range(7, 12):\n self.slice3.add_module(str(x), vgg_pretrained_features[x])\n for x in range(12, 21):\n self.slice4.add_module(str(x), vgg_pretrained_features[x])\n for x in range(21, 30):\n self.slice5.add_module(str(x), vgg_pretrained_features[x])\n\n self.mean = torch.nn.Parameter(data=torch.Tensor(np.array([0.485, 0.456, 0.406]).reshape((1, 3, 1, 1))),\n requires_grad=False)\n self.std = torch.nn.Parameter(data=torch.Tensor(np.array([0.229, 0.224, 0.225]).reshape((1, 3, 1, 1))),\n requires_grad=False)\n\n if not requires_grad:\n for param in self.parameters():\n param.requires_grad = False\n\n def forward(self, X):\n X = (X - self.mean) / self.std\n h_relu1 = self.slice1(X)\n h_relu2 = self.slice2(h_relu1)\n h_relu3 = self.slice3(h_relu2)\n h_relu4 = self.slice4(h_relu3)\n h_relu5 = self.slice5(h_relu4)\n out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]\n return out\n\n\nclass ImagePyramide(torch.nn.Module):\n \"\"\"\n Create image pyramide for computing pyramide perceptual loss.\n \"\"\"\n def __init__(self, scales, num_channels):\n super(ImagePyramide, self).__init__()\n downs = {}\n for scale in scales:\n"
},
{
"from": "gpt",
"value": " downs[str(scale).replace('.', '-')] = AntiAliasInterpolation2d(num_channels, scale)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport logging\nimport shutil\nimport pytest\nfrom pathlib import Path\nfrom cleo.application import Application\nfrom cleo.testers.command_tester import CommandTester\nfrom deb_pkg_tools.package import ArchiveEntry\nfrom deb_pkg_tools.package import inspect_package_contents\nfrom tests.conftest import ROS_2\nfrom tests.conftest import YieldFixture\nfrom tests.conftest import get_ros_version\n\n# Code from current file (tests/integration/commands/install/test_install_launch_file_detection.py):\n#\n# Copyright (c) Honda Research Institute Europe GmbH\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS\n# IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n#\nfrom __future__ import annotations\n\n\n\n\n\n\n\[email protected]()\ndef _copy_success_launch_file_project(\n rosenv_target_path: Path,\n example_project_launch_files: Path,\n) -> YieldFixture[None]:\n target_folder = rosenv_target_path.parent / \"src\"\n target_folder.mkdir(exist_ok=True, parents=True)\n\n failing_project = example_project_launch_files / \"src/launch_success\"\n\n assert failing_project.exists(), \"Failing launch file project doesn't exist!\"\n\n shutil.copytree(failing_project, target_folder / \"launch_success\")\n\n yield\n\n shutil.rmtree(target_folder, ignore_errors=True)\n\n\[email protected]()\ndef _copy_failing_launch_file_project(\n rosenv_target_path: Path,\n example_project_launch_files: Path,\n) -> YieldFixture[None]:\n target_folder = rosenv_target_path.parent / \"src\"\n target_folder.mkdir(exist_ok=True, parents=True)\n\n failing_project = example_project_launch_files / \"src/launch_fails\"\n\n assert failing_project.exists(), \"Failing launch file project doesn't exist!\"\n\n shutil.copytree(failing_project, target_folder / \"launch_fails\")\n\n yield\n\n shutil.rmtree(target_folder, ignore_errors=True)\n\n\n"
},
{
"from": "gpt",
"value": "@pytest.mark.skipif(get_ros_version() == ROS_2, reason=\"Launchfile-Checks only work in ROS1 currently\")"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom dataclasses import dataclass\nfrom typing import Callable, Protocol\nfrom .api import Array\nfrom .interpreter import forward_laplacian\nimport jax\nimport jax.numpy as jnp\n\n# Code from current file (folx/operators.py):\n\n\n\n\n__all__ = [\n \"Laplacian\",\n \"LaplacianOperator\",\n \"ForwardLaplacianOperator\",\n \"LoopLaplacianOperator\",\n \"ParallelLaplacianOperator\",\n]\n\n\nclass Laplacian(Protocol):\n"
},
{
"from": "gpt",
"value": " def __call__(self, x: Array) -> tuple[Array, Array]:"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport os\nimport argparse\nimport pinecone\nfrom dotenv import load_dotenv\nfrom context import download_embeddings\nfrom utils.utils import batch\nfrom constants import (\n INDEX_NAME,\n INDEX_ENVIRONMENT,\n NAMESPACE,\n PATH_TO_SRC_CODE,\n)\nfrom code_indexer import CodeIndexer\n\n# Code from current file (scripts.py):\n\n\n\nload_dotenv()\nPINECONE_API_KEY = os.getenv(\"PINECONE_API_KEY\")\npinecone.init(api_key=PINECONE_API_KEY, environment=INDEX_ENVIRONMENT)\nindex = pinecone.Index(INDEX_NAME)\n\n\ndef read_and_upsert(library_name):\n df = download_embeddings(library_name)\n\n def convert_row_to_dict(row):\n return {\n \"id\": row[\"id\"],\n \"values\": [float(value) for value in row[\"dense_embeddings\"]],\n \"sparse_values\": dict(row[\"sparse_values\"]),\n \"metadata\": {**dict(row[\"metadata\"]), \"type\": \"documentation\"},\n }\n\n df[\"dict\"] = df.apply(convert_row_to_dict, axis=1)\n vectors = df[\"dict\"].tolist()\n\n vec_batches = list(batch(vectors, 100))\n for idx, vec_batch in enumerate(vec_batches):\n print(f\"Upserting batch {idx}/{len(vec_batches)}...\")\n index.upsert(vectors=vec_batch, namespace=NAMESPACE)\n\n print(\"Finished upserting\")\n\n\ndef read_and_upsert_source_code():\n"
},
{
"from": "gpt",
"value": " _ = CodeIndexer(src_dir=PATH_TO_SRC_CODE)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom typing import Optional\nfrom einops import repeat\nfrom jaxtyping import Float\nfrom torch import Tensor\nfrom .coordinate_conversion import generate_conversions\nfrom .rendering import render_over_image\nfrom .types import Pair, Scalar, Vector, sanitize_scalar, sanitize_vector\nimport torch\n\n# Code from current file (src/visualization/drawing/points.py):\n\n\n\n\ndef draw_points(\n image: Float[Tensor, \"3 height width\"],\n points: Vector,\n color: Vector = [1, 1, 1],\n radius: Scalar = 1,\n inner_radius: Scalar = 0,\n num_msaa_passes: int = 1,\n x_range: Optional[Pair] = None,\n y_range: Optional[Pair] = None,\n) -> Float[Tensor, \"3 height width\"]:\n device = image.device\n points = sanitize_vector(points, 2, device)\n color = sanitize_vector(color, 3, device)\n radius = sanitize_scalar(radius, device)\n inner_radius = sanitize_scalar(inner_radius, device)\n (num_points,) = torch.broadcast_shapes(\n points.shape[0],\n color.shape[0],\n radius.shape,\n inner_radius.shape,\n )\n\n # Convert world-space points to pixel space.\n _, h, w = image.shape\n"
},
{
"from": "gpt",
"value": " world_to_pixel, _ = generate_conversions((h, w), device, x_range, y_range)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom abc import ABC, abstractmethod\nfrom .multimodal_encoder.builder import build_vision_tower\nfrom .multimodal_projector.builder import build_vision_projector\nfrom llava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN\nimport torch\nimport torch.nn as nn\n\n# Code from current file (llava/model/llava_arch.py):\n# Copyright 2023 Haotian Liu\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\n\n\n\n\nclass LlavaMetaModel:\n\n def __init__(self, config):\n super(LlavaMetaModel, self).__init__(config)\n\n if hasattr(config, \"mm_vision_tower\"):\n"
},
{
"from": "gpt",
"value": " self.vision_tower = build_vision_tower(config, delay_load=True)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom os import getenv\nfrom time import sleep\nfrom tqdm import tqdm\nfrom .LLM_task import LLM_task\nfrom src.srt_util.srt import split_script\nimport logging\n\n# Code from current file (src/translators/translation.py):\n\n\ndef get_translation(srt, model, video_name, prompt = None, chunk_size = 1000):\n # print(srt.get_source_only())\n script_arr, range_arr = split_script(srt.get_source_only(),chunk_size)\n translate(srt, script_arr, range_arr, model, video_name, task=prompt)\n pass\n\ndef check_translation(sentence, translation):\n \"\"\"\n check merge sentence issue from openai translation\n \"\"\"\n sentence_count = sentence.count('\\n\\n') + 1\n translation_count = translation.count('\\n\\n') + 1\n\n if sentence_count != translation_count:\n return False\n else:\n return True\n\n# TODO{david}: prompts selector\ndef prompt_selector(src_lang, tgt_lang, domain):\n language_map = {\n \"EN\": \"English\",\n \"ZH\": \"Chinese\",\n \"ES\": \"Spanish\",\n \"FR\": \"France\",\n \"DE\": \"Germany\",\n \"RU\": \"Russian\",\n \"JA\": \"Japanese\",\n \"AR\": \"Arabic\",\n }\n try:\n src_lang = language_map[src_lang]\n tgt_lang = language_map[tgt_lang]\n except:\n print(\"Unsupported language, is your abbreviation correct?\")\n logging.info(\"Unsupported language detected\")\n prompt = f\"\"\"\n you are a translation assistant, your job is to translate a video in domain of {domain} from {src_lang} to {tgt_lang}, \n you will be provided with a segement in {src_lang} parsed by line, where your translation text should keep the original \n meaning and the number of lines.\n \"\"\"\n return prompt\n\ndef translate(srt, script_arr, range_arr, model_name, video_name=None, attempts_count=5, task=None, temp = 0.15):\n \"\"\"\n Translates the given script array into another language using the chatgpt and writes to the SRT file.\n\n This function takes a script array, a range array, a model name, a video name, and a video link as input. It iterates\n through sentences and range in the script and range arrays. If the translation check fails for five times, the function\n will attempt to resolve merge sentence issues and split the sentence into smaller tokens for a better translation.\n\n :param srt: An instance of the Subtitle class representing the SRT file.\n :param script_arr: A list of strings representing the original script sentences to be translated.\n :param range_arr: A list of tuples representing the start and end positions of sentences in the script.\n :param model_name: The name of the translation model to be used.\n :param video_name: The name of the video.\n :param attempts_count: Number of attemps of failures for unmatched sentences.\n :param task: Prompt.\n :param temp: Model temperature.\n \"\"\"\n\n if input is None: \n raise Exception(\"Warning! No Input have passed to LLM!\")\n if task is None:\n task = \"你是一个翻译助理,你的任务是翻译视频,你会被提供一个按行分割的英文段落,你需要在保证句意和行数的情况下输出翻译后的文本。\"\n logging.info(f\"translation prompt: {task}\")\n previous_length = 0\n for sentence, range_ in tqdm(zip(script_arr, range_arr)):\n # update the range based on previous length\n range_ = (range_[0] + previous_length, range_[1] + previous_length)\n # using chatgpt model\n print(f\"now translating sentences {range_}\")\n logging.info(f\"now translating sentences {range_}\")\n flag = True\n while flag:\n flag = False\n try:\n"
},
{
"from": "gpt",
"value": " translate = LLM_task(model_name, sentence, task, temp)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport torch\nimport torch_scatter\nfrom drugood.models.algorithms.base import BaseAlgorithm\nfrom ..builder import MODELS, build_tasker\n\n# Code from current file (drugood/models/algorithms/groupdro.py):\n# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.\n\n\n\[email protected]_module()\nclass GroupDRO(BaseAlgorithm):\n \"\"\"\n Group distributionally robust optimization.\n Original paper:\n @inproceedings{sagawa2019distributionally,\n title={Distributionally robust neural networks for group shifts: On the importance of regularization for worst-case generalization},\n author={Sagawa, Shiori and Koh, Pang Wei and Hashimoto, Tatsunori B and Liang, Percy},\n booktitle={International Conference on Learning Representations},\n year={2019}\n }\n The GroupDRO implementation below is adapted from Wilds's implementation:\n https://github.com/p-lambda/wilds/blob/a7a452c80cad311cf0aabfd59af8348cba1b9861/examples/algorithms/groupDRO.py\n \"\"\"\n\n def __init__(self,\n tasker,\n num_groups=44930,\n group_dro_step_size=0.01,\n ):\n super().__init__()\n"
},
{
"from": "gpt",
"value": " self.tasker = build_tasker(tasker)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport csv\nimport json\nimport logging\nimport os\nimport numpy as np\nimport psycopg2\n import psycopg2\nfrom typing import List\nfrom qdrant_client import QdrantClient\nfrom transformers import AutoModel\nfrom agent_search.core import AgentSearchResult\nfrom agent_search.core.utils import (\n cosine_similarity,\n get_data_path,\n load_config,\n)\n\n# Code from current file (agent_search/search/base.py):\n\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass WebSearchEngine:\n \"\"\"A simple search client for the OpenSearch collection\"\"\"\n\n def __init__(\n self,\n ):\n try:\n except ImportError as e:\n raise ImportError(\n f\"Error {e} while imoprting psycopg2. Please install it with `pip install psycopg2` to run an WebSearchEngine instance.\"\n )\n\n # Load config\n"
},
{
"from": "gpt",
"value": " self.config = load_config()[\"agent_search\"]"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport os\nimport re\nimport time\nimport interpreter\nfrom random import randint\nfrom interpreter.utils.count_tokens import count_messages_tokens, count_tokens\n\n# Code from current file (tests/test_interpreter.py):\n Round to 2 decimal places.\n \"\"\".strip()\n\n messages = interpreter.chat(order_of_operations_message)\n\n assert str(round(test_result, 2)) in messages[-1][\"message\"]\n\n\ndef test_delayed_exec():\n interpreter.chat(\n \"\"\"Can you write a single block of code and execute it that prints something, then delays 1 second, then prints something else? No talk just code. Thanks!\"\"\"\n )\n\n\ndef test_nested_loops_and_multiple_newlines():\n interpreter.chat(\n \"\"\"Can you write a nested for loop in python and shell and run them? Don't forget to properly format your shell script and use semicolons where necessary. Also put 1-3 newlines between each line in the code. Only generate and execute the code. No explanations. Thanks!\"\"\"\n )\n\n\ndef test_write_to_file():\n interpreter.chat(\"\"\"Write the word 'Washington' to a .txt file called file.txt\"\"\")\n assert os.path.exists(\"file.txt\")\n interpreter.messages = [] # Just reset message history, nothing else for this test\n messages = interpreter.chat(\n \"\"\"Read file.txt in the current directory and tell me what's in it.\"\"\"\n )\n assert \"Washington\" in messages[-1][\"message\"]\n\n\ndef test_markdown():\n interpreter.chat(\n \"\"\"Hi, can you test out a bunch of markdown features? Try writing a fenced code block, a table, headers, everything. DO NOT write the markdown inside a markdown code block, just write it raw.\"\"\"\n )\n\n\ndef test_generator():\n start_of_message_emitted = False\n end_of_message_emitted = False\n start_of_code_emitted = False\n end_of_code_emitted = False\n executing_emitted = False\n end_of_execution_emitted = False\n\n for chunk in interpreter.chat(\"What's 38023*40334?\", stream=True, display=False):\n print(chunk)\n if \"start_of_message\" in chunk:\n start_of_message_emitted = True\n if \"end_of_message\" in chunk:\n end_of_message_emitted = True\n if \"start_of_code\" in chunk:\n start_of_code_emitted = True\n if \"end_of_code\" in chunk:\n end_of_code_emitted = True\n if \"executing\" in chunk:\n executing_emitted = True\n if \"end_of_execution\" in chunk:\n end_of_execution_emitted = True\n\n assert start_of_message_emitted\n assert end_of_message_emitted\n assert start_of_code_emitted\n assert end_of_code_emitted\n assert executing_emitted\n assert end_of_execution_emitted\n\n\ndef test_config_loading():\n # because our test is running from the root directory, we need to do some\n # path manipulation to get the actual path to the config file or our config\n # loader will try to load from the wrong directory and fail\n currentPath = os.path.dirname(os.path.abspath(__file__))\n config_path = os.path.join(currentPath, \"./config.test.yaml\")\n\n interpreter.extend_config(config_path=config_path)\n\n # check the settings we configured in our config.test.yaml file\n temperature_ok = interpreter.temperature == 0.25\n model_ok = interpreter.model == \"gpt-3.5-turbo\"\n debug_mode_ok = interpreter.debug_mode == True\n\n assert temperature_ok and model_ok and debug_mode_ok\n\n\ndef test_system_message_appending():\n ping_system_message = (\n \"Respond to a `ping` with a `pong`. No code. No explanations. 
Just `pong`.\"\n )\n\n ping_request = \"ping\"\n pong_response = \"pong\"\n\n interpreter.system_message += ping_system_message\n\n messages = interpreter.chat(ping_request)\n\n assert messages == [\n {\"role\": \"user\", \"message\": ping_request},\n {\"role\": \"assistant\", \"message\": pong_response},\n ]\n\n\ndef test_reset():\n # make sure that interpreter.reset() clears out the messages Array\n assert interpreter.messages == []\n\n\ndef test_token_counter():\n system_tokens = count_tokens(\n text=interpreter.system_message, model=interpreter.model\n )\n\n prompt = \"How many tokens is this?\"\n\n prompt_tokens = count_tokens(text=prompt, model=interpreter.model)\n\n messages = [\n {\"role\": \"system\", \"message\": interpreter.system_message}\n ] + interpreter.messages\n\n"
},
{
"from": "gpt",
"value": " system_token_test = count_messages_tokens("
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport torch\nimport torch.nn.functional as F\nimport math\nfrom torch.nn import Dropout\nfrom torch import nn\nfrom functools import reduce\nfrom operator import mul\nfrom .Encoder_utils import LayerNorm, Transformer, Attention\n\n# Code from current file (tools/models/ViT_Encoder_add.py):\n\n\n\n\nclass SPTCLIPVisionTransformer(nn.Module):\n def __init__(self, input_resolution=384, patch_size=16, width=768, layers=12, heads=12, output_dim=512, drop_path_rate=0.1, out_indices=[5,6,7,8,11], pretrained=None, get_embeddings=True, \n num_tokens=10, prompt_dim=768, total_d_layer=11, **kwargs):\n super().__init__()\n self.pretrained = pretrained\n self.input_resolution = input_resolution\n self.output_dim = output_dim\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)\n\n scale = width ** -0.5\n self.class_embedding = nn.Parameter(scale * torch.randn(width))\n self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))\n self.spatial_size = input_resolution // patch_size\n"
},
{
"from": "gpt",
"value": " self.ln_pre = LayerNorm(width)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport copy\nimport datetime\nimport logging\nimport os\nimport matplotlib as mpl\nimport torch\nfrom torch.nn.functional import softmax\nfrom tqdm import tqdm\nfrom utils.misc import load_model_transform\nfrom .evaluation import evaluate as e\nfrom .utils import load_dataset, load_model\n\n# Code from current file (inference/modelvshuman/model_evaluator.py):\n\n\n\n\nlogger = logging.getLogger(__name__)\nMAX_NUM_MODELS_IN_CACHE = 3\n\nmpl.rcParams['font.size'] = 22\n\n\ndef device():\n return torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass ModelEvaluator:\n\n def _pytorch_evaluator(self, model_name, model, dataset, *args, **kwargs):\n \"\"\"\n Evaluate Model on the given dataset and return the accuracy.\n Args:\n model_name:\n model:\n dataset:\n *args:\n **kwargs:\n \"\"\"\n logging_info = f\"Evaluating model {model_name} on dataset {dataset.name} using Pytorch Evaluator\"\n logger.info(logging_info)\n print(logging_info)\n for metric in dataset.metrics:\n metric.reset()\n with torch.no_grad():\n result_writer = e.ResultPrinter(model_name=model_name,\n dataset=dataset)\n\n for images, target, paths in tqdm(dataset.loader):\n images = images.to(device())\n if \"forward_batch\" in dir(model):\n logits = model.forward_batch(images)\n softmax_output = model.softmax(logits)\n else:\n logits = model(images)\n softmax_output = softmax(logits,\n dim=1).detach().cpu().numpy()\n\n if isinstance(target, torch.Tensor):\n batch_targets = model.to_numpy(target)\n else:\n batch_targets = target\n predictions = dataset.decision_mapping(softmax_output)\n for metric in dataset.metrics:\n metric.update(predictions, batch_targets, paths)\n if kwargs[\"print_predictions\"]:\n result_writer.print_batch_to_csv(\n object_response=predictions,\n batch_targets=batch_targets,\n paths=paths)\n\n def _get_datasets(self, dataset_names, *args, **kwargs):\n dataset_list = []\n for dataset in dataset_names:\n"
},
{
"from": "gpt",
"value": " dataset = load_dataset(dataset, *args, **kwargs)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport unittest\nimport pytest\nfrom pydantic import ValidationError\nfrom ray_vllm_inference.prompt_format import Message, Prompt, PromptFormat\n\n# Code from current file (tests/prompt_format_test.py):\n# Adapted from:\n# https://github.com/ray-project/ray-llm/blob/master/rayllm/common/models.py\n\n\nclass PromptFormatCases(unittest.TestCase):\n\n def test_prompt_format_with_prompt_obj(self):\n prompt_format = PromptFormat(\n system=\"[system] {instruction} [/system] \",\n assistant=\"[assistant] {instruction} [/assistant] \",\n trailing_assistant=\"[assistant]\",\n user=\"[user] {instruction} [/user] \",\n default_system_message=\"\",\n )\n prompt = prompt_format.generate_prompt(\n Prompt(\n prompt=\"hello1\",\n use_prompt_format=True,\n )\n )\n assert prompt == \"[user] hello1 [/user] [assistant]\"\n prompt = prompt_format.generate_prompt(\n Prompt(\n prompt=\"hello1\",\n use_prompt_format=False,\n )\n )\n assert prompt == \"hello1\"\n\n\n def test_prompt_format(self):\n prompt_format = PromptFormat(\n system=\"[system] {instruction} [/system] \",\n assistant=\"[assistant] {instruction} [/assistant] \",\n trailing_assistant=\"[assistant]\",\n user=\"[user] {instruction} [/user] \",\n default_system_message=\"\",\n )\n # Only user, no system\n"
},
{
"from": "gpt",
"value": " messages = [Message(role=\"user\", content=\"hello1\")]"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom ...__Import import *\nfrom ...Variable import Listener, Poll, Variable\n\n# Code from current file (PotatoWidgets/Widget/_Common/_BasicProps.py):\n\n\nclass BasicProps(Gtk.Widget):\n def __init__(\n self,\n halign,\n valign,\n hexpand,\n vexpand,\n active,\n visible,\n classname,\n # tooltip,\n css,\n size=[10, 10],\n ):\n Gtk.Widget.__init__(self)\n self.set_hexpand(True if hexpand else False)\n self.set_vexpand(True if vexpand else False)\n self.set_halign(halign)\n self.set_valign(valign)\n self.set_visible(visible)\n self.set_sensitive(active) if active is not None else None\n self.set_classname(classname)\n self.__clasif_size(size)\n self.apply_css(css) if css else None\n\n for key, value in locals().items():\n callback = {\n \"halign\": self.set_halign,\n \"valign\": self.set_valign,\n \"hexpand\": self.set_hexpand,\n \"vexpand\": self.set_vexpand,\n \"active\": self.set_sensitive,\n \"visible\": self.set_visible,\n \"size\": self.set_size,\n \"classname\": self.set_classname,\n }.get(key)\n\n self.bind(value, callback) if callback else None\n\n def set_size(self, size):\n self.__clasif_size(size)\n\n def set_halign(self, param):\n super().set_halign(self.__clasif_align(str(param)))\n\n def set_valign(self, param):\n super().set_valign(self.__clasif_align(str(param)))\n\n def __clasif_size(self, size):\n if isinstance(size, int):\n self.set_size_request(size, size)\n elif isinstance(size, list):\n if len(size) == 2:\n self.set_size_request(size[0], size[1])\n elif len(size) == 1:\n self.set_size_request(size[0], size[0])\n\n def __clasif_align(self, param):\n dict = {\n \"fill\": Gtk.Align.FILL,\n \"start\": Gtk.Align.START,\n \"end\": Gtk.Align.END,\n \"center\": Gtk.Align.CENTER,\n \"baseline\": Gtk.Align.BASELINE,\n }\n return dict.get(param.lower(), Gtk.Align.FILL)\n\n def set_classname(self, param):\n if isinstance(param, (str)):\n context = self.get_style_context()\n [context.add_class(i) for i in param.split(\" \") if i != \" \"]\n elif isinstance(param, (list)):\n for i in param:\n"
},
{
"from": "gpt",
"value": " if isinstance(i, (Listener, Variable, Poll)):"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport os\nimport shutil\nfrom music21 import converter as converterm21\nfrom pyMV2H.utils.mv2h import MV2H\nfrom pyMV2H.metrics.mv2h import mv2h\nfrom pyMV2H.utils.music import Music\nfrom pyMV2H.converter.midi_converter import MidiConverter as Converter\nfrom .encoding_convertions import VOICE_CHANGE_TOKEN, STEP_CHANGE_TOKEN\n\n# Code from current file (my_utils/metrics.py):\n\n\n\n\ndef compute_metrics(y_true, y_pred):\n ################################# Sym-ER and Seq-ER:\n metrics = compute_ed_metrics(y_true=y_true, y_pred=y_pred)\n ################################# MV2H:\n mv2h_dict = compute_mv2h_metrics(y_true=y_true, y_pred=y_pred)\n metrics.update(mv2h_dict)\n return metrics\n\n\n#################################################################### SYM-ER AND SEQ-ER:\n\n\ndef compute_ed_metrics(y_true, y_pred):\n def levenshtein(a, b):\n n, m = len(a), len(b)\n\n if n > m:\n a, b = b, a\n n, m = m, n\n\n current = range(n + 1)\n for i in range(1, m + 1):\n previous, current = current, [i] + [0] * n\n for j in range(1, n + 1):\n add, delete = previous[j] + 1, current[j - 1] + 1\n change = previous[j - 1]\n if a[j - 1] != b[i - 1]:\n change = change + 1\n current[j] = min(add, delete, change)\n\n return current[n]\n\n ed_acc = 0\n length_acc = 0\n label_acc = 0\n for t, h in zip(y_true, y_pred):\n ed = levenshtein(t, h)\n ed_acc += ed\n length_acc += len(t)\n if ed > 0:\n label_acc += 1\n\n return {\n \"sym-er\": 100.0 * ed_acc / length_acc,\n \"seq-er\": 100.0 * label_acc / len(y_pred),\n }\n\n\n#################################################################### MV2H:\n\n\ndef compute_mv2h_metrics(y_true, y_pred):\n def krn2midi(in_file):\n a = converterm21.parse(in_file).write(\"midi\")\n midi_file = a.name\n shutil.copyfile(a, midi_file)\n os.remove(in_file)\n return midi_file\n\n def midi2txt(midi_file):\n txt_file = midi_file.replace(\"mid\", \"txt\")\n converter = Converter(file=midi_file, output=txt_file)\n converter.convert_file()\n with open(txt_file, \"r\") as fin:\n f = [u.replace(\".0\", \"\") for u in fin.readlines()]\n with open(txt_file, \"w\") as fout:\n for u in f:\n fout.write(u)\n os.remove(midi_file)\n return txt_file\n\n ########################################### Polyphonic evaluation:\n\n def eval_as_polyphonic():\n # Convert to MIDI\n reference_midi_file = krn2midi(\"true.krn\")\n predicted_midi_file = krn2midi(\"pred.krn\")\n\n # Convert to TXT\n reference_txt_file = midi2txt(reference_midi_file)\n predicted_txt_file = midi2txt(predicted_midi_file)\n\n # Compute MV2H\n reference_file = Music.from_file(reference_txt_file)\n transcription_file = Music.from_file(predicted_txt_file)\n res_dict = MV2H(multi_pitch=0, voice=0, meter=0, harmony=0, note_value=0)\n try:\n res_dict = mv2h(reference_file, transcription_file)\n except:\n pass\n\n # Remove auxiliar files\n os.remove(reference_txt_file)\n os.remove(predicted_txt_file)\n\n return res_dict\n\n ########################################### Monophonic evaluation:\n\n def get_number_of_voices(kern):\n num_voices = 0\n for token in kern:\n if token == VOICE_CHANGE_TOKEN:\n continue\n"
},
{
"from": "gpt",
"value": " if token == STEP_CHANGE_TOKEN:"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom collections import OrderedDict\nfrom copy import deepcopy\nfrom functools import partial\nfrom ml_collections import ConfigDict\nfrom flax.training.train_state import TrainState\nfrom .jax_utils import (\n next_rng, value_and_multi_grad, mse_loss, JaxRNG, wrap_function_with_rng,\n collect_jax_metrics\n)\nfrom .model import Scalar, update_target_network\nimport numpy as np\nimport jax\nimport jax.numpy as jnp\nimport flax\nimport flax.linen as nn\nimport optax\nimport distrax\n\n# Code from current file (jax_cql/JaxCQL/sac.py):\n\n\n\n\n\nclass SAC(object):\n\n @staticmethod\n def get_default_config(updates=None):\n config = ConfigDict()\n config.discount = 0.99\n config.alpha_multiplier = 1.0\n config.use_automatic_entropy_tuning = True\n config.backup_entropy = False\n config.target_entropy = 0.0\n config.policy_lr = 3e-4\n config.qf_lr = 3e-4\n config.optimizer_type = 'adam'\n config.soft_target_update_rate = 5e-3\n\n if updates is not None:\n config.update(ConfigDict(updates).copy_and_resolve_references())\n return config\n\n def __init__(self, config, policy, qf):\n self.config = self.get_default_config(config)\n self.policy = policy\n self.qf = qf\n self.observation_dim = policy.observation_dim\n self.action_dim = policy.action_dim\n\n self._train_states = {}\n\n optimizer_class = {\n 'adam': optax.adam,\n 'sgd': optax.sgd,\n }[self.config.optimizer_type]\n\n policy_params = self.policy.init(\n next_rng(self.policy.rng_keys()),\n jnp.zeros((10, self.observation_dim))\n )\n self._train_states['policy'] = TrainState.create(\n params=policy_params,\n tx=optimizer_class(self.config.policy_lr),\n apply_fn=None\n )\n\n qf1_params = self.qf.init(\n next_rng(self.qf.rng_keys()),\n jnp.zeros((10, self.observation_dim)),\n jnp.zeros((10, self.action_dim))\n )\n self._train_states['qf1'] = TrainState.create(\n params=qf1_params,\n tx=optimizer_class(self.config.qf_lr),\n apply_fn=None,\n )\n qf2_params = self.qf.init(\n next_rng(self.qf.rng_keys()),\n jnp.zeros((10, self.observation_dim)),\n jnp.zeros((10, self.action_dim))\n )\n self._train_states['qf2'] = TrainState.create(\n params=qf2_params,\n tx=optimizer_class(self.config.qf_lr),\n apply_fn=None,\n )\n self._target_qf_params = deepcopy({'qf1': qf1_params, 'qf2': qf2_params})\n\n model_keys = ['policy', 'qf1', 'qf2']\n\n if self.config.use_automatic_entropy_tuning:\n"
},
{
"from": "gpt",
"value": " self.log_alpha = Scalar(0.0)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom aiogram import Router, Bot, F\nfrom aiogram.types import Message\nfrom app.db.functions import User\nfrom app.db.functions import Card\nfrom app.keyboards.inline import get_approve_keyboard\nfrom app.config import Config\n\n# Code from current file (app/handlers/user/file.py):\n\n\nrouter = Router()\n\n\[email protected](F.content_type.in_({\"photo\", \"video\", \"animation\"}))\nasync def get_postcard(message: Message, bot: Bot, config: Config):\n if await Card.check_exists(message.from_user.id):\n await message.answer(\"Вы уже отправили свою открытку!\")\n return\n\n postcard_type = message.content_type\n if message.photo is not None:\n file_id = message.photo[-1].file_id\n elif message.video is not None:\n file_id = message.video.file_id\n elif message.animation is not None:\n file_id = message.animation.file_id\n else:\n file_id = None\n\n user_id = message.from_user.id\n chat_id = config.settings.chat_id\n"
},
{
"from": "gpt",
"value": " if not await User.is_registered(user_id):"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport json\nimport os\nimport random\nimport torch\nimport torch\nimport numpy as np\nimport decord\nimport os,glob\nfrom pandas import Categorical\nfrom torch.utils.data import Dataset\nfrom PIL import Image\nfrom PIL import ImageFile\nfrom decord import VideoReader\nfrom data.utils import pre_caption\nfrom .randaugment import TemporalConsistentRandomAugment\n\n# Code from current file (data/pretrain_dataset.py):\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\nImage.MAX_IMAGE_PIXELS = None\n\n\n\ndecord.bridge.set_bridge('torch')\n\nclass pretrain_dataset(Dataset):\n def __init__(self, ann_file, laion_path, transform): \n\n self.ann_pretrain = []\n for f in ann_file:\n print('loading '+f)\n ann = json.load(open(f,'r'))\n self.ann_pretrain += ann\n \n self.laion_path = laion_path\n if self.laion_path:\n self.laion_files = glob.glob(os.path.join(laion_path,'*.json'))\n\n print('loading '+self.laion_files[0])\n with open(self.laion_files[0],'r') as f:\n self.ann_laion = json.load(f) \n\n self.annotation = self.ann_pretrain + self.ann_laion\n else:\n self.annotation = self.ann_pretrain\n \n self.transform = transform\n\n\n def reload_laion(self, epoch):\n n = epoch%len(self.laion_files)\n print('loading '+self.laion_files[n])\n with open(self.laion_files[n],'r') as f:\n self.ann_laion = json.load(f) \n \n self.annotation = self.ann_pretrain + self.ann_laion \n \n \n def __len__(self):\n return len(self.annotation)\n \n def __getitem__(self, index): \n \n ann = self.annotation[index] \n \n image = Image.open(ann['image']).convert('RGB') \n image = self.transform(image)\n"
},
{
"from": "gpt",
"value": " caption = pre_caption(ann['caption'],30)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.init as init\nfrom .kb_utils import KBAFunction, LayerNorm2d, SimpleGate\n\n# Code from current file (src/spandrel/architectures/KBNet/arch/kbnet_s.py):\n# type: ignore\n\n\n\n\nclass KBBlock_s(nn.Module):\n def __init__(\n self, c, DW_Expand=2, FFN_Expand=2, nset=32, k=3, gc=4, lightweight=False\n ):\n super().__init__()\n self.k, self.c = k, c\n self.nset = nset\n dw_ch = int(c * DW_Expand)\n ffn_ch = int(FFN_Expand * c)\n\n self.g = c // gc\n self.w = nn.Parameter(torch.zeros(1, nset, c * c // self.g * self.k**2))\n self.b = nn.Parameter(torch.zeros(1, nset, c))\n self.init_p(self.w, self.b)\n\n"
},
{
"from": "gpt",
"value": " self.norm1 = LayerNorm2d(c)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport math\nimport torch\nimport torch.nn.functional as F\n import xformers\n import xformers.ops\nfrom functools import partial\nfrom inspect import isfunction\nfrom einops import rearrange, repeat\nfrom torch import einsum, nn\nfrom lvdm.basics import conv_nd, normalization, zero_module\nfrom lvdm.common import checkpoint, default, exists, init_, max_neg_value, uniq\n\n# Code from current file (lvdm/modules/attention.py):\n\n\ntry:\n XFORMERS_IS_AVAILBLE = True\nexcept:\n XFORMERS_IS_AVAILBLE = False\n\n\nclass RelativePosition(nn.Module):\n \"\"\" https://github.com/evelinehong/Transformer_Relative_Position_PyTorch/blob/master/relative_position.py \"\"\"\n\n def __init__(self, num_units, max_relative_position):\n super().__init__()\n self.num_units = num_units\n self.max_relative_position = max_relative_position\n self.embeddings_table = nn.Parameter(torch.Tensor(max_relative_position * 2 + 1, num_units))\n nn.init.xavier_uniform_(self.embeddings_table)\n\n def forward(self, length_q, length_k):\n device = self.embeddings_table.device\n range_vec_q = torch.arange(length_q, device=device)\n range_vec_k = torch.arange(length_k, device=device)\n distance_mat = range_vec_k[None, :] - range_vec_q[:, None]\n distance_mat_clipped = torch.clamp(distance_mat, -self.max_relative_position, self.max_relative_position)\n final_mat = distance_mat_clipped + self.max_relative_position\n # final_mat = th.LongTensor(final_mat).to(self.embeddings_table.device)\n # final_mat = th.tensor(final_mat, device=self.embeddings_table.device, dtype=torch.long)\n final_mat = final_mat.long()\n embeddings = self.embeddings_table[final_mat]\n return embeddings\n\n\nclass CrossAttention(nn.Module):\n\n def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., \n relative_position=False, temporal_length=None):\n super().__init__()\n inner_dim = dim_head * heads\n"
},
{
"from": "gpt",
"value": " context_dim = default(context_dim, query_dim)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport numpy as np\nimport torch\nfrom .base import LazyMultiStepMemory\nfrom .segment_tree import SumTree, MinTree\n\n# Code from current file (sacd/memory/per.py):\n\n\nclass LazyPrioritizedMultiStepMemory(LazyMultiStepMemory):\n\n def __init__(self, capacity, state_shape, device, gamma=0.99,\n multi_step=3, alpha=0.6, beta=0.4, beta_steps=2e5,\n min_pa=0.0, max_pa=1.0, eps=0.01):\n super().__init__(capacity, state_shape, device, gamma, multi_step)\n\n self.alpha = alpha\n self.beta = beta\n self.beta_diff = (1.0 - beta) / beta_steps\n self.min_pa = min_pa\n self.max_pa = max_pa\n self.eps = eps\n self._cached = None\n\n it_capacity = 1\n while it_capacity < capacity:\n it_capacity *= 2\n self.it_sum = SumTree(it_capacity)\n"
},
{
"from": "gpt",
"value": " self.it_min = MinTree(it_capacity)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport logging\nimport torch\nimport torch.nn.functional as F\nfrom torch import Tensor, nn\nfrom ..melspec import MelSpectrogram\nfrom .hparams import HParams\nfrom .unet import UNet\n\n# Code from current file (resemble_enhance/denoiser/denoiser.py):\n\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef _normalize(x: Tensor) -> Tensor:\n return x / (x.abs().max(dim=-1, keepdim=True).values + 1e-7)\n\n\nclass Denoiser(nn.Module):\n @property\n def stft_cfg(self) -> dict:\n hop_size = self.hp.hop_size\n return dict(hop_length=hop_size, n_fft=hop_size * 4, win_length=hop_size * 4)\n\n @property\n def n_fft(self):\n return self.stft_cfg[\"n_fft\"]\n\n @property\n def eps(self):\n return 1e-7\n\n def __init__(self, hp: HParams):\n super().__init__()\n self.hp = hp\n self.net = UNet(input_dim=3, output_dim=3)\n"
},
{
"from": "gpt",
"value": " self.mel_fn = MelSpectrogram(hp)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport sys\nimport logging\nfrom eviloauth.idp import IDP\nfrom eviloauth.exceptions import EviloauthCommandException\n\n# Code from current file (eviloauth/dispatcher.py):\n\n\nclass Dispatcher:\n def __init__(self, flask_server, module_dict, cache, redirect_server):\n logging.debug('Initializing dispatcher')\n logging.debug(f'\\tFlask server: {flask_server}')\n logging.debug(f'\\tModule dict: {module_dict}')\n logging.debug(f'\\tCache: {cache}')\n logging.debug(f'\\tRedirect server: {redirect_server}')\n\n self.flask_server = flask_server\n self.module_dict = module_dict\n self.cache = cache\n self.redirect_server = redirect_server\n\n def dispatch(self, commands):\n cmd, sub, arg, *args = commands.split(' ') + [None, None, None]\n\n if cmd == 'exit':\n self.dispatch_exit()\n\n elif cmd == 'module':\n self.dispatch_module(cmd, sub, arg)\n\n elif cmd == 'tokens':\n self.dispatch_tokens(cmd, sub)\n\n elif cmd == 'idp':\n self.dispatch_idp(cmd, sub, arg)\n\n elif cmd == 'target':\n self.dispatch_target(cmd, sub, arg)\n\n else:\n raise EviloauthCommandException(\n 'Unknown command %s' % cmd)\n\n def dispatch_exit(self):\n print('Exiting...')\n self.flask_server.shutdown()\n sys.exit()\n\n def dispatch_module(self, cmd, sub, arg):\n mod = self.module_dict[f'eviloauth.{cmd}.{sub}.{arg}']\n mod.__run__(self.cache.get('target'), 0)\n\n def dispatch_tokens(self, cmd, sub):\n general_tokens = self.cache.get('tokens')\n if sub == 'list':\n print([v for v in general_tokens.keys()])\n elif sub == 'add':\n logging.error('Not implemented yet')\n else:\n raise EviloauthCommandException(\n 'Unknown \"%s\" command %s' % (cmd, sub))\n\n def dispatch_idp(self, cmd, sub, arg):\n if sub == 'list':\n print('Current IDP: %s' % self.cache.get('idp'))\n elif sub == 'configure':\n"
},
{
"from": "gpt",
"value": " idp = IDP(arg, self.redirect_server)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom .ipynb.ipynb_analyse import parse_ipynb, get_ipynb_content, get_model_list, model_list2python\nfrom .py.py_analyse import extract_function_defs, get_function_defs, get_intro_of_fun\n from ..llm.Ernie import Ernie\n from ..llm.Ernie import Ernie\n\n# Code from current file (codes/extraction/ModuleTools.py):\n\nclass ModuleTools(object):\n def __init__(self, llm_type=\"Ernie\"):\n super.__init__()\n if llm_type==\"Ernie\":\n self.llm = Ernie()\n else: # default set ernie as used llm\n self.llm = Ernie()\n\n def ipynb2py(self, ipynb_path = \"example.ipynb\", prompt = \"\"):\n"
},
{
"from": "gpt",
"value": " result = parse_ipynb(ipynb_path)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom django.shortcuts import render\nfrom .models import *\nfrom django.http import JsonResponse\nfrom datetime import datetime\nfrom .utiles import commandeAnonyme, data_cookie, panier_cookie\nimport json\n\n# Code from current file (myShop/shop/views.py):\n\ndef shop(request, *args, **kwargs):\n \"\"\" vue principale \"\"\"\n\n produits = Produit.objects.all()\n data = data_cookie(request)\n nombre_article = data['nombre_article']\n\n context = {\n 'produits':produits,\n 'nombre_article': nombre_article\n }\n\n return render(request, 'shop/index.html', context)\n\n\ndef panier(request, *args, **kwargs):\n \"\"\" panier \"\"\"\n \n data = data_cookie(request)\n articles = data['articles']\n commande = data['commande']\n nombre_article = data['nombre_article']\n\n context = {\n 'articles':articles,\n 'commande':commande,\n 'nombre_article':nombre_article\n }\n\n return render(request, 'shop/panier.html', context)\n\n\ndef commande(request, *args, **kwargs):\n \"\"\" Commande \"\"\"\n\n data = data_cookie(request)\n articles = data['articles']\n commande = data['commande']\n nombre_article = data['nombre_article']\n\n context = {\n 'articles':articles,\n 'commande':commande,\n 'nombre_article': nombre_article\n }\n\n return render(request, 'shop/commande.html', context) \n\n\n\ndef update_article(request, *args, **kwargs):\n\n data = json.loads(request.body)\n\n produit_id = data['produit_id']\n\n action = data['action']\n\n client = request.user.client\n\n produit = Produit.objects.get(id=produit_id)\n\n commande, created = Commande.objects.get_or_create(client=client, complete=False)\n\n commande_article, created = CommandeArticle.objects.get_or_create(commande=commande, produit=produit)\n\n if action == 'add':\n\n commande_article.quantite += 1\n\n if action == 'remove':\n\n commande_article.quantite -= 1\n\n commande_article.save()\n\n if commande_article.quantite <= 0:\n\n commande_article.delete() \n\n return JsonResponse(\"Article ajouté\", safe=False)\n\n\ndef traitementCommande(request, *args, **kwargs):\n \"\"\" traitement, validation de la com;ande et verification de l'integrite des donnees(detection de fraude)\"\"\"\n\n STATUS_TRANSACTION = ['ACCEPTED', 'COMPLETED', 'SUCESS']\n \n transaction_id = datetime.now().timestamp()\n\n data = json.loads(request.body)\n\n print(data)\n\n if request.user.is_authenticated:\n\n client = request.user.client\n\n commande, created = Commande.objects.get_or_create(client=client, complete=False)\n\n\n else:\n"
},
{
"from": "gpt",
"value": " client, commande = commandeAnonyme(request, data)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport torch.optim as optim\r\nfrom utils.lr_scheduler import WarmupMultiStepLR, WarmupCosineAnnealingLR\r\n\n# Code from current file (utils/solver.py):\n\r\n\r\n\r\ndef _optimizer(config, model):\r\n if config.solver.optim == 'adam':\r\n optimizer = optim.Adam([{'params': model.parameters()}],\r\n lr=config.solver.lr, betas=(0.9, 0.98), eps=1e-8,\r\n weight_decay=0.2) # Params used from paper, the lr is smaller, more safe for fine tuning to new dataset\r\n print('Adam')\r\n elif config.solver.optim == 'sgd':\r\n\r\n optimizer = optim.SGD([{'params': model.parameters()}],\r\n config.solver.lr,\r\n momentum=config.solver.momentum,\r\n weight_decay=config.solver.weight_decay)\r\n print('SGD')\r\n elif config.solver.optim == 'adamw':\r\n vision_params = list(map(id, model.visual.parameters()))\r\n text_params = filter(lambda p: id(p) not in vision_params,\r\n model.parameters())\r\n\r\n optimizer = optim.AdamW([{'params': text_params},\r\n {'params': model.visual.parameters(), 'lr': config.solver.lr * config.solver.ratio},],\r\n betas=(0.9, 0.98), lr=config.solver.lr, eps=1e-8,\r\n weight_decay=config.solver.weight_decay) # Params used from paper, the lr is smaller, more safe for fine tuning to new dataset\r\n for param_group in optimizer.param_groups:\r\n print(param_group['lr'])\r\n print('AdamW')\r\n else:\r\n raise ValueError('Unknown optimizer: {}'.format(config.solver.optim))\r\n return optimizer\r\n\r\ndef _lr_scheduler(config,optimizer):\r\n if config.solver.type == 'cosine':\r\n lr_scheduler = WarmupCosineAnnealingLR(\r\n optimizer,\r\n config.solver.epochs,\r\n warmup_epochs=config.solver.lr_warmup_step\r\n )\r\n elif config.solver.type == 'multistep':\r\n if isinstance(config.solver.lr_decay_step, list):\r\n milestones = config.solver.lr_decay_step\r\n elif isinstance(config.solver.lr_decay_step, int):\r\n milestones = [\r\n config.solver.lr_decay_step * (i + 1)\r\n for i in range(config.solver.epochs //\r\n config.solver.lr_decay_step)]\r\n else:\r\n raise ValueError(\"error learning rate decay step: {}\".format(type(config.solver.lr_decay_step)))\r\n"
},
{
"from": "gpt",
"value": " lr_scheduler = WarmupMultiStepLR(\r"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport random\nimport uuid\nfrom faker import Faker\nfrom CE2OCF.ocf.mocks.company import fake_phone_number\nfrom CE2OCF.types.enums import (\n DoubleTriggerTypesEnum,\n PaidWithOptionsEnum,\n SingleTriggerTypesEnum,\n VestingTypesEnum,\n)\nfrom CE2OCF.types.models import Stockholder\n\n# Code from current file (CE2OCF/ocf/mocks/stockholders.py):\n\n\n\nfake = Faker()\n\n\ndef sum_shares(stockholder_list: list[Stockholder]) -> tuple[int, int]:\n\n total_FFPreferredShares = 0\n total_Shares = 0\n\n for stockholder in stockholder_list:\n if stockholder.FFPreferredShares is not None:\n total_FFPreferredShares += stockholder.FFPreferredShares\n if stockholder.Shares is not None:\n total_Shares += stockholder.Shares # if Shares are floats, replace with `float(stockholder.Shares)`\n\n return total_FFPreferredShares, total_Shares\n\n\ndef mock_stockholder() -> Stockholder:\n return Stockholder(\n id=uuid.uuid4().__str__(),\n"
},
{
"from": "gpt",
"value": " DoubleTrigger=random.choice(list(DoubleTriggerTypesEnum)),"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom utils.utils import oneline_wrapper\nfrom utils import error\nfrom multiprocessing import Process\nimport os\nimport time\nimport argparse\n\n# Code from current file (run_advanced_maxpressure.py):\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--memo\", type=str, default='AdvancedMaxPressure')\n parser.add_argument(\"--model\", type=str, default=\"AdvancedMaxPressure\")\n parser.add_argument(\"--proj_name\", type=str, default=\"chatgpt-TSCS\")\n parser.add_argument(\"--eightphase\", action=\"store_true\", default=False)\n parser.add_argument(\"--multi_process\", action=\"store_true\", default=True)\n parser.add_argument(\"--workers\", type=int, default=1)\n parser.add_argument(\"--dataset\", type=str, default=\"template\")\n parser.add_argument(\"--traffic_file\", type=str, default=\"flow_main_stream.json\")\n\n return parser.parse_args()\n\n\ndef main(in_args):\n traffic_file_list = []\n\n if in_args.dataset == 'jinan':\n count = 3600\n road_net = \"3_4\"\n traffic_file_list = [\"anon_3_4_jinan_real.json\", \"anon_3_4_jinan_real_2000.json\", \"anon_3_4_jinan_real_2500.json\"]\n template = \"Jinan\"\n elif in_args.dataset == 'hangzhou':\n count = 3600\n road_net = \"4_4\"\n traffic_file_list = [\"anon_4_4_hangzhou_real.json\", \"anon_4_4_hangzhou_real_5816.json\"]\n template = \"Hangzhou\"\n elif in_args.dataset == 'newyork_16x3':\n count = 3600\n road_net = \"16_3\"\n traffic_file_list = [\"anon_16_3_newyork_real.json\"]\n template = \"NewYork\"\n elif in_args.dataset == 'newyork_28x7':\n count = 3600\n road_net = \"28_7\"\n traffic_file_list = [\"anon_28_7_newyork_real_double.json\", \"anon_28_7_newyork_real_triple.json\"]\n template = \"NewYork\"\n elif in_args.dataset == 'template':\n count = 3600\n road_net = \"1_1\"\n traffic_file_list = [\"flow_main_stream.json\"]\n template = \"template\"\n\n # flow_file error\n try:\n if in_args.traffic_file not in traffic_file_list:\n"
},
{
"from": "gpt",
"value": " raise error.flowFileException('Flow file does not exist.')"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nfrom datetime import datetime, timedelta, timezone\nfrom pathlib import Path\nfrom unittest.mock import MagicMock\nfrom pandas import DataFrame, DateOffset, Timestamp, to_datetime\nfrom freqtrade.configuration import TimeRange\nfrom freqtrade.constants import LAST_BT_RESULT_FN\nfrom freqtrade.data.btanalysis import (BT_DATA_COLUMNS, analyze_trade_parallelism,\n extract_trades_of_period, get_latest_backtest_filename,\n get_latest_hyperopt_file, load_backtest_data,\n load_backtest_metadata, load_trades, load_trades_from_db)\nfrom freqtrade.data.history import load_data, load_pair_history\nfrom freqtrade.data.metrics import (calculate_cagr, calculate_calmar, calculate_csum,\n calculate_expectancy, calculate_market_change,\n calculate_max_drawdown, calculate_sharpe, calculate_sortino,\n calculate_underwater, combine_dataframes_with_mean,\n create_cum_profit)\nfrom freqtrade.exceptions import OperationalException\nfrom freqtrade.util import dt_utc\nfrom tests.conftest import CURRENT_TEST_STRATEGY, create_mock_trades\nfrom tests.conftest_trades import MOCK_TRADE_COUNT\nimport pytest\n\n# Code from current file (tests/data/test_btanalysis.py):\n\n\n\n\ndef test_get_latest_backtest_filename(testdatadir, mocker):\n with pytest.raises(ValueError, match=r\"Directory .* does not exist\\.\"):\n get_latest_backtest_filename(testdatadir / 'does_not_exist')\n\n with pytest.raises(ValueError,\n match=r\"Directory .* does not seem to contain .*\"):\n get_latest_backtest_filename(testdatadir)\n\n testdir_bt = testdatadir / \"backtest_results\"\n res = get_latest_backtest_filename(testdir_bt)\n assert res == 'backtest-result.json'\n\n res = get_latest_backtest_filename(str(testdir_bt))\n assert res == 'backtest-result.json'\n\n mocker.patch(\"freqtrade.data.btanalysis.json_load\", return_value={})\n\n with pytest.raises(ValueError, match=r\"Invalid '.last_result.json' format.\"):\n get_latest_backtest_filename(testdir_bt)\n\n\ndef test_get_latest_hyperopt_file(testdatadir):\n res = get_latest_hyperopt_file(testdatadir / 'does_not_exist', 'testfile.pickle')\n assert res == testdatadir / 'does_not_exist/testfile.pickle'\n\n res = get_latest_hyperopt_file(testdatadir.parent)\n assert res == testdatadir.parent / \"hyperopt_results.pickle\"\n\n res = get_latest_hyperopt_file(str(testdatadir.parent))\n assert res == testdatadir.parent / \"hyperopt_results.pickle\"\n\n # Test with absolute path\n with pytest.raises(\n OperationalException,\n match=\"--hyperopt-filename expects only the filename, not an absolute path.\"):\n get_latest_hyperopt_file(str(testdatadir.parent), str(testdatadir.parent))\n\n\ndef test_load_backtest_metadata(mocker, testdatadir):\n res = load_backtest_metadata(testdatadir / 'nonexistant.file.json')\n assert res == {}\n\n mocker.patch('freqtrade.data.btanalysis.get_backtest_metadata_filename')\n mocker.patch('freqtrade.data.btanalysis.json_load', side_effect=Exception())\n with pytest.raises(OperationalException,\n match=r\"Unexpected error.*loading backtest metadata\\.\"):\n load_backtest_metadata(testdatadir / 'nonexistant.file.json')\n\n\ndef test_load_backtest_data_old_format(testdatadir, mocker):\n\n filename = testdatadir / \"backtest-result_test222.json\"\n mocker.patch('freqtrade.data.btanalysis.load_backtest_stats', return_value=[])\n\n with pytest.raises(OperationalException,\n match=r\"Backtest-results with only trades data are no longer supported.\"):\n load_backtest_data(filename)\n\n\ndef 
test_load_backtest_data_new_format(testdatadir):\n\n filename = testdatadir / \"backtest_results/backtest-result.json\"\n bt_data = load_backtest_data(filename)\n assert isinstance(bt_data, DataFrame)\n assert set(bt_data.columns) == set(BT_DATA_COLUMNS)\n assert len(bt_data) == 179\n\n # Test loading from string (must yield same result)\n bt_data2 = load_backtest_data(str(filename))\n assert bt_data.equals(bt_data2)\n\n # Test loading from folder (must yield same result)\n bt_data3 = load_backtest_data(testdatadir / \"backtest_results\")\n assert bt_data.equals(bt_data3)\n\n with pytest.raises(ValueError, match=r\"File .* does not exist\\.\"):\n load_backtest_data(\"filename\" + \"nofile\")\n\n with pytest.raises(ValueError, match=r\"Unknown dataformat.\"):\n load_backtest_data(testdatadir / \"backtest_results\" / LAST_BT_RESULT_FN)\n\n\ndef test_load_backtest_data_multi(testdatadir):\n\n filename = testdatadir / \"backtest_results/backtest-result_multistrat.json\"\n for strategy in ('StrategyTestV2', 'TestStrategy'):\n bt_data = load_backtest_data(filename, strategy=strategy)\n assert isinstance(bt_data, DataFrame)\n assert set(bt_data.columns) == set(\n BT_DATA_COLUMNS)\n assert len(bt_data) == 179\n\n # Test loading from string (must yield same result)\n bt_data2 = load_backtest_data(str(filename), strategy=strategy)\n assert bt_data.equals(bt_data2)\n\n with pytest.raises(ValueError, match=r\"Strategy XYZ not available in the backtest result\\.\"):\n load_backtest_data(filename, strategy='XYZ')\n\n with pytest.raises(ValueError, match=r\"Detected backtest result with more than one strategy.*\"):\n load_backtest_data(filename)\n\n\[email protected](\"init_persistence\")\[email protected]('is_short', [False, True])\ndef test_load_trades_from_db(default_conf, fee, is_short, mocker):\n\n"
},
{
"from": "gpt",
"value": " create_mock_trades(fee, is_short)"
}
] |
[
{
"from": "human",
"value": "Complete the next line of code: # Imports from current file:\nimport json\nfrom typing import List\nfrom obsidian_to_bookstack.bookstack.artifacts import Book, Shelf\nfrom obsidian_to_bookstack.bookstack.client import RemoteClient\nfrom obsidian_to_bookstack.bookstack.collectors.collector import \\\n RemoteCollector\nfrom obsidian_to_bookstack.bookstack.constants import *\nfrom obsidian_to_bookstack.console import console\nfrom obsidian_to_bookstack.utils import con_hash\n\n# Code from current file (obsidian_to_bookstack/bookstack/collectors/remote/RemoteBookCollector.py):\n\n\n\nclass RemoteBookCollector(RemoteCollector):\n def __init__(self, verbose: bool, client: RemoteClient) -> None:\n super().__init__(verbose, client)\n\n def get_books(self, shelves: List[Shelf]):\n \"\"\"Get remote books from shelves\"\"\"\n client_books = self.client._get_from_client(BookstackAPIEndpoints.BOOKS)\n\n for book in client_books:\n\n class DetailedBook(DetailedBookstackLink):\n LINK = f\"/api/books/{book['id']}\"\n\n details = json.loads(\n self.client._make_request(\n RequestType.GET,\n DetailedBook.LINK,\n ).data.decode()\n )\n\n book[\"details\"] = details\n\n books = [Book(book[\"name\"], details=book[\"details\"]) for book in client_books]\n\n BOOK_MAP = {\n con_hash(book.name + str(book.details[\"id\"])): book for book in books\n }\n\n for shelf in shelves:\n for book in shelf.client_books:\n b = BOOK_MAP.get(con_hash(book[\"name\"] + str(book[\"id\"])))\n if b:\n b.shelf = shelf\n shelf.books.append(b)\n\n if self.verbose:\n"
},
{
"from": "gpt",
"value": " console.log(f\"Found remote book: {b}\")"
}
] |