Columns:

code: string, lengths 57 to 46.3k characters
quality_prob: float64, range 0.7 to 0.99
learning_prob: float64, range 0.5 to 1
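Each row pairs one function (the code column) with these two classifier scores, so subsets can be cut by thresholding. A minimal sketch (the row dict shape is an assumption about the loaded format; field names come from the schema above):

# hypothetical row, shaped like the records previewed below
row = {
    "code": "def add(a, b):\n    return a + b\n",
    "quality_prob": 0.93,   # model-estimated code quality (0.7-0.99 in this set)
    "learning_prob": 0.75,  # model-estimated learning value (0.5-1 in this set)
}

def keep(row, q_min=0.9, l_min=0.8):
    # stricter thresholds yield a smaller, cleaner subset
    return row["quality_prob"] >= q_min and row["learning_prob"] >= l_min

print(keep(row))  # False: learning_prob is below the 0.8 cutoff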
def calculate_desired_noise_rms(clean_rms, snr):
    """
    Given the Root Mean Square (RMS) of a clean sound and a desired signal-to-noise
    ratio (SNR), calculate the desired RMS of a noise sound to be mixed in.

    Based on https://github.com/Sato-Kunihiko/audio-SNR/blob/8d2c933b6c0afe6f1203251f4877e7a1068a6130/create_mixed_audio_file.py#L20

    :param clean_rms: Root Mean Square (RMS) - a value between 0.0 and 1.0
    :param snr: Signal-to-Noise (SNR) Ratio in dB - typically somewhere between -20 and 60
    :return: the desired RMS of the noise sound
    """
    a = float(snr) / 20
    noise_rms = clean_rms / (10 ** a)
    return noise_rms
0.930253
0.753603
def is_waveform_multichannel(samples):
    """
    Return bool that answers the question: Is the given ndarray a
    multichannel waveform or not?

    :param samples: numpy ndarray
    :return:
    """
    return len(samples.shape) > 1
0.772101
0.565959
def is_spectrogram_multichannel(spectrogram):
    """
    Return bool that answers the question: Is the given ndarray a
    multichannel spectrogram?

    :param spectrogram: numpy ndarray
    :return:
    """
    return len(spectrogram.shape) > 2 and spectrogram.shape[-1] > 1
0.823577
0.777215
def normalize_timestamp(timestamp):
    """
    Format a timestamp (string or numeric) into a standardized
    xxxxxxxxxx.xxxxx (10.5) format.

    Note that timestamps using values greater than or equal to November 20th,
    2286 at 17:46 UTC will use 11 digits to represent the number of seconds.

    :param timestamp: unix timestamp
    :returns: normalized timestamp as a string
    """
    return "%016.05f" % (float(timestamp))
0.860472
0.780662
def unpack_str(byteseq):
    """Unpack a byte sequence into a string."""
    return byteseq.decode()
0.721743
0.609408
def num_model_detection_error(ground_truth_vps, detected_vps):
    """Measures error in the number of detected vanishing points.

    Returns:
        Integer, positive when there are too many VPs, negative
        when there are too few.
    """
    return len(detected_vps) - len(ground_truth_vps)
0.785679
0.801237
def binary_search_iterative(array, item):
    """Time Complexity: O(log n) because you are constantly dividing the
    length of array by 2 until array length is 1
    Space Complexity: O(1)
    """
    if len(array) == 0:
        return None
    left, right = 0, len(array) - 1
    while left <= right:
        middle = left + (right - left) // 2
        if item == array[middle]:
            return middle
        elif item > array[middle]:
            left = middle + 1
        else:
            right = middle - 1
    return None
0.755997
0.620765
import torch


def smooth_l1_loss(pred, target, beta=1.0):
    """Smooth l1 loss.

    :param pred: predictions
    :param target: targets
    :param beta: threshold between the quadratic and linear regimes
    :return: loss
    """
    assert beta > 0
    assert pred.size() == target.size() and target.numel() > 0
    diff = torch.abs(pred - target)
    loss = torch.where(diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta)
    return loss
0.768907
0.611295
import torch


def _expand_binary_labels(labels, label_weights, label_channels):
    """Expand binary labels.

    :param labels: labels
    :param label_weights: label weights
    :param label_channels: label channels
    :return: binary label and label weights
    """
    bin_labels = labels.new_full((labels.size(0), label_channels), 0)
    inds = torch.nonzero(labels >= 1).squeeze()
    if inds.numel() > 0:
        bin_labels[inds, labels[inds] - 1] = 1
    if label_weights is None:
        bin_label_weights = None
    else:
        bin_label_weights = label_weights.view(-1, 1).expand(
            label_weights.size(0), label_channels)
    return bin_labels, bin_label_weights
0.779154
0.550547
def normalize_pack_version(version):
    """
    Normalize old, pre StackStorm v2.1 invalid semver version strings
    (e.g. 0.2) to valid semver version strings (0.2.0).

    :rtype: ``str``
    """
    version = str(version)
    version_separator_count = version.count('.')
    if version_separator_count == 1:
        version = version + '.0'
    return version
0.719482
0.505249
def roundup_to_integer_multiple(x, factor):
    """Round up integer x to the nearest integer multiple of integer factor.
    Returns x if factor is set to -1. Both x and factor must otherwise be
    positive."""
    # ensure integers
    assert int(x) == x, "The input x is not an integer."
    assert int(factor) == factor, "The input factor is not an integer."
    # use -1 to indicate no padding needed
    if factor == -1:
        return x
    # ensure positive values
    assert factor > 0 and x > 0, "Factor and x are <= 0."
    if x < factor:
        return factor
    elif x % factor == 0:
        return x
    else:
        return x + (factor - (x % factor))
0.886039
0.563828
def calculate_matvec_accumulator_range(matrix, vec_dt):
    """Calculate the minimum and maximum possible result (accumulator) values
    for a dot product x * A, given matrix A of dims (MW, MH), and vector
    (1, MW) with datatype vec_dt. Returns (acc_min, acc_max).
    """
    min_weight = matrix.min()
    max_weight = matrix.max()
    perceptive_field_elems = matrix.shape[0]
    min_input = vec_dt.min()
    max_input = vec_dt.max()
    # calculate minimum and maximum values of accumulator
    # assume inputs span the whole range of the input datatype
    acc_min = perceptive_field_elems * min(
        min_weight * max_input,
        min_weight * min_input,
        max_weight * max_input,
        max_weight * min_input,
    )
    acc_max = perceptive_field_elems * max(
        min_weight * max_input,
        min_weight * min_input,
        max_weight * max_input,
        max_weight * min_input,
    )
    return (acc_min, acc_max)
0.871146
0.762114
import torch


def cam2pixel(cam_coords, proj_c2p_rot, proj_c2p_tr, padding_mode):
    """Transform coordinates in the camera frame to the pixel frame.

    Args:
        cam_coords: pixel coordinates defined in the first camera coordinates system -- [B, 4, H, W]
        proj_c2p_rot: rotation matrix of cameras -- [B, 3, 4]
        proj_c2p_tr: translation vectors of cameras -- [B, 3, 1]
    Returns:
        array of [-1,1] coordinates -- [B, 2, H, W]
    """
    b, _, h, w = cam_coords.size()
    cam_coords_flat = cam_coords.view(b, 3, -1)  # [B, 3, H*W]
    if proj_c2p_rot is not None:
        pcoords = proj_c2p_rot.bmm(cam_coords_flat)
    else:
        pcoords = cam_coords_flat

    if proj_c2p_tr is not None:
        pcoords = pcoords + proj_c2p_tr  # [B, 3, H*W]
    X = pcoords[:, 0]
    Y = pcoords[:, 1]
    Z = pcoords[:, 2].clamp(min=1e-3)

    # Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1) [B, H*W]
    X_norm = 2 * (X / Z) / (w - 1) - 1
    Y_norm = 2 * (Y / Z) / (h - 1) - 1  # Idem [B, H*W]
    if padding_mode == 'zeros':
        X_mask = ((X_norm > 1) + (X_norm < -1)).detach()
        # make sure that no point in warped image is a combination of im and gray
        X_norm[X_mask] = 2
        Y_mask = ((Y_norm > 1) + (Y_norm < -1)).detach()
        Y_norm[Y_mask] = 2

    pixel_coords = torch.stack([X_norm, Y_norm], dim=2)  # [B, H*W, 2]
    return pixel_coords.view(b, h, w, 2)
0.805058
0.637045
import torch


def euler2mat(angle):
    """Convert euler angles to rotation matrix.

    Reference: https://github.com/pulkitag/pycaffe-utils/blob/master/rot_utils.py#L174

    Args:
        angle: rotation angle along 3 axis (in radians) -- size = [B, 3]
    Returns:
        Rotation matrix corresponding to the euler angles -- size = [B, 3, 3]
    """
    B = angle.size(0)
    x, y, z = angle[:, 0], angle[:, 1], angle[:, 2]

    cosz = torch.cos(z)
    sinz = torch.sin(z)

    zeros = z.detach() * 0
    ones = zeros.detach() + 1
    zmat = torch.stack([cosz, -sinz, zeros,
                        sinz, cosz, zeros,
                        zeros, zeros, ones], dim=1).view(B, 3, 3)

    cosy = torch.cos(y)
    siny = torch.sin(y)

    ymat = torch.stack([cosy, zeros, siny,
                        zeros, ones, zeros,
                        -siny, zeros, cosy], dim=1).view(B, 3, 3)

    cosx = torch.cos(x)
    sinx = torch.sin(x)

    xmat = torch.stack([ones, zeros, zeros,
                        zeros, cosx, -sinx,
                        zeros, sinx, cosx], dim=1).view(B, 3, 3)

    rotMat = xmat.bmm(ymat).bmm(zmat)
    return rotMat
0.950353
0.861538
import torch


def quat2mat(quat):
    """Convert quaternion coefficients to rotation matrix.

    Args:
        quat: first three coeff of quaternion of rotation. fourth is then
            computed to have a norm of 1 -- size = [B, 3]
    Returns:
        Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]
    """
    norm_quat = torch.cat([quat[:, :1].detach() * 0 + 1, quat], dim=1)
    norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True)
    w, x, y, z = norm_quat[:, 0], norm_quat[:, 1], norm_quat[:, 2], norm_quat[:, 3]

    B = quat.size(0)

    w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
    wx, wy, wz = w * x, w * y, w * z
    xy, xz, yz = x * y, x * z, y * z

    rotMat = torch.stack([w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz,
                          2 * wz + 2 * xy, w2 - x2 + y2 - z2, 2 * yz - 2 * wx,
                          2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2],
                         dim=1).view(B, 3, 3)
    return rotMat
0.914558
0.940024
def elemwise_mul(a, b):
    """
    a: A theano matrix
    b: A theano matrix
    Returns the elementwise product of a and b
    """
    return a * b
0.713531
0.629276
import torch


def l2_norm(input, axis=1):
    """l2 normalization.

    Args:
        input (torch.Tensor): The input tensor.
        axis (int, optional): Specifies which axis of input to calculate the
            norm across. Defaults to 1.

    Returns:
        Tensor: Tensor after L2 normalization per-instance.
    """
    norm = torch.norm(input, 2, axis, True)
    output = torch.div(input, norm)
    return output
0.818193
0.500122
def rectangle_centroid(rectangle):
    """get the centroid of the rectangle

    Keyword arguments:
    rectangle -- polygon geojson object

    return centroid
    """
    bbox = rectangle['coordinates'][0]
    xmin = bbox[0][0]
    ymin = bbox[0][1]
    xmax = bbox[2][0]
    ymax = bbox[2][1]
    xwidth = xmax - xmin
    ywidth = ymax - ymin
    return {'type': 'Point',
            'coordinates': [xmin + xwidth / 2, ymin + ywidth / 2]}
0.817684
0.666021
def fixed_time_horizon(df, column='close', lookback=20):
    """
    Fixed-time Horizon

    As it relates to finance, virtually all ML papers label observations
    using the fixed-time horizon method. Fixed-time horizon is presented as
    one of the main procedures to label data when it comes to processing
    financial time series for machine learning.

    Parameters
    ----------
    df: pd.DataFrame
    column: str
        Choose from "open", "high", "low", and "close."
    lookback: int
        The number of days to look ahead.

    References
    ----------
    1. https://mlfinlab.readthedocs.io/en/latest/labeling/labeling_fixed_time_horizon.html
    2. https://arxiv.org/pdf/1603.08604.pdf
    3. https://quantdare.com/4-simple-ways-to-label-financial-data-for-machine-learning/
    4. De Prado, Advances in financial machine learning, 2018
    5. Dixon et al., Classification-based financial markets prediction using deep neural networks, 2017
    """
    price = df[column]
    label = (price.shift(-lookback) / price > 1).astype(int)
    return label
0.937002
0.756942
def _make_cache_key(times, targets):
    """
    Make a unique key to reference this combination of ``times`` and
    ``targets``.

    Often, we wish to store expensive calculations for a combination of
    ``targets`` and ``times`` in a cache on an ``observer`` object. This
    routine will provide an appropriate, hashable, key to store these
    calculations in a dictionary.

    Parameters
    ----------
    times : `~astropy.time.Time`
        Array of times on which to test the constraint.
    targets : `~astropy.coordinates.SkyCoord`
        Target or list of targets.

    Returns
    -------
    cache_key : tuple
        A hashable tuple for use as a cache key
    """
    # make a tuple from times
    try:
        timekey = tuple(times.jd) + times.shape
    except BaseException:  # must be scalar
        timekey = (times.jd,)
    # make hashable thing from targets coords
    try:
        if hasattr(targets, 'frame'):
            # treat as a SkyCoord object. Accessing the longitude
            # attribute of the frame data should be unique and is
            # quicker than accessing the ra attribute.
            targkey = tuple(targets.frame.data.lon.value.ravel()) + targets.shape
        else:
            # assume targets is a string.
            targkey = (targets,)
    except BaseException:
        targkey = (targets.frame.data.lon,)
    return timekey + targkey
0.903967
0.904693
def min_best_rescale(vals, min_val, max_val, less_than_min=1):
    """
    rescales an input array ``vals`` to be a score (between zero and one),
    where the ``min_val`` goes to one, and the ``max_val`` goes to zero.

    Parameters
    ----------
    vals : array-like
        the values that need to be rescaled to be between 0 and 1
    min_val : float
        best value cared about (rescales to 1)
    max_val : float
        worst acceptable value (rescales to 0)
    less_than_min : 0 or 1
        what is returned for ``vals`` below ``min_val``. (in some cases
        anything less than ``min_val`` should also return one, in some
        cases it should return zero)

    Returns
    -------
    array of floats between 0 and 1 inclusive rescaled so that
    ``vals`` equal to ``min_val`` equal 1 and those equal to
    ``max_val`` equal 0

    Examples
    --------
    rescale airmasses to between 0 and 1, with the best (1)
    and worst (2.25). All values outside the range should
    return 0.

    >>> from astroplan.constraints import min_best_rescale
    >>> import numpy as np
    >>> airmasses = np.array([1, 1.5, 2, 3, 0])
    >>> min_best_rescale(airmasses, 1, 2.25, less_than_min = 0)  # doctest: +FLOAT_CMP
    array([ 1. , 0.6, 0.2, 0. , 0. ])
    """
    rescaled = (vals - max_val) / (min_val - max_val)
    below = vals < min_val
    above = vals > max_val
    rescaled[below] = less_than_min
    rescaled[above] = 0
    return rescaled
0.937947
0.981095
def max_best_rescale(vals, min_val, max_val, greater_than_max=1):
    """
    rescales an input array ``vals`` to be a score (between zero and one),
    where the ``max_val`` goes to one, and the ``min_val`` goes to zero.

    Parameters
    ----------
    vals : array-like
        the values that need to be rescaled to be between 0 and 1
    min_val : float
        worst acceptable value (rescales to 0)
    max_val : float
        best value cared about (rescales to 1)
    greater_than_max : 0 or 1
        what is returned for ``vals`` above ``max_val``. (in some cases
        anything higher than ``max_val`` should also return one, in some
        cases it should return zero)

    Returns
    -------
    array of floats between 0 and 1 inclusive rescaled so that
    ``vals`` equal to ``min_val`` equal 0 and those equal to
    ``max_val`` equal 1

    Examples
    --------
    rescale an array of altitudes to be between 0 and 1,
    with the best (60) going to 1 and worst (35) going to 0.
    For values outside the range, the rescale should return
    0 below 35 and 1 above 60.

    >>> from astroplan.constraints import max_best_rescale
    >>> import numpy as np
    >>> altitudes = np.array([20, 30, 40, 45, 55, 70])
    >>> max_best_rescale(altitudes, 35, 60)  # doctest: +FLOAT_CMP
    array([ 0. , 0. , 0.2, 0.4, 0.8, 1. ])
    """
    rescaled = (vals - min_val) / (max_val - min_val)
    below = vals < min_val
    above = vals > max_val
    rescaled[below] = 0
    rescaled[above] = greater_than_max
    return rescaled
0.942354
0.984796
def convert_qkv_weight(cfg, value):
    """
    Convert qkv.weight to be compatible with LiBai transformer layer

    Args:
        cfg: config file
        value: qkv.weight in the loaded checkpoint
    """
    num_heads = cfg.model.num_heads
    hidden_size = cfg.model.embed_dim
    head_size = int(hidden_size / num_heads)
    qkv_weight = (
        value.view([3, num_heads, head_size, hidden_size])
        .permute(1, 0, 2, 3)
        .contiguous()
        .view(hidden_size * 3, hidden_size)
    )
    return qkv_weight
0.88136
0.689458
def convert_qkv_bias(cfg, value):
    """
    Convert qkv.bias to be compatible with LiBai transformer layer

    Args:
        cfg: config file
        value: qkv.bias in the loaded checkpoint
    """
    num_heads = cfg.model.num_heads
    hidden_size = cfg.model.embed_dim
    head_size = int(hidden_size / num_heads)
    qkv_bias = (
        value.view(3, num_heads, head_size)
        .permute(1, 0, 2)
        .contiguous()
        .view(hidden_size * 3)
    )
    return qkv_bias
0.914901
0.817137
def get_supported_schedulers():
    """
    Return a tuple of the schedulers supported by parallelcluster.

    :return: a tuple of strings of the supported schedulers
    """
    return "sge", "torque", "slurm", "awsbatch"
0.744006
0.517937
def textBoxSize(txt, transformation=None, figure=None):
    """Get the width and height of a text object's bounding box transformed
    to the desired coordinates. Defaults to figure coordinates if
    transformation is None."""
    fig = txt.get_figure() if figure is None else figure
    if transformation is None:
        transformation = fig.transFigure
    coordConvert = transformation.inverted().transform
    bboxDisp = txt.get_window_extent(fig.canvas.renderer)
    bboxConv = coordConvert(bboxDisp)
    w = bboxConv[1, 0] - bboxConv[0, 0]
    h = bboxConv[1, 1] - bboxConv[0, 1]
    return w, h
0.802517
0.693596
def pretty_date(ago):
    """
    Process a timedelta object.
    From https://stackoverflow.com/questions/1551382/user-friendly-time-format-in-python
    """
    second_diff = ago.seconds
    day_diff = ago.days

    if day_diff < 0:
        return ''

    # use floor division so Python 3 does not render floats like "2.5 weeks ago"
    if day_diff == 0:
        if second_diff < 10:
            return "just now"
        if second_diff < 60:
            return str(second_diff) + " seconds ago"
        if second_diff < 120:
            return "a minute ago"
        if second_diff < 3600:
            return str(second_diff // 60) + " minutes ago"
        if second_diff < 7200:
            return "an hour ago"
        if second_diff < 86400:
            return str(second_diff // 3600) + " hours ago"
    if day_diff == 1:
        return "Yesterday"
    if day_diff < 7:
        return str(day_diff) + " days ago"
    if day_diff < 31:
        if day_diff // 7 == 1:
            return str(day_diff // 7) + " week ago"
        return str(day_diff // 7) + " weeks ago"
    if day_diff < 365:
        if day_diff // 30 == 1:
            return str(day_diff // 30) + " month ago"
        return str(day_diff // 30) + " months ago"
    if day_diff // 365 == 1:
        return str(day_diff // 365) + " year ago"
    return str(day_diff // 365) + " years ago"
0.736401
0.514217
def combine_dict(d1, d2):
    """Creates a dictionary which has entries from both of them.

    :param d1: dictionary 1
    :param d2: dictionary 2
    :return: resulting dictionary
    """
    d = d1.copy()
    d.update(d2)
    return d
0.723993
0.975414
def lift_to_dimension(A, dim):
    """Creates a view of A of dimension dim (by adding dummy dimensions if
    necessary).

    :param A: numpy array
    :param dim: desired dimension of view
    :return: returns view of A of appropriate dimension
    """
    current_dim = len(A.shape)
    if current_dim > dim:
        raise ValueError('Can only add dimensions, but not remove them')

    if current_dim == dim:
        return A
    else:
        return A.reshape([1] * (dim - current_dim) + list(A.shape))
0.769514
0.587647
def get_dim_of_affine_transform(Ab):
    """Returns the number of dimensions corresponding to an affine
    transformation of the form y = Ax + b stored in a column vector. For
    A = [a1, a2, a3], the parameter vector is simply [a1; a2; a3; b], i.e.,
    all columns stacked on top of each other.

    :param Ab: parameter vector
    :return: dimensionality of transform (1, 2, or 3)
    """
    nr = len(Ab)
    if nr == 2:
        return 1
    elif nr == 6:
        return 2
    elif nr == 12:
        return 3
    else:
        raise ValueError('Only supports dimensions 1, 2, and 3.')
0.795975
0.727129
def t2np(v):
    """
    Takes a torch array and returns it as a numpy array on the cpu

    :param v: torch array
    :return: numpy array
    """
    return (v.detach()).cpu().numpy()
0.77518
0.791821
def cxyz_to_xyzc(v):
    """
    Moves the channel dimension of a batched torch array from position 1 to
    the last position, i.e. (B, C, X, Y[, Z]) -> (B, X, Y[, Z], C).

    :param v: torch array
    :return: permuted torch array
    """
    dim = len(v.shape) - 2
    if dim == 2:
        v = v.permute(0, 2, 3, 1)
    if dim == 3:
        v = v.permute(0, 2, 3, 4, 1)
    return v
0.725357
0.735642
def best_scale(number):
    """Scale and units for a number with proper prefix."""
    absnr = abs(number)

    if absnr == 0:
        return 1, ' '
    if absnr < 0.99999999e-9:
        return 1e12, 'p'
    if absnr < 0.99999999e-6:
        return 1e9, 'n'
    if absnr < 0.99999999e-3:
        return 1e6, 'µ'
    if absnr < 0.99999999:
        return 1e3, 'm'
    if absnr < 0.99999999e3:
        return 1, ' '
    if absnr < 0.99999999e6:
        return 1e-3, 'k'
    if absnr < 0.99999999e9:
        return 1e-6, 'M'
    return 1e-9, 'G'
0.702326
0.625495
def crop_img(img, relative_corners):
    """relative_corners are floats between 0 and 1 designating where the
    corners of a crop box should be:
    [[top_left_x, top_left_y], [bottom_right_x, bottom_right_y]].
    e.g. [[0, 0], [1, 1]] would be the full image,
    [[0.5, 0.5], [1, 1]] would be bottom right."""
    rc = relative_corners
    raw_height, raw_width = img.shape[:2]
    top_left_pix = [int(rc[0][0] * raw_width), int(rc[0][1] * raw_height)]
    bottom_right_pix = [int(rc[1][0] * raw_width), int(rc[1][1] * raw_height)]
    img_cropped = img[top_left_pix[1]:bottom_right_pix[1],
                      top_left_pix[0]:bottom_right_pix[0]]
    return img_cropped
0.809765
0.708015
def loss(y_pred, y_true, metric):
    """Compute loss function between prediction and ground truth.

    Loss function given by a Riemannian metric, expressed as the squared
    geodesic distance between the prediction and the ground truth.

    Parameters
    ----------
    y_pred : array-like
        Prediction.
    y_true : array-like
        Ground truth.
    metric : RiemannianMetric
        Metric providing the squared geodesic distance.

    Returns
    -------
    loss : array-like
        Squared geodesic distance between prediction and ground truth.
    """
    loss = metric.squared_dist(y_pred, y_true)
    return loss
0.947884
0.811265
def kl_to_prior(means, log_stds, stds):
    """
    KL between a Gaussian and a standard Gaussian.

    https://stats.stackexchange.com/questions/60680/kl-divergence-between-two-multivariate-gaussians
    """
    return 0.5 * (
        - 2 * log_stds  # log std_prior = 0
        - 1  # d = 1
        + stds ** 2
        + means ** 2
    )
0.72952
0.557243
def getConflictingAssignments(schedule):
    """
    Get list of assignments which exceeded rotation capacity.

    Parameters:
        schedule (dict): overall assignments

    Returns:
        conflictingAssignmentsByRotation (dict): overall schedule with
        conflicting assignments
    """
    # placeholder: conflict detection not implemented yet
    return {}
0.777131
0.615203
def quadratic_formula(polynomial):
    """
    input is single-variable polynomial of degree 2
    returns zeros
    """
    if len(polynomial.term_matrix) == 3:
        if polynomial.term_matrix[2][1] == 1:
            a, b = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0]
            return 0, -b/a
        a, c = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0]
        return (-c/a)**.5, -(-c/a)**.5
    if len(polynomial.term_matrix) == 2:
        a, b, c = polynomial.term_matrix[1][0], 0, 0
    elif len(polynomial.term_matrix) == 3:
        a, b, c = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0], 0
    else:
        a, b, c = (polynomial.term_matrix[1][0],
                   polynomial.term_matrix[2][0],
                   polynomial.term_matrix[3][0])
    # note the parentheses around 2*a: x = (-b +/- sqrt(b^2 - 4ac)) / (2a)
    ans1 = (-b + (b**2 - 4*a*c)**.5) / (2*a)
    ans2 = (-b - (b**2 - 4*a*c)**.5) / (2*a)
    if ans1 == ans2:
        return ans1
    return ans1, ans2
0.739234
0.795221
def generate_coordinates(coords):
    """
    A function that returns all possible triples of coords

    Parameters:
        coords: a 1-d torch tensor of coordinates (the `repeat` calls
            follow torch, not numpy, semantics)

    Returns:
        x: the first coordinate of possible triples
        y: the second coordinate of possible triples
        z: the third coordinate of possible triples
    """
    x = coords.reshape(-1, 1).repeat(1, len(coords) * len(coords)).flatten()
    y = coords.reshape(-1, 1).repeat(1, len(coords)).flatten().repeat(len(coords))
    z = coords.reshape(-1, 1).flatten().repeat(len(coords) * len(coords))
    return x, y, z
0.914827
0.929824
def midpoint_rule(f, M=100000):
    """Integrate f(x) over [0,1] using M intervals."""
    from numpy import sum, linspace
    dx = 1.0 / M                          # interval length
    x = linspace(dx / 2, 1 - dx / 2, M)   # integration points
    return dx * sum(f(x))
0.700895
0.504639
def dollar(amount):
    """
    Given an amount as a number,
    return a string formatted as a dollar amount.
    """
    amount = round(amount, 2)
    return '${0:0.2f}'.format(amount)
0.778986
0.924688
def dataqc_condcompress(p_orig, p_new, c_orig, cpcor=-9.57e-8):
    """
    Description:

        Implementation of the Sea-Bird conductivity compressibility
        correction, scaling the input conductivity based on ratio of the
        original pressure and the updated pressure.

    Implemented by:

        2013-04-07: Christopher Wingard. Initial python implementation.

    Usage:

        c_new = dataqc_condcompress(p_orig, p_new, c_orig, cpcor)

            where

        c_new = updated conductivity record [S/m]
        p_orig = original pressure used to calculate original conductivity,
            this typically the L1a PRESWAT [dbar]
        p_new = updated pressure, typically L1b PRESWAT [dbar]
        c_orig = original conductivity record, typically L1a CONDWAT [S/m]
        cpcor = pressure correction coefficient used to calculate original
            conductivity, default is -9.57e-8

    References:

        OOI (2012). Data Product Specification for Conductivity
            Compressibility Correction. Document Control Number 1341-10030.
            https://alfresco.oceanobservatories.org/ (See: Company Home >>
            OOI >> Controlled >> 1000 System Level >>
            1341-10030_Data_Product_SPEC_CNDCMPR_OOI.pdf)
    """
    c_new = c_orig * (1 + cpcor * p_orig) / (1 + cpcor * p_new)
    return c_new
0.825906
0.751785
def delta(a, b):
    """Return change in percent (or None if undefined).

    The delta in percent is rounded to one decimal.
    """
    if a is None or b is None:
        return None
    if a == 0.0 and b == 0.0:
        return 0.0
    assert a != 0.0 and b != 0.0
    return round((b - a) * 1000.0 / a) / 10.0
0.741393
0.654322
def adjust_contrast(image, contrast_level):
    """Return the image scaled to a certain contrast level in [0, 1].

    parameters:
    - image: a numpy.ndarray
    - contrast_level: a scalar in [0, 1]; with 1 -> full contrast
    """
    assert contrast_level >= 0.0, "contrast_level too low."
    assert contrast_level <= 1.0, "contrast_level too high."
    return (1 - contrast_level) / 2.0 + image.dot(contrast_level)
0.89753
0.78964
def to_list(data_in):
    """Convert the data into a list. Does not pack lists into a new one.

    If your input is, for example, a string or a list of strings, or a
    tuple filled with strings, you have, in general, a problem:

    - just iterating through the object will fail because it iterates
      through the characters of the string.
    - using list(obj) converts the tuple, leaves the list but splits the
      strings characters into single elements of a new list.
    - using [obj] creates a list containing a string, but also a list
      containing a list or a tuple, which you did not want to.

    Solution: use to_list(obj), which creates a new list in case the object
    is a single object (a string is a single object in this sense) or
    converts to a list if the object is already a container for several
    objects.

    Parameters
    ----------
    data_in : any obj
        So far, any object can be entered.

    Returns
    -------
    out : list
        Return a list containing the object or the object converted to a
        list.
    """
    if isinstance(data_in, (str, int, float)):
        data_in = [data_in]
    data_in = list(data_in)
    return data_in
0.814754
0.701317
def dot(a, b, out=None):
    """Returns a dot product of two arrays.

    For arrays with more than one axis, it computes the dot product along the
    last axis of ``a`` and the second-to-last axis of ``b``. This is just a
    matrix product if both arrays are 2-D. For 1-D arrays, it uses their
    unique axis as an axis to take dot product over.

    Args:
        a (cupy.ndarray): The left argument.
        b (cupy.ndarray): The right argument.
        out (cupy.ndarray): Output array.

    Returns:
        cupy.ndarray: The dot product of ``a`` and ``b``.

    .. seealso:: :func:`numpy.dot`
    """
    # TODO(okuta): check type
    return a.dot(b, out)
0.814422
0.750736
def coord_to_index(coord, sl):
    """
    Takes a 3D coordinate in a cube and the cube side length.
    Returns index in flattened 3D array.
    """
    return coord[0] * sl * sl + coord[1] * sl + coord[2]
0.749546
0.992327
def index_to_coord(index, sl):
    """
    Takes an index into a flattened 3D array and its side length.
    Returns the coordinate in the cube.
    """
    coord = []
    two_d_slice_size = sl * sl
    coord.append(index // two_d_slice_size)

    remaining = index % two_d_slice_size
    coord.append(remaining // sl)
    coord.append(remaining % sl)

    return coord
0.748076
0.868102
def use_node_def_or_str(given_value, default_func):
    """Transform a value of type (None, str, Callable) to a node annotation
    function."""
    # Default: use pre-defined function from this module
    if given_value is None:
        func = default_func
    # Transform: value to function that returns the value
    elif isinstance(given_value, str):
        given_value = str(given_value)

        def func(atom):
            return given_value
    # Passthrough: value itself is a function
    else:
        func = given_value
    return func
0.745306
0.540378
def use_node_def_or_num(given_value, default_func):
    """Transform a value of type (None, int, float, Callable) to a node
    annotation function."""
    # Default: use pre-defined function from this module
    if given_value is None:
        func = default_func
    # Transform: value to function that returns the value
    elif isinstance(given_value, (int, float)):
        given_value = float(given_value)

        def func(atom):
            return given_value
    # Passthrough: value itself is a function
    else:
        func = given_value
    return func
0.737725
0.556882
def use_edge_def_or_str(given_value, default_func):
    """Transform a value of type (None, str, Callable) to an edge annotation
    function."""
    # Default: use pre-defined function from this module
    if given_value is None:
        func = default_func
    # Transform: value to function that returns the value
    elif isinstance(given_value, str):
        given_value = str(given_value)

        def func(atom1, atom2):
            return given_value
    # Passthrough: value itself is a function
    else:
        func = given_value
    return func
0.752559
0.546436
def use_edge_def_or_num(given_value, default_func):
    """Transform a value of type (None, int, float, Callable) to an edge
    annotation function."""
    # Default: use pre-defined function from this module
    if given_value is None:
        func = default_func
    # Transform: value to function that returns the value
    elif isinstance(given_value, (int, float)):
        given_value = float(given_value)

        def func(atom1, atom2):
            return given_value
    # Passthrough: value itself is a function
    else:
        func = given_value
    return func
0.761361
0.562177
import torch


def gnmt_length_penalty(lengths, alpha=0.8):
    """Calculate a length penalty from https://arxiv.org/pdf/1609.08144.pdf

    The paper states the penalty as (5 + |Y|)^a / (5 + 1)^a. This is
    implemented as ((5 + |Y|) / 6)^a for a (very) tiny performance boost

    :param lengths: `torch.LongTensor`: [B, K] The lengths of the beams.
    :param alpha: `float`: A hyperparameter. See Table 2 for a search on
        this parameter.

    :returns: `torch.FloatTensor`: [B, K, 1] The penalties.
    """
    lengths = lengths.to(torch.float)
    penalty = torch.pow(((5 + lengths) / 6), alpha)
    return penalty.unsqueeze(-1)
0.913342
0.563798
def repeat_batch(t, K, dim=0):
    """Repeat a tensor while keeping the concept of a batch.

    :param t: `torch.Tensor`: The tensor to repeat.
    :param K: `int`: The number of times to repeat the tensor.
    :param dim: `int`: The dimension to repeat in. This should be the
        batch dimension.

    :returns: `torch.Tensor`: The repeated tensor. The new shape will be
        batch size * K at dim, the rest of the shapes will be the same.

    Example::

        >>> a = torch.arange(10).view(2, -1)
        >>> a
        tensor([[0, 1, 2, 3, 4],
                [5, 6, 7, 8, 9]])
        >>> a.repeat(2, 1)
        tensor([[0, 1, 2, 3, 4],
                [5, 6, 7, 8, 9],
                [0, 1, 2, 3, 4],
                [5, 6, 7, 8, 9]])
        >>> repeat_batch(a, 2)
        tensor([[0, 1, 2, 3, 4],
                [0, 1, 2, 3, 4],
                [5, 6, 7, 8, 9],
                [5, 6, 7, 8, 9]])
    """
    shape = t.shape
    tiling = [1] * (len(shape) + 1)
    tiling[dim + 1] = K
    tiled = t.unsqueeze(dim + 1).repeat(tiling)
    old_bsz = shape[dim]
    new_bsz = old_bsz * K
    new_shape = list(shape[:dim]) + [new_bsz] + list(shape[dim + 1:])
    return tiled.view(new_shape)
0.946076
0.911928
import torch


def bilinear_interpolate_torch(im, x, y):
    """
    Args:
        im: (H, W, C) [y, x]
        x: (N)
        y: (N)

    Returns:
        interpolated values at the N query points
    """
    x0 = torch.floor(x).long()
    x1 = x0 + 1
    y0 = torch.floor(y).long()
    y1 = y0 + 1

    x0 = torch.clamp(x0, 0, im.shape[1] - 1)
    x1 = torch.clamp(x1, 0, im.shape[1] - 1)
    y0 = torch.clamp(y0, 0, im.shape[0] - 1)
    y1 = torch.clamp(y1, 0, im.shape[0] - 1)

    Ia = im[y0, x0]
    Ib = im[y1, x0]
    Ic = im[y0, x1]
    Id = im[y1, x1]

    wa = (x1.type_as(x) - x) * (y1.type_as(y) - y)
    wb = (x1.type_as(x) - x) * (y - y0.type_as(y))
    wc = (x - x0.type_as(x)) * (y1.type_as(y) - y)
    wd = (x - x0.type_as(x)) * (y - y0.type_as(y))
    ans = (torch.t(torch.t(Ia) * wa) + torch.t(torch.t(Ib) * wb) +
           torch.t(torch.t(Ic) * wc) + torch.t(torch.t(Id) * wd))
    return ans
0.806396
0.580798
def int_parameter(level, maxval):
    """Helper function to scale `val` between 0 and maxval.

    Args:
        level: Level of the operation that will be between [0, `PARAMETER_MAX`].
        maxval: Maximum value that the operation can have. This will be
            scaled to level/PARAMETER_MAX.

    Returns:
        An int that results from scaling `maxval` according to `level`.
    """
    return int(level * maxval / 10)
0.768386
0.610802
def float_parameter(level, maxval):
    """Helper function to scale `val` between 0 and maxval.

    Args:
        level: Level of the operation that will be between [0, `PARAMETER_MAX`].
        maxval: Maximum value that the operation can have. This will be
            scaled to level/PARAMETER_MAX.

    Returns:
        A float that results from scaling `maxval` according to `level`.
    """
    return float(level) * maxval / 10.
0.862207
0.820397
def normalize(image):
    """Roughly zero-center the input image channel-wise by subtracting the
    midpoint value 127 (a cheap stand-in for full mean/variance
    standardization)."""
    return image - 127
0.792223
0.963746
def MakeMetadataLine(label, value, indent=1):
    """Returns a string with a vertically aligned label and value.

    Labels of the same indentation level will start at the same column.
    Values will all start at the same column (unless the combined
    left-indent and label length is excessively long). If a value spans
    multiple lines, indentation will only be applied to the first line.

    Example output from several calls:

        Label1:                    Value (default indent of 1 was used)
            Sublabel1:             Value (used indent of 2 here)
        Label2:                    Value

    Args:
        label: The label to print in the first column.
        value: The value to print in the second column.
        indent: (4 * indent) spaces will be placed before the label.

    Returns:
        A string with a vertically aligned label and value.
    """
    return '{}{}'.format(((' ' * indent * 4) + label + ':').ljust(28), value)
0.90532
0.670947
def to_label(name, capitalize=True):
    """Converts `name` into label by replacing underscores by spaces. If
    `capitalize` is ``True`` (default) then the first letter of the label
    is capitalized."""
    label = name.replace("_", " ")
    if capitalize:
        label = label.capitalize()
    return label
0.731538
0.581273
def closest_ref_length(ref_lens, hyp_len):
    """
    This function finds the reference length that is closest to the
    hypothesis length. The closest reference length is referred to as the
    *r* variable from the brevity penalty formula in Papineni et al. (2002).

    :param ref_lens: The lengths of the reference translations.
    :type ref_lens: iterable(int)
    :param hyp_len: The length of the hypothesis.
    :type hyp_len: int
    :return: The length of the reference that's closest to the hypothesis.
    :rtype: int
    """
    closest_ref_len = min(
        ref_lens, key=lambda ref_len: (abs(ref_len - hyp_len), ref_len)
    )
    return closest_ref_len
0.776708
0.756897
def firing_rate(spike_train, duration):
    """Calculate firing rate for a spike train.

    Inputs:
    -------
    spike_train : array of spike times (in seconds)
    duration : length of recording (in seconds)

    Outputs:
    --------
    fr : float
        Firing rate in Hz
    """
    fr = spike_train.size / duration
    return fr
0.788176
0.967132
def get_unit_pcs(these_pc_features, index_mask, channel_mask):
    """Use the index_mask and channel_mask to return PC features for one unit.

    Inputs:
    -------
    these_pc_features : numpy.ndarray (float)
        Array of pre-computed PC features (num_spikes x num_PCs x num_channels)
    index_mask : numpy.ndarray (boolean)
        Mask for spike index dimension of pc_features array
    channel_mask : numpy.ndarray (boolean)
        Mask for channel index dimension of pc_features array

    Output:
    -------
    unit_PCs : numpy.ndarray (float)
        PCs for one unit (num_spikes x num_PCs x num_channels)
    """
    unit_PCs = these_pc_features[index_mask, :, :]
    unit_PCs = unit_PCs[:, :, channel_mask]
    return unit_PCs
0.794863
0.76291
def capitalize(text):
    """capitalizes a word, for use in rendering template

    Args:
        text (str): word to capitalize

    Returns:
        capitalized (str): capitalized word
    """
    return text[0].upper() + text[1:]
0.821116
0.759839
def rule_separation(value: float, layer1: str, layer2: str):
    """Min space between different layers"""
    error = f"min {layer1} {layer2} separation {value}um"
    return (
        f"{layer1}.separation({layer2}, {value})"
        f".output('{error}', '{error}')"
    )
0.76454
0.723187
def remove_alpha(pic):
    """
    Removes the alpha channel from an image, if it exists. Necessary for OCR.

    Args:
        pic: PIL.Image object to convert.

    Returns:
        The PIL.Image object in RGB format.
    """
    return pic.convert("RGB")
0.805288
0.673809
def is_array(signature):
    """Return True if this argument is an array.

    A dictionary is considered an array."""
    return signature[0] == "a"
0.758332
0.624007
def to_row_vec(col_vec):
    """
    :param col_vec: 2d np array
    :return: (1, n) row-vector view of the input
    """
    return col_vec.reshape(1, -1)
0.758689
0.970688
def concatenate_rounds(rounds_1, rounds_2):
    """
    :param rounds_1: list - first rounds played.
    :param rounds_2: list - second set of rounds played.
    :return: list - all rounds played.
    """
    return rounds_1 + rounds_2
0.799403
0.693077
def list_contains_round(rounds, number):
    """
    :param rounds: list - rounds played.
    :param number: int - round number.
    :return: bool - was the round played?
    """
    return number in rounds
0.702836
0.50116
def card_average(hand):
    """
    :param hand: list - cards in hand.
    :return: float - average value of the cards in the hand.
    """
    return sum(hand) / len(hand)
0.702938
0.841044
def split_num(line, chars=' ', maxsplits=1, empty=''):
    """/lazy/ wrapper, to stop us having to bounds-check when splitting.

    Arguments:
        line -- line to split
        chars -- character(s) to split line on
        maxsplits -- how many split items are returned
        empty -- character to put in place of nothing

    Returns:
        line.split(chars, items); return value is padded until
        `maxsplits + 1` number of values are present
    """
    line = line.split(chars, maxsplits)
    while len(line) <= maxsplits:
        line.append(empty)
    return line
0.756178
0.555797
def const_rate(n, p1=0.0, p2=1.0, p3=1.0):
    """
    Constant rate function.

    :param n: int - allele number (unused)
    :param p1: float - constant parameter
    :param p2: float - linear parameter (unused)
    :param p3: float - additional parameter (unused)
    :return: float - p1
    """
    return p1
0.76708
0.553143
def linear_rate(n, p1=0.0, p2=1.0, p3=1.0):
    """
    Linear rate function.

    :param n: int - allele number
    :param p1: float - constant parameter
    :param p2: float - linear parameter
    :param p3: float - additional parameter (unused)
    :return: float - p1 + p2 * n
    """
    return p1 + p2 * n
0.821939
0.580293
def n2_rate(n, p1=0.0, p2=1.0, p3=1.0):
    """
    Quadratic rate function.

    :param n: int - allele number
    :param p1: float - constant parameter
    :param p2: float - linear parameter
    :param p3: float - quadratic parameter
    :return: float - p1 + p2 * n + p3 * n * n
    """
    return p1 + p2 * n + p3 * n * n
0.752104
0.734881
def set_axis(ax, x, y, letter=None):
    """
    Formats the plot's caption.

    Parameters
    ----------
    ax: Axes object.
    x: float
        X-position of caption.
    y: float
        Y-position of caption.
    letter: string
        Caption of the plot.
        Default: None.

    Returns
    -------
    ax: modified Axes object.
    """
    ax.text(
        x, y, letter,
        fontsize=15,
        weight='bold',
        transform=ax.transAxes)
    return ax
0.923394
0.536374
def _airtovac(w):
    """Convert air wavelengths to vacuum wavelengths. Don't convert less
    than 2000 Å.

    Parameters
    ----------
    w : :class:`float`
        Wavelength [Å] of the line in air.

    Returns
    -------
    :class:`float`
        Wavelength [Å] of the line in vacuum.
    """
    if w < 2000.0:
        return w
    vac = w
    for _ in range(2):  # two refinement iterations
        sigma2 = (1.0e4 / vac) * (1.0e4 / vac)
        fact = (1.0 + 5.792105e-2 / (238.0185 - sigma2) +
                1.67917e-3 / (57.362 - sigma2))
        vac = w * fact
    return vac
0.943796
0.520862
def dict_zero(first_level_keys):
    """Initialise a dictionary with one level

    Parameters
    ----------
    first_level_keys : list
        First level data

    Returns
    -------
    one_level_dict : dict
        dictionary
    """
    one_level_dict = dict.fromkeys(first_level_keys, 0)  # set zero as argument
    return one_level_dict
0.761184
0.743215
import torch


def get_dihedral_torch(c1, c2, c3, c4):
    """Returns the dihedral angle in radians.

    Will use atan2 formula from:
    https://en.wikipedia.org/wiki/Dihedral_angle#In_polymer_physics
    Can't use torch.dot bc it does not broadcast

    Inputs:
    * c1: (batch, 3) or (3,)
    * c2: (batch, 3) or (3,)
    * c3: (batch, 3) or (3,)
    * c4: (batch, 3) or (3,)
    """
    u1 = c2 - c1
    u2 = c3 - c2
    u3 = c4 - c3

    return torch.atan2(
        ((torch.norm(u2, dim=-1, keepdim=True) * u1) *
         torch.cross(u2, u3, dim=-1)).sum(dim=-1),
        (torch.cross(u1, u2, dim=-1) * torch.cross(u2, u3, dim=-1)).sum(dim=-1)
    )
0.86852
0.701728
import torch


def distmat_loss_torch(X=None, Y=None, X_mat=None, Y_mat=None, p=2, q=2,
                       custom=None, distmat_mask=None):
    """Calculates a loss on the distance matrix - no need to align structs.

    Inputs:
    * X: (N, d) tensor. the predicted structure. One of (X, X_mat) is needed.
    * X_mat: (N, N) tensor. the predicted distance matrix. Optional ()
    * Y: (N, d) tensor. the true structure. One of (Y, Y_mat) is needed.
    * Y_mat: (N, N) tensor. the true distance matrix. Optional ()
    * p: int. power for the distance calculation (2 for euclidean)
    * q: float. power for the scaling of the loss (2 for MSE, 1 for MAE, etc)
    * custom: func or None. custom loss over distance matrices.
        ex: lambda x,y: 1 - 1/ (1 + ((x-y))**2) (1 is very bad. 0 is good)
    * distmat_mask: (N, N) mask (boolean or weights for each ij pos). optional.
    """
    assert (X is not None or X_mat is not None) and \
           (Y is not None or Y_mat is not None), \
        "The true and predicted coords or dist mats must be provided"
    # calculate distance matrices
    if X_mat is None:
        X_mat = torch.cdist(X, X, p=p)
    if Y_mat is None:
        Y_mat = torch.cdist(Y, Y, p=p)
    if distmat_mask is None:
        distmat_mask = torch.ones_like(Y_mat).bool()

    # do custom expression if passed (already reduced, so return directly)
    if custom is not None:
        return custom(X_mat, Y_mat).mean()
    # **2 ensures always positive. Later scale back to desired power
    loss = (X_mat - Y_mat) ** 2
    if q != 2:
        loss = loss ** (q / 2)
    return loss[distmat_mask].mean()
0.859752
0.665988
import numpy as np


def Kabsch(A, B):
    """Returns Kabsch-rotated matrices resulting from aligning A into B.
    Adapted from: https://github.com/charnley/rmsd/
    * Inputs:
        * A,B are (3 x N)
        * backend: one of ["numpy", "torch", "auto"] for backend choice
    * Outputs: tensor/array of shape (3 x N)
    """
    # NOTE: minimal numpy-only sketch (no torch backend, no batching)
    A_c = A - A.mean(axis=-1, keepdims=True)  # center both point clouds
    B_c = B - B.mean(axis=-1, keepdims=True)
    C = A_c @ B_c.T  # covariance matrix
    V, S, Wt = np.linalg.svd(C)
    # correct for reflection so the result is a proper rotation
    d = np.sign(np.linalg.det(V) * np.linalg.det(Wt))
    R = Wt.T @ np.diag([1.0, 1.0, d]) @ V.T
    # rotate A onto B and restore B's centroid
    A_aligned = R @ A_c + B.mean(axis=-1, keepdims=True)
    return A_aligned, B
0.735167
0.770249
def RMSD(A, B):
    """Returns RMSD score as defined here (lower is better):
    https://en.wikipedia.org/wiki/Root-mean-square_deviation_of_atomic_positions
    * Inputs:
        * A,B are (B x 3 x N) or (3 x N)
        * backend: one of ["numpy", "torch", "auto"] for backend choice
    * Outputs: tensor/array of size (B,)
    """
    # minimal sketch assuming a numpy-style `axis` kwarg (np and recent torch)
    sq_dist = ((A - B) ** 2).sum(axis=-2)  # squared per-point distances
    return (sq_dist.mean(axis=-1)) ** 0.5
0.820073
0.903081
def TMscore(A, B):
    """Returns TMscore as defined here (higher is better):
    >0.5 (likely) >0.6 (highly likely) same folding;
    ~0.2 for random structures.
    https://en.wikipedia.org/wiki/Template_modeling_score
    Warning! It's not exactly the code in:
    https://zhanglab.ccmb.med.umich.edu/TM-score/TMscore.cpp
    but will suffice for now.

    Inputs:
    * A,B are (B x 3 x N) (np.array or torch.tensor)
    * mode: one of ["numpy", "torch", "auto"] for backend
    Outputs: tensor/array of size (B,)
    """
    # minimal sketch assuming pre-aligned structures and a numpy-style
    # `axis` kwarg (np and recent torch)
    N = A.shape[-1]
    d0 = 1.24 * (max(N, 19) - 15) ** (1 / 3) - 1.8    # length-dependent scale
    d_i = (((A - B) ** 2).sum(axis=-2)) ** 0.5        # per-residue distances
    return (1 / (1 + (d_i / d0) ** 2)).mean(axis=-1)
0.788827
0.580352
def get_square(tracks, position):
    """Get square from tracks with position."""
    row, col = position
    return tracks[row][col]
0.707203
0.526404
def split_history_and_current(windowed_ts):
    """
    Returns the first n-1 columns as X, and the last column as y.
    Useful mainly for forecasting scenarios

    :param windowed_ts: a pd.DataFrame with a date index and a column per
        timestamp. see get_windowed_ts
    :return:
    """
    X = windowed_ts.iloc[:, :-1].values
    y = windowed_ts.iloc[:, -1].values
    return (X, y)
0.744099
0.775009
def calc_accuracy(pred, real):
    """
    A function to calculate the accuracy of a CNN when given a list of
    predicted classes and a list of the real classes

    Param:
        - pred, a numpy array of predicted classes
        - real, a numpy array of the real classes

    Return:
        - Accuracy as a decimal
    """
    return sum(pred == real) / len(pred)
0.753104
0.988313
def convert_to_physical(a_coeffs, b_coeffs, logic_x, logic_y):
    """
    Convert to physical coordinates from logical coordinates.

    Parameters
    ----------
    a_coeffs : array
        Perspective transformation coefficients for alpha.
    b_coeffs : array
        Perspective transformation coefficients for beta.
    logic_x : float
        Logical point in the x direction.
    logic_y : float
        Logical point in the y direction.

    Returns
    -------
    x, y : tuple
        The x and y physical values on the specified grid.
    """
    # x = a(1) + a(2)*l + a(3)*m + a(4)*l*m
    x = (a_coeffs[0]
         + a_coeffs[1] * logic_x
         + a_coeffs[2] * logic_y
         + a_coeffs[3] * logic_x * logic_y)

    # y = b(1) + b(2)*l + b(3)*m + b(4)*l*m
    y = (b_coeffs[0]
         + b_coeffs[1] * logic_x
         + b_coeffs[2] * logic_y
         + b_coeffs[3] * logic_x * logic_y)

    return x, y
0.926877
0.884888
def drop_disregard(df):
    """
    If one token in a note is marked 'disregard', remove the whole note
    from df.

    Parameters
    ----------
    df: DataFrame
        parsed token-level annotations df (created by `parse_annotations.py`)

    Returns
    -------
    DataFrame
        df without 'disregard' notes
    """
    df['disregard_note'] = df.groupby('NotitieID').disregard.transform('any')
    return df.query(
        "not disregard_note"
    ).drop(columns=['disregard', 'disregard_note'])
0.802517
0.611527
def fix_week_14(df):
    """
    For annotations from week 14:
    - Replace MBW values with `False`
    - Replace MBW-lvl values with NaN

    We remove this domain from week 14 since the guidelines for it were
    changed after this week.

    Parameters
    ----------
    df: DataFrame
        parsed token-level annotations df (created by `parse_annotations.py`)

    Returns
    -------
    DataFrame
        df without MBW and MBW_lvl labels for week 14
    """
    df['MBW'] = df.MBW.mask(df.batch == 'week_14', other=False)
    df['MBW_lvl'] = df.MBW_lvl.mask(df.batch == 'week_14')
    return df
0.868576
0.586079
def gaussian_product_center(alpha1, A, alpha2, B):
    """
    The center of the Gaussian resulting from the product of two Gaussians:

    >>> gaussian_product_center(1,array((0,0,0),'d'),1,array((0,0,0),'d'))
    array([ 0., 0., 0.])
    """
    return (alpha1*A + alpha2*B) / (alpha1 + alpha2)
0.734501
0.534005
def smoothing_error(x, x_a, A):
    """Return the smoothing error through the averaging kernel.

    Parameters:
        x (ndarray): Atmospheric profile.
        x_a (ndarray): A priori profile.
        A (ndarray): Averaging kernel matrix.

    Returns:
        ndarray: Smoothing error due to correlation between layers.
    """
    return A @ (x - x_a)
0.917935
0.960584
def get_f_min(f_max, cents_per_value, v_min, v_max):
    """
    This function takes in a y value max and min, a maximum frequency and a
    y scale parameter in units of cents/y value, and returns the minimum
    frequency that fits to such a scale.

    Cents are a logarithmic unit of tone intervals
    (https://en.wikipedia.org/wiki/Cent_(music)).

    Parameters
    ----------
    f_max : float
        Maximum frequency.
    cents_per_value : float
        A y scale parameter in units of cents/y value.
    v_min : float
        Minimum y value.
    v_max : float
        Maximum y value.

    Returns
    -------
    float
        Minimum frequency.
    """
    f_min = f_max / (2 ** ((v_max - v_min) * cents_per_value / 1200))
    return f_min
0.937168
0.675141
def get_f_max(f_min, cents_per_value, v_min, v_max):
    """
    This function takes in a y value max and min, a minimum frequency and a
    y scale parameter in units of cents/y value, and returns the maximum
    frequency that fits to such a scale.

    Cents are a logarithmic unit of tone intervals
    (https://en.wikipedia.org/wiki/Cent_(music)).

    Parameters
    ----------
    f_min : float
        Minimum frequency.
    cents_per_value : float
        A y scale parameter in units of cents/y value.
    v_min : float
        Minimum y value.
    v_max : float
        Maximum y value.

    Returns
    -------
    float
        Maximum frequency.
    """
    f_max = f_min * (2 ** ((v_max - v_min) * cents_per_value / 1200))
    return f_max
0.934189
0.710622
def flatten(tensor):
    """Flattens a given tensor such that the channel axis is first.
    The shapes are transformed as follows:
       (N, C, D, H, W) -> (C, N * D * H * W)
    """
    # number of channels
    C = tensor.size(1)
    # new axis order
    axis_order = (1, 0) + tuple(range(2, tensor.dim()))
    # Transpose: (N, C, D, H, W) -> (C, N, D, H, W)
    transposed = tensor.permute(axis_order)
    # Flatten: (C, N, D, H, W) -> (C, N * D * H * W)
    return transposed.contiguous().view(C, -1)
0.85741
0.681264
import torch


def expand_as_one_hot(input, C, ignore_index=None):
    """
    Converts an NxHxW label image to an NxCxHxW one-hot output, where each
    label gets converted to its corresponding one-hot vector.

    :param input: 3D input image (NxHxW)
    :param C: number of channels/labels
    :param ignore_index: ignore index to be kept during the expansion
    :return: 4D output image (NxCxHxW)
    """
    assert input.dim() == 3

    # expand the input tensor to Nx1xHxW before scattering
    input = input.unsqueeze(1)
    # create result tensor shape (NxCxHxW)
    shape = list(input.size())
    shape[1] = C

    if ignore_index is not None:
        # create ignore_index mask for the result
        mask = input.expand(shape) == ignore_index
        # clone the src tensor and zero out ignore_index in the input
        input = input.clone()
        input[input == ignore_index] = 0
        # scatter to get the one-hot tensor
        result = torch.zeros(shape).to(input.device).scatter_(1, input, 1)
        # bring back the ignore_index in the result
        result[mask] = ignore_index
        return result
    else:
        # scatter to get the one-hot tensor
        return torch.zeros(shape).to(input.device).scatter_(1, input, 1)
0.766992
0.659302
import torch


def batch_quat_to_rotmat(q, out=None):
    """
    quaternion a + bi + cj + dk should be given in the form [a,b,c,d]

    :param q: quaternion batch, shape (batchsize, 4)
    :param out: optional output tensor, shape (batchsize, 3, 3)
    :return: batch of rotation matrices
    """
    batchsize = q.size(0)

    if out is None:
        out = q.new_empty(batchsize, 3, 3)

    # 2 / squared quaternion 2-norm
    s = 2 / torch.sum(q.pow(2), 1)

    # coefficients of the Hamilton product of the quaternion with itself
    h = torch.bmm(q.unsqueeze(2), q.unsqueeze(1))

    out[:, 0, 0] = 1 - (h[:, 2, 2] + h[:, 3, 3]).mul(s)
    out[:, 0, 1] = (h[:, 1, 2] - h[:, 3, 0]).mul(s)
    out[:, 0, 2] = (h[:, 1, 3] + h[:, 2, 0]).mul(s)
    out[:, 1, 0] = (h[:, 1, 2] + h[:, 3, 0]).mul(s)
    out[:, 1, 1] = 1 - (h[:, 1, 1] + h[:, 3, 3]).mul(s)
    out[:, 1, 2] = (h[:, 2, 3] - h[:, 1, 0]).mul(s)
    out[:, 2, 0] = (h[:, 1, 3] - h[:, 2, 0]).mul(s)
    out[:, 2, 1] = (h[:, 2, 3] + h[:, 1, 0]).mul(s)
    out[:, 2, 2] = 1 - (h[:, 1, 1] + h[:, 2, 2]).mul(s)
    return out
0.855127
0.810216
import torch


def cosine_distance(memory_matrix, cos_keys):
    """
    compute the cosine similarity between keys to each of the memory slot.

    Parameters:
    ----------
    memory_matrix: Tensor (batch_size, mem_slot, mem_size)
        the memory matrix to lookup in
    cos_keys: Tensor (batch_size, mem_size, number_of_keys)
        the keys to query the memory with

    Returns: Tensor (batch_size, mem_slot, number_of_keys)
        The list of lookup weightings for each provided key
    """
    memory_norm = torch.norm(memory_matrix, 2, 2, keepdim=True)
    keys_norm = torch.norm(cos_keys, 2, 1, keepdim=True)

    normalized_mem = torch.div(
        memory_matrix, memory_norm.expand_as(memory_matrix) + 1e-8)
    normalized_keys = torch.div(
        cos_keys, keys_norm.expand_as(cos_keys) + 1e-8)

    return torch.bmm(normalized_mem, normalized_keys)
0.803174
0.748168
def center_to_corner(boxes):
    """
    Convert bounding boxes from center format (cx, cy, width, height) to
    corner format (xmin, ymin, xmax, ymax)

    Args:
        - boxes: numpy array or tensor containing all the boxes to be converted

    Returns:
        - A numpy array or tensor of converted boxes
    """
    temp = boxes.copy()
    temp[..., 0] = boxes[..., 0] - (boxes[..., 2] / 2)  # xmin
    temp[..., 1] = boxes[..., 1] - (boxes[..., 3] / 2)  # ymin
    temp[..., 2] = boxes[..., 0] + (boxes[..., 2] / 2)  # xmax
    temp[..., 3] = boxes[..., 1] + (boxes[..., 3] / 2)  # ymax
    return temp
0.908541
0.767123
def exact_match(gt_s, gt_e, pr_s, pr_e):
    """
    Evaluate exact match of a predicted span over a ground truth span.

    Args:
        gt_s: index of the ground truth start position
        gt_e: index of the ground truth end position
        pr_s: index of the predicted start position
        pr_e: index of the predicted end position
    """
    return gt_s == pr_s and gt_e == pr_e
0.779741
0.658424
def Pluralize(num, word, plural=None):
    """Pluralize word based on num.

    Args:
        num: int, the number of objects to count.
        word: str, the word to pluralize.
        plural: str, the plural form of word if not "add s"

    Returns:
        str: the plural or singular form of word in accord with num.
    """
    if num == 1:
        return word
    return plural or word + 's'
0.708213
0.614539

Dataset Card for "python_functions_filtered"

Python functions extracted from the StarCoder base training data. Only functions with minimal external dependencies were chosen. They were filtered manually, and also by the model-estimated learning value and quality scores stored in the learning_prob and quality_prob columns.
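The card does not include loading code; a minimal sketch with the datasets library follows (the repository id is assumed from the card title and may need the owning namespace prefixed):

from datasets import load_dataset

# id assumed from the card title; prepend the namespace if needed
ds = load_dataset("python_functions_filtered", split="train")

# cut a high-confidence subset using the two score columns
high_value = ds.filter(lambda r: r["quality_prob"] > 0.9 and r["learning_prob"] > 0.8)
print(len(ds), "total rows;", len(high_value), "high-confidence rows")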
