code | docs
---|---|
def add_from_db(self, database, files):
for f in files:
annotation = database.annotations(f)
image_path = database.original_file_name(f)
self.add_image(image_path, [annotation]) | Adds images and bounding boxes for the given files of a database that follows the :py:ref:`bob.bio.base.database.BioDatabase <bob.bio.base>` interface.
**Parameters:**
``database`` : a derivative of :py:class:`bob.bio.base.database.BioDatabase`
The database interface, which provides file names and annotations for the given ``files``
``files`` : :py:class:`bob.bio.base.database.BioFile` or compatible
The files (as returned by :py:meth:`bob.bio.base.database.BioDatabase.objects`) which should be added to the training list |
def save(self, list_file):
bob.io.base.create_directories_safe(os.path.dirname(list_file))
with open(list_file, 'w') as f:
for i in range(len(self.image_paths)):
f.write(self.image_paths[i])
for bbx in self.bounding_boxes[i]:
f.write("\t[%f %f %f %f]" % (bbx.top_f, bbx.left_f, bbx.size_f[0], bbx.size_f[1]))
f.write("\n") | Saves the current list of annotations to the given file.
**Parameters:**
``list_file`` : str
The name of a list file to write the currently stored list into |
def load(self, list_file):
with open(list_file) as f:
for line in f:
if line and line[0] != '#':
splits = line.split()
bounding_boxes = []
for i in range(1, len(splits), 4):
assert splits[i][0] == '[' and splits[i+3][-1] == ']'
bounding_boxes.append(BoundingBox(topleft=(float(splits[i][1:]), float(splits[i+1])), size=(float(splits[i+2]), float(splits[i+3][:-1]))))
self.image_paths.append(splits[0])
self.bounding_boxes.append(bounding_boxes) | Loads the list of annotations from the given file and **appends** it to the current list.
``list_file`` : str
The name of a list file to load and append |
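The list file written by ``save`` and read by ``load`` stores one image path per line followed by tab-separated ``[top left height width]`` groups. A minimal standalone sketch of a parser for that format, assuming the same whitespace conventions (a plain tuple stands in for the real ``BoundingBox``):

def parse_annotation_line(line):
    # One line of the list file -> (image_path, [(top, left, height, width), ...]);
    # comment lines starting with '#' are expected to be skipped by the caller.
    splits = line.split()
    boxes = []
    for i in range(1, len(splits), 4):
        assert splits[i][0] == '[' and splits[i + 3][-1] == ']'
        boxes.append((float(splits[i][1:]), float(splits[i + 1]),
                      float(splits[i + 2]), float(splits[i + 3][:-1])))
    return splits[0], boxes

# "img/001.png\t[10.0 20.0 64.0 64.0]" parses to
# ('img/001.png', [(10.0, 20.0, 64.0, 64.0)])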
def iterate(self, max_number_of_files=None):
indices = quasi_random_indices(len(self), max_number_of_files)
for index in indices:
image = bob.io.base.load(self.image_paths[index])
if len(image.shape) == 3:
image = bob.ip.color.rgb_to_gray(image)
# return image and bounding box as iterator
yield image, self.bounding_boxes[index], self.image_paths[index] | iterate([max_number_of_files]) -> image, bounding_boxes, image_file
Yields the image and the bounding boxes stored in the training set as an iterator.
This function loads the images and converts them to gray-scale.
It yields the image, the list of bounding boxes and the original image file name.
**Parameters:**
``max_number_of_files`` : int or ``None``
If specified, limit the number of returned data by sub-selection using :py:func:`quasi_random_indices`
**Yields:**
``image`` : array_like(2D)
The image loaded from file and converted to gray scale
``bounding_boxes`` : [:py:class:`BoundingBox`]
A list of bounding boxes, where faces are found in the image; might be empty (in case of pure background images)
``image_file`` : str
The name of the original image that was read |
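A hedged usage sketch for the iterator above; ``training_set`` is a hypothetical, already populated instance of the class these methods belong to:

# Count the annotated faces in a quasi-random subsample of the training set.
for image, bounding_boxes, image_file in training_set.iterate(max_number_of_files=100):
    # image is a 2D gray-scale array; bounding_boxes may be empty
    # for pure background images
    print("%s: %d face(s), shape %s" % (image_file, len(bounding_boxes), image.shape))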
def _feature_file(self, parallel = None, index = None):
if index is None:
index = 0 if parallel is None or "SGE_TASK_ID" not in os.environ else int(os.environ["SGE_TASK_ID"])
return os.path.join(self.feature_directory, "Features_%02d.hdf5" % index) | Returns the name of an intermediate file for storing features. |
def feature_extractor(self):
extractor_file = os.path.join(self.feature_directory, "Extractor.hdf5")
if not os.path.exists(extractor_file):
raise IOError("Could not found extractor file %s. Did you already run the extraction process? Did you specify the correct `feature_directory` in the constructor?" % extractor_file)
hdf5 = bob.io.base.HDF5File(extractor_file)
return FeatureExtractor(hdf5) | feature_extractor() -> extractor
Returns the feature extractor used to extract the positive and negative features.
This feature extractor is stored to file when the :py:meth:`extract` method is run, so this function reads that file (from the ``feature_directory`` set in the constructor) and returns its content.
**Returns:**
``extractor`` : :py:class:`FeatureExtractor`
The feature extractor used to extract the features stored in the ``feature_directory`` |
def get(self, param, default=EMPTY):
if not self.has(param):
if default is not EMPTY:
return default
raise ParamNotFoundException("value for %s not found" % param)
context_dict = copy.deepcopy(self.manifest.get_context_dict())
for k, v in self.raw_dict.items():
context_dict["%s:%s" % (self.feature_name, k)] = v
cur_value = self.raw_dict[param]
prev_value = None
max_depth = 5
# apply the context until doing so does not change the value
while cur_value != prev_value and max_depth > 0:
prev_value = cur_value
try:
cur_value = str(prev_value) % context_dict
except KeyError:
e = sys.exc_info()[1]
key = e.args[0]
if key.startswith('config:'):
missing_key = key.split(':')[1]
if self.manifest.inputs.is_input(missing_key):
val = self.manifest.inputs.get_input(missing_key)
context_dict[key] = val
else:
logger.warn("Could not specialize %s! Error: %s" % (self.raw_dict[param], e))
return self.raw_dict[param]
except ValueError:
# this is an esoteric error, and this implementation
# forces a terrible solution. Sorry.
# using the standard escaping syntax in python is a mistake.
# if a value has a "%" inside (e.g. a password), a ValueError
# is raised, causing an issue
return cur_value
max_depth -= 1
return cur_value | Returns the param value, or the default if it doesn't exist.
If no default is provided, an exception is raised instead.
The returned parameter will have been specialized against the global context |
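The loop above repeatedly applies Python %-style mapping substitution until the value stops changing. A minimal, dependency-free sketch of that idea with hypothetical keys (the real method additionally pulls missing ``config:`` keys from the manifest inputs):

def specialize(value, context, max_depth=5):
    # Re-substitute %(key)s references until the value is stable or the
    # depth limit is hit, which guards against circular references.
    prev = None
    while value != prev and max_depth > 0:
        prev = value
        value = prev % context
        max_depth -= 1
    return value

print(specialize('%(root)s/bin', {'root': '/opt/app'}))  # /opt/app/bin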
def set(self, param, value):
self.raw_dict[param] = value
self.manifest.set(self.feature_name, param, value) | sets the param to the value provided |
def remove(self, param):
if self.has(param):
del(self.raw_dict[param])
self.manifest.remove_option(self.feature_name, param) | Remove a parameter from the manifest |
def set_if_empty(self, param, default):
if not self.has(param):
self.set(param, default) | Set the parameter to the default if it doesn't exist |
def to_dict(self):
return dict((k, str(self.get(k))) for k in self.raw_dict) | Returns the context, fully specialized, as a dictionary |
def write_to_manifest(self):
self.manifest.remove_section(self.feature_name)
self.manifest.add_section(self.feature_name)
for k, v in self.raw_dict.items():
self.manifest.set(self.feature_name, k, v) | Overwrites the section of the manifest with the feature config's values |
def mro_resolve(name, bases, dict):
if name in dict:
return dict[name]
for base in bases:
if hasattr(base, name):
return getattr(base, name)
try:
return mro_resolve(name, base.__bases__, {})
except KeyError:
pass
raise KeyError(name) | Given a tuple of baseclasses and a dictionary that takes precedence
over any value in the bases, finds a value with the specified *name*
and returns it. Raises #KeyError if the value cannot be found. |
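A small usage sketch for ``mro_resolve``; the classes are illustrative:

class Base(object):
    greeting = 'hello'

class Child(Base):
    pass

# The explicit dictionary takes precedence over the bases:
print(mro_resolve('greeting', (Child,), {}))                  # 'hello'
print(mro_resolve('greeting', (Child,), {'greeting': 'hi'}))  # 'hi'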
def convert_radian(coord, *variables):
if any(v.attrs.get('units') == 'radian' for v in variables):
return coord * 180. / np.pi
return coord | Convert the given coordinate from radian to degree
Parameters
----------
coord: xr.Variable
The variable to transform
``*variables``
The variables that are on the same unit.
Returns
-------
xr.Variable
The transformed variable if one of the given `variables` has units in
radian |
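A quick sketch of the unit check, assuming xarray and numpy are importable; the variable is illustrative:

import numpy as np
import xarray as xr

lon = xr.Variable(('x',), np.array([0.0, np.pi / 2, np.pi]),
                  attrs={'units': 'radian'})
# lon itself carries radian units, so the coordinate gets scaled:
print(convert_radian(lon, lon).values)  # [  0.  90. 180.]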
def format_coord_func(ax, ref):
orig_format_coord = ax.format_coord
def func(x, y):
orig_s = orig_format_coord(x, y)
fmto = ref()
if fmto is None:
return orig_s
try:
orig_s += fmto.add2format_coord(x, y)
except Exception:
fmto.logger.debug(
'Failed to get plot information for status bar!', exc_info=1)
return orig_s
return func | Create a function that can replace the
:func:`matplotlib.axes.Axes.format_coord`
Parameters
----------
ax: matplotlib.axes.Axes
The axes instance
ref: weakref.weakref
The reference to the :class:`~psyplot.plotter.Formatoption` instance
Returns
-------
function
The function that can be used to replace `ax.format_coord` |
def replace_coord(self, i):
da = next(islice(self.data_iterator, i, i+1))
name, coord = self.get_alternative_coord(da, i)
other_coords = {key: da.coords[key]
for key in set(da.coords).difference(da.dims)}
ret = da.rename({da.dims[-1]: name}).assign_coords(
**{name: coord}).assign_coords(**other_coords)
return ret | Replace the coordinate for the data array at the given position
Parameters
----------
i: int
The number of the data array in the raw data (if the raw data is
not an interactive list, use 0)
Returns
-------
xarray.DataArray
The data array with the replaced coordinate |
def value2pickle(self):
return {key: s.get_edgecolor() for key, s in self.ax.spines.items()} | Return the current axis colors |
def set_default_formatters(self, which=None):
if which is None or which == 'minor':
self.default_formatters['minor'] = self.axis.get_minor_formatter()
if which is None or which == 'major':
self.default_formatters['major'] = self.axis.get_major_formatter() | Sets the default formatters that are used when updating to None
Parameters
----------
which: {None, 'minor', 'major'}
Specify which formatter shall be set |
def plotted_data(self):
return InteractiveList(
[arr for arr, val in zip(self.iter_data,
cycle(slist(self.value)))
if val is not None]) | The data that is shown to the user |
def get_cmap(self, arr=None, cmap=None, N=None):
N = N or None
if cmap is None:
cmap = self.value
if N is None:
try:
N = self.bounds.norm.Ncmap
except AttributeError:
if arr is not None and self.bounds.norm is not None:
N = len(np.unique(self.bounds.norm(arr.ravel())))
if N is not None:
return get_cmap(cmap, N)
return get_cmap(cmap) | Get the :class:`matplotlib.colors.Colormap` for plotting
Parameters
----------
arr: np.ndarray
The array to plot
cmap: str or matplotlib.colors.Colormap
The colormap to use. If None, the :attr:`value` of this
formatoption is used
N: int
The number of colors in the colormap. If None, the norm of the
:attr:`bounds` formatoption is used and, if necessary, the
given array `arr`
Returns
-------
matplotlib.colors.Colormap
The colormap returned by :func:`psy_simple.colors.get_cmap` |
def get_fmt_widget(self, parent, project):
from psy_simple.widgets.colors import CMapFmtWidget
return CMapFmtWidget(parent, self, project) | Open a :class:`psy_simple.widgets.colors.CMapFmtWidget` |
def xcoord(self):
return self.decoder.get_x(self.data, coords=self.data.coords) | The x coordinate :class:`xarray.Variable` |
def ycoord(self):
return self.decoder.get_y(self.data, coords=self.data.coords) | The y coordinate :class:`xarray.Variable` |
def cell_nodes_x(self):
decoder = self.decoder
xcoord = self.xcoord
data = self.data
xbounds = decoder.get_cell_node_coord(
data, coords=data.coords, axis='x')
if self.plotter.convert_radian:
xbounds = convert_radian(xbounds, xcoord, xbounds)
return xbounds.values | The unstructured x-boundaries with shape (N, m) where m > 2 |
def cell_nodes_y(self):
decoder = self.decoder
ycoord = self.ycoord
data = self.data
ybounds = decoder.get_cell_node_coord(
data, coords=data.coords, axis='y')
if self.plotter.convert_radian:
ybounds = convert_radian(ybounds, ycoord, ybounds)
return ybounds.values | The unstructured y-boundaries with shape (N, m) where m > 2 |
def axis(self):
return getattr(
self.colorbar.ax, self.axis_locations[self.position] + 'axis') | The axis of the colorbar that holds the ticks. Will be overwritten during
the update process. |
def default_formatters(self):
if self._default_formatters:
return self._default_formatters
else:
self.set_default_formatters()
return self._default_formatters | Default formatters of the colorbar axis |
def xcoord(self):
v = next(self.raw_data.psy.iter_base_variables)
return self.decoder.get_x(v, coords=self.data.coords) | The x coordinate :class:`xarray.Variable` |
def ycoord(self):
v = next(self.raw_data.psy.iter_base_variables)
return self.decoder.get_y(v, coords=self.data.coords) | The y coordinate :class:`xarray.Variable` |
def add2format_coord(self, x, y):
u, v = self.data
uname, vname = self.data.coords['variable'].values
xcoord = self.xcoord
ycoord = self.ycoord
if self.decoder.is_triangular(self.raw_data[0]):
x, y, z1, z2 = self.get_xyz_tri(xcoord, x, ycoord, y, u, v)
elif xcoord.ndim == 1:
x, y, z1, z2 = self.get_xyz_1d(xcoord, x, ycoord, y, u, v)
elif xcoord.ndim == 2:
x, y, z1, z2 = self.get_xyz_2d(xcoord, x, ycoord, y, u, v)
speed = (z1**2 + z2**2)**0.5
xunit = xcoord.attrs.get('units', '')
if xunit:
xunit = ' ' + xunit
yunit = ycoord.attrs.get('units', '')
if yunit:
yunit = ' ' + yunit
zunit = u.attrs.get('units', '')
if zunit:
zunit = ' ' + zunit
return (', vector data: %s: %.4g%s, %s: %.4g%s, %s: %.4g%s, '
'%s: %.4g%s, absolute: %.4g%s') % (
xcoord.name, x, xunit, ycoord.name, y, yunit,
uname, z1, zunit, vname, z2, zunit,
speed, zunit) | Additional information for the :meth:`format_coord` |
def get_xyz_tri(self, xcoord, x, ycoord, y, u, v):
return self.get_xyz_2d(xcoord, x, ycoord, y, u, v) | Get closest x, y and z for the given `x` and `y` in `data` for
triangular grids (delegates to the 2d lookup) |
def get_xyz_1d(self, xcoord, x, ycoord, y, u, v):
xclose = xcoord.indexes[xcoord.name].get_loc(x, method='nearest')
yclose = ycoord.indexes[ycoord.name].get_loc(y, method='nearest')
uval = u[yclose, xclose].values
vval = v[yclose, xclose].values
return xcoord[xclose].values, ycoord[yclose].values, uval, vval | Get closest x, y and z for the given `x` and `y` in `data` for
1d coords |
def get_xyz_2d(self, xcoord, x, ycoord, y, u, v):
xy = xcoord.values.ravel() + 1j * ycoord.values.ravel()
dist = np.abs(xy - (x + 1j * y))
imin = np.nanargmin(dist)
xy_min = xy[imin]
return (xy_min.real, xy_min.imag, u.values.ravel()[imin],
v.values.ravel()[imin]) | Get closest x, y and z for the given `x` and `y` in `data` for
2d coords |
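The 2D lookup above encodes (x, y) pairs as complex numbers so that a single np.abs call yields Euclidean distances to every grid node. The trick in isolation, with made-up coordinates:

import numpy as np

xcoord = np.array([[0.0, 1.0], [0.0, 1.0]])
ycoord = np.array([[0.0, 0.0], [1.0, 1.0]])
xy = xcoord.ravel() + 1j * ycoord.ravel()
# Distance from the query point (0.9, 0.2) to all four grid nodes at once:
dist = np.abs(xy - (0.9 + 0.2j))
imin = np.nanargmin(dist)
print(xy[imin].real, xy[imin].imag)  # 1.0 0.0 -- the closest node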
def hist2d(self, da, **kwargs):
if self.value is None or self.value == 'counts':
normed = False
else:
normed = True
y = da.values
x = da.coords[da.dims[0]].values
counts, xedges, yedges = np.histogram2d(
x, y, normed=normed, **kwargs)
if self.value == 'counts':
counts = counts / counts.sum().astype(float)
return counts, xedges, yedges | Make the two dimensional histogram
Parameters
----------
da: xarray.DataArray
The data source |
def _statsmodels_bivariate_kde(self, x, y, bws, xsize, ysize, xyranges):
import statsmodels.nonparametric.api as smnp
for i, (coord, bw) in enumerate(zip([x, y], bws)):
if isinstance(bw, six.string_types):
bw_func = getattr(smnp.bandwidths, "bw_" + bw)
bws[i] = bw_func(coord)
kde = smnp.KDEMultivariate([x, y], "cc", bws)
x_support = np.linspace(xyranges[0][0], xyranges[0][1], xsize)
y_support = np.linspace(xyranges[1][0], xyranges[1][1], ysize)
xx, yy = np.meshgrid(x_support, y_support)
z = kde.pdf([xx.ravel(), yy.ravel()]).reshape(xx.shape)
return x_support, y_support, z | Compute a bivariate kde using statsmodels.
This function is mainly motivated by
seaborn.distributions._statsmodels_bivariate_kde |
def check_data(cls, name, dims, is_unstructured=None):
if isinstance(name, six.string_types) or not is_iterable(name):
name = [name]
dims = [dims]
N = len(name)
if len(dims) != N:
return [False] * N, [
'Number of provided names (%i) and dimensions '
'(%i) are not the same' % (N, len(dims))] * N
checks = [True] * N
messages = [''] * N
for i, (n, d) in enumerate(zip(name, dims)):
if n != 0 and not n:
checks[i] = False
messages[i] = 'At least one variable name is required!'
elif ((not isstring(n) and is_iterable(n) and
len(n) > cls.allowed_vars) and
len(d) != (cls.allowed_dims - len(slist(n)))):
checks[i] = False
messages[i] = 'Only %i names are allowed per array!' % (
cls.allowed_vars)
elif len(d) != cls.allowed_dims:
checks[i] = False
messages[i] = 'Only %i-dimensional arrays are allowed!' % (
cls.allowed_dims)
return checks, messages | A validation method for the data shape
Parameters
----------
name: str or list of str
The variable names (at maximum :attr:`allowed_vars` variables per
array)
dims: list with length 1 or list of lists with length 1
The dimension of the arrays. Only 1D-Arrays are allowed
is_unstructured: bool or list of bool, optional
True if the corresponding array is unstructured. This keyword is
ignored
Returns
-------
%(Plotter.check_data.returns)s |
def check_data(cls, name, dims, is_unstructured):
if isinstance(name, six.string_types) or not is_iterable(name):
name = [name]
dims = [dims]
is_unstructured = [is_unstructured]
N = len(name)
if N != 1:
return [False] * N, [
'Number of provided names (%i) must equal 1!' % (N)] * N
elif len(dims) != 1:
return [False], [
'Number of provided dimension lists (%i) must equal 1!' % (
len(dims))]
elif len(is_unstructured) != 1:
return [False], [
('Number of provided unstructured information (%i) must '
'equal 1!') % (len(is_unstructured))]
if name[0] != 0 and not name[0]:
return [False], ['At least one variable name must be provided!']
# unstructured arrays have only 1 dimension
dimlen = cls.allowed_dims
if is_unstructured[0]:
dimlen -= 1
# Check that the array is two-dimensional
#
# if more than one array name is provided, the dimensions should be
# one less than dimlen to have a 2D array
if (not isstring(name[0]) and is_iterable(name[0])
and len(name[0]) != 1 and len(dims[0]) != dimlen - 1):
return [False], ['Only one name is allowed per array!']
# otherwise the number of dimensions must equal dimlen
if len(dims[0]) != dimlen:
return [False], [
'An array with dimension %i is required, not %i' % (
dimlen, len(dims[0]))]
return [True], [''] | A validation method for the data shape
Parameters
----------
name: str or list of str
The variable names (one variable per array)
dims: list with length 1 or list of lists with length 1
The dimension of the arrays. Only 1D-Arrays are allowed
is_unstructured: bool or list of bool
True if the corresponding array is unstructured.
Returns
-------
%(Plotter.check_data.returns)s |
def check_data(cls, name, dims, is_unstructured):
if isinstance(name, six.string_types) or not is_iterable(name):
name = [name]
dims = [dims]
is_unstructured = [is_unstructured]
msg = ('Two arrays are required (one for the scalar and '
'one for the vector field)')
if len(name) < 2:
return [None], [msg]
elif len(name) > 2:
return [False], [msg]
valid1, msg1 = Simple2DBase.check_data(name[:1], dims[0:1],
is_unstructured[:1])
valid2, msg2 = BaseVectorPlotter.check_data(name[1:], dims[1:],
is_unstructured[1:])
return valid1 + valid2, msg1 + msg2 | A validation method for the data shape
Parameters
----------
name: list of str with length 2
The variable names (one for the first, two for the second array)
dims: list with length 2 of lists with length 1
The dimension of the arrays. Only 2D-Arrays are allowed (or 1-D if
an array is unstructured)
is_unstructured: bool or list of bool
True if the corresponding array is unstructured.
Returns
-------
%(Plotter.check_data.returns)s |
def record_diff(old, new):
old, new = _norm_json_params(old, new)
return json_delta.diff(new, old, verbose=False) | Return a JSON-compatible structure capable of turning the `new` record back
into the `old` record. The parameters must be structures compatible with
json.dumps *or* strings compatible with json.loads. Note that by design,
`old == record_patch(new, record_diff(old, new))` |
def record_patch(rec, diff):
rec, diff = _norm_json_params(rec, diff)
return json_delta.patch(rec, diff, in_place=False) | Return the JSON-compatible structure that results from applying the
changes in `diff` to the record `rec`. The parameters must be structures
compatible with json.dumps *or* strings compatible with json.loads. Note
that by design, `old == record_patch(new, record_diff(old, new))` |
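A hedged round-trip sketch for the two helpers above, assuming json_delta is installed and that _norm_json_params accepts plain dicts:

old = {'name': 'clip-1', 'tags': ['a', 'b']}
new = {'name': 'clip-1', 'tags': ['a', 'b', 'c'], 'state': 'ACTIVE'}

diff = record_diff(old, new)
# By design, applying the diff to `new` recovers `old`:
assert record_patch(new, diff) == old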
def append_diff_hist(diff, diff_hist=list()):
diff, diff_hist = _norm_json_params(diff, diff_hist)
if not diff_hist:
diff_hist = list()
diff_hist.append({'diff': diff, 'diff_date': now_field()})
return diff_hist | Given a diff as generated by record_diff, append a diff record to the
list of diff_hist records. |
def parse_diff_hist(curr_obj, diff_hist):
curr_obj, diff_hist = _norm_json_params(curr_obj, diff_hist)
yield (json.dumps(curr_obj), None)
last_obj = curr_obj
for one in reversed(diff_hist):
last_obj = record_patch(last_obj, one['diff'])
yield json.dumps(last_obj), one['diff_date'] | Given a diff_hist as created, appended by append_diff_hist, yield the
versions of the object, starting with curr_obj and working backwards in time.
Each instance yielded is of the form (obj, date-string) where obj is the
JSON version of the object created by applying a diff in the
diff history and date-string is a string representing the date/time that
the diff was taken |
def to_dict(self):
data = {
'id': self.id,
'referenceId': self.reference_id,
'type': self.type,
'displayName': self.display_name,
'remoteUrl': self.remote_url}
for key in list(data.keys()):
if data[key] is None:
data.pop(key)
return data | Converts object into a dictionary. |
def to_dict(self):
data = {
'url': self.url,
'encodingRate': self.encoding_rate,
'frameHeight': self.frame_height,
'frameWidth': self.frame_width,
'size': self.size,
'remoteUrl': self.remote_url,
'remoteStream': self.remote_stream_name,
'videoDuration': self.video_duration,
'videoCodec': self.video_codec}
[data.pop(key) for key in list(data.keys()) if data[key] is None]
return data | Converts object into a dictionary. |
def to_dict(self):
data = {
'name': self.name,
'video_id': self.video_id,
'time': self.time,
'forceStop': self.force_stop,
'type': self.type,
'metadata': self.metadata}
for key in list(data.keys()):
if data[key] is None:
data.pop(key)
return data | Converts object into a dictionary. |
def _find_video(self):
data = None
if self.id:
data = self.connection.get_item(
'find_video_by_id', video_id=self.id)
elif self.reference_id:
data = self.connection.get_item(
'find_video_by_reference_id', reference_id=self.reference_id)
if data:
self._load(data) | Lookup and populate ``pybrightcove.video.Video`` object given a video
id or reference_id. |
def _to_dict(self):
# drop empty tags; filtering avoids skipping items while mutating the list
self.tags = [tag for tag in self.tags if tag not in ("", None)]
data = {
'name': self.name,
'referenceId': self.reference_id,
'shortDescription': self.short_description,
'longDescription': self.long_description,
'itemState': self.item_state,
'linkURL': self.link_url,
'linkText': self.link_text,
'tags': self.tags,
'economics': self.economics,
'id': self.id,
'end_date': _make_tstamp(self.end_date),
'start_date': _make_tstamp(self.start_date)}
if len(self.renditions) > 0:
data['renditions'] = []
for r in self.renditions:
data['renditions'].append(r.to_dict())
if len(self.metadata) > 0:
data['customFields'] = {}
for meta in self.metadata:
data['customFields'][meta['key']] = meta['value']
[data.pop(key) for key in list(data.keys()) if data[key] is None]
return data | Converts object into a dictionary. |
def _load(self, data):
self.raw_data = data
self.creation_date = _convert_tstamp(data['creationDate'])
self.economics = data['economics']
self.id = data['id']
self.last_modified_date = _convert_tstamp(data['lastModifiedDate'])
self.length = data['length']
self.link_text = data['linkText']
self.link_url = data['linkURL']
self.long_description = data['longDescription']
self.name = data['name']
self.plays_total = data['playsTotal']
self.plays_trailing_week = data['playsTrailingWeek']
self.published_date = _convert_tstamp(data['publishedDate'])
self.start_date = _convert_tstamp(data.get('startDate', None))
self.end_date = _convert_tstamp(data.get('endDate', None))
self.reference_id = data['referenceId']
self.short_description = data['shortDescription']
self.tags = []
for tag in data['tags']:
self.tags.append(tag)
self.thumbnail_url = data['thumbnailURL']
self.video_still_url = data['videoStillURL'] | Deserialize a dictionary of data into a ``pybrightcove.video.Video``
object. |
def get_custom_metadata(self):
if self.id is not None:
data = self.connection.get_item(
'find_video_by_id',
video_id=self.id,
video_fields="customFields"
)
for key in data.get("customFields", {}).keys():
val = data["customFields"].get(key)
if val is not None:
self.add_custom_metadata(key, val) | Fetches custom metadata for an already existing Video. |
def add_custom_metadata(self, key, value, meta_type=None):
self.metadata.append({'key': key, 'value': value, 'type': meta_type}) | Add custom metadata to the Video. meta_type is required for XML API. |
def add_asset(self, filename, asset_type, display_name,
encoding_rate=None, frame_width=None, frame_height=None,
encode_to=None, encode_multiple=False,
h264_preserve_as_rendition=False, h264_no_processing=False):
m = hashlib.md5()
fp = open(filename, 'rb')
bits = fp.read(262144) ## 256KB
while bits:
m.update(bits)
bits = fp.read(262144)
fp.close()
hash_code = m.hexdigest()
refid = "%s-%s" % (os.path.basename(filename), hash_code)
asset = {
'filename': filename,
'type': asset_type,
'size': os.path.getsize(filename),
'refid': refid,
'hash-code': hash_code}
if encoding_rate:
asset.update({'encoding-rate': encoding_rate})
if frame_width:
asset.update({'frame-width': frame_width})
if frame_height:
asset.update({'frame-height': frame_height})
if display_name:
asset.update({'display-name': display_name})
if encode_to:
asset.update({'encode-to': encode_to})
asset.update({'encode-multiple': encode_multiple})
if encode_multiple and h264_preserve_as_rendition:
asset.update({
'h264-preserve-as-rendition': h264_preserve_as_rendition})
else:
if h264_no_processing:
asset.update({'h264-no-processing': h264_no_processing})
self.assets.append(asset) | Add an asset to the Video object. |
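The reference id above embeds a chunked MD5 of the file so that large videos are never read into memory at once. That hashing step in isolation (using open() so the sketch also runs on Python 3):

import hashlib

def file_md5(filename, chunk_size=262144):  # 256KB chunks
    m = hashlib.md5()
    with open(filename, 'rb') as fp:
        bits = fp.read(chunk_size)
        while bits:
            m.update(bits)
            bits = fp.read(chunk_size)
    return m.hexdigest()

# refid is then "<basename>-<md5>", e.g. "movie.mp4-<32 hex digits>"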
def save(self, create_multiple_renditions=True,
preserve_source_rendition=True,
encode_to=enums.EncodeToEnum.FLV):
if is_ftp_connection(self.connection) and len(self.assets) > 0:
self.connection.post(xml=self.to_xml(), assets=self.assets)
elif not self.id and self._filename:
self.id = self.connection.post('create_video', self._filename,
create_multiple_renditions=create_multiple_renditions,
preserve_source_rendition=preserve_source_rendition,
encode_to=encode_to,
video=self._to_dict())
elif not self.id and len(self.renditions) > 0:
self.id = self.connection.post('create_video',
video=self._to_dict())
elif self.id:
data = self.connection.post('update_video', video=self._to_dict())
if data:
self._load(data) | Creates or updates the video |
def delete(self, cascade=False, delete_shares=False):
if self.id:
self.connection.post('delete_video', video_id=self.id,
cascade=cascade, delete_shares=delete_shares)
self.id = None | Deletes the video. |
def get_upload_status(self):
if self.id:
return self.connection.post('get_upload_status', video_id=self.id) | Get the status of the video that has been uploaded. |
def share(self, accounts):
if not isinstance(accounts, (list, tuple)):
msg = "Video.share expects an iterable argument"
raise exceptions.PyBrightcoveError(msg)
raise exceptions.PyBrightcoveError("Not yet implemented") | Create a share |
def set_image(self, image, filename=None, resize=False):
if self.id:
data = self.connection.post('add_image', filename,
video_id=self.id, image=image.to_dict(), resize=resize)
if data:
self.image = Image(data=data) | Set the poster or thumbnail of this Video. |
def find_related(self, _connection=None, page_size=100, page_number=0):
if self.id:
return connection.ItemResultSet('find_related_videos',
Video, _connection, page_size, page_number, None, None,
video_id=self.id) | List all videos that are related to this one. |
def delete_video(video_id, cascade=False, delete_shares=False,
_connection=None):
c = _connection
if not c:
c = connection.APIConnection()
c.post('delete_video', video_id=video_id, cascade=cascade,
delete_shares=delete_shares) | Delete the video represented by the ``video_id`` parameter. |
def get_status(video_id, _connection=None):
c = _connection
if not c:
c = connection.APIConnection()
return c.post('get_upload_status', video_id=video_id) | Get the status of a video given the ``video_id`` parameter. |
def activate(video_id, _connection=None):
c = _connection
if not c:
c = connection.APIConnection()
data = c.post('update_video', video={
'id': video_id,
'itemState': enums.ItemStateEnum.ACTIVE})
return Video(data=data, _connection=c) | Mark a video as Active |
def find_modified(since, filter_list=None, _connection=None, page_size=25,
page_number=0, sort_by=enums.DEFAULT_SORT_BY,
sort_order=enums.DEFAULT_SORT_ORDER):
filters = []
if filter_list is not None:
filters = filter_list
if not isinstance(since, datetime):
msg = 'The parameter "since" must be a datetime object.'
raise exceptions.PyBrightcoveError(msg)
fdate = int(since.strftime("%s")) / 60 ## Minutes since UNIX time
return connection.ItemResultSet('find_modified_videos',
Video, _connection, page_size, page_number, sort_by, sort_order,
from_date=fdate, filter=filters) | List all videos modified since a certain date. |
def find_all(_connection=None, page_size=100, page_number=0,
sort_by=enums.DEFAULT_SORT_BY, sort_order=enums.DEFAULT_SORT_ORDER):
return connection.ItemResultSet('find_all_videos', Video,
_connection, page_size, page_number, sort_by, sort_order) | List all videos. |
def find_by_tags(and_tags=None, or_tags=None, _connection=None,
page_size=100, page_number=0, sort_by=enums.DEFAULT_SORT_BY,
sort_order=enums.DEFAULT_SORT_ORDER):
err = None
if not and_tags and not or_tags:
err = "You must supply at least one of either and_tags or or_tags."
if and_tags and not isinstance(and_tags, (tuple, list)):
err = "The and_tags argument for Video.find_by_tags must an "
err += "iterable"
if or_tags and not isinstance(or_tags, (tuple, list)):
err = "The or_tags argument for Video.find_by_tags must an "
err += "iterable"
if err:
raise exceptions.PyBrightcoveError(err)
atags = None
otags = None
if and_tags:
atags = ','.join([str(t) for t in and_tags])
if or_tags:
otags = ','.join([str(t) for t in or_tags])
return connection.ItemResultSet('find_videos_by_tags',
Video, _connection, page_size, page_number, sort_by, sort_order,
and_tags=atags, or_tags=otags) | List videos given a certain set of tags. |
def find_by_text(text, _connection=None, page_size=100, page_number=0,
sort_by=enums.DEFAULT_SORT_BY, sort_order=enums.DEFAULT_SORT_ORDER):
return connection.ItemResultSet('find_videos_by_text',
Video, _connection, page_size, page_number, sort_by, sort_order,
text=text) | List videos that match the ``text`` in title or description. |
def find_by_campaign(campaign_id, _connection=None, page_size=100,
page_number=0, sort_by=enums.DEFAULT_SORT_BY,
sort_order=enums.DEFAULT_SORT_ORDER):
return connection.ItemResultSet(
'find_videos_by_campaign_id', Video, _connection, page_size,
page_number, sort_by, sort_order, campaign_id=campaign_id) | List all videos for a given campaign. |
def find_by_user(user_id, _connection=None, page_size=100, page_number=0,
sort_by=enums.DEFAULT_SORT_BY, sort_order=enums.DEFAULT_SORT_ORDER):
return connection.ItemResultSet('find_videos_by_user_id',
Video, _connection, page_size, page_number, sort_by, sort_order,
user_id=user_id) | List all videos uploaded by a certain user. |
def find_by_reference_ids(reference_ids, _connection=None, page_size=100,
page_number=0, sort_by=enums.DEFAULT_SORT_BY,
sort_order=enums.DEFAULT_SORT_ORDER):
if not isinstance(reference_ids, (list, tuple)):
err = "Video.find_by_reference_ids expects an iterable argument"
raise exceptions.PyBrightcoveError(err)
ids = ','.join(reference_ids)
return connection.ItemResultSet(
'find_videos_by_reference_ids', Video, _connection, page_size,
page_number, sort_by, sort_order, reference_ids=ids) | List all videos identified by a list of reference ids |
def find_by_ids(ids, _connection=None, page_size=100, page_number=0,
sort_by=enums.DEFAULT_SORT_BY, sort_order=enums.DEFAULT_SORT_ORDER):
if not isinstance(ids, (list, tuple)):
err = "Video.find_by_ids expects an iterable argument"
raise exceptions.PyBrightcoveError(err)
ids = ','.join([str(i) for i in ids])
return connection.ItemResultSet('find_videos_by_ids',
Video, _connection, page_size, page_number, sort_by, sort_order,
video_ids=ids) | List all videos identified by a list of Brightcove video ids |
def read_gpx(xml, gpxns=None):
tree = etree.parse(xml)
gpx_element = tree.getroot()
return parse_gpx(gpx_element, gpxns=gpxns) | Parse a GPX file into a GpxModel.
Args:
xml: A file-like-object opened in binary mode - that is containing
bytes rather than characters. The root element of the XML should
be a <gpx> element containing a version attribute. GPX version
1.1 is supported.
gpxns: The XML namespace for GPX in Clarke notation (i.e. delimited
by curly braces). If None, (the default) the namespace used in
the document will be determined automatically. |
def add_log_handler(log, handler=None, debug=None, fmt=None):
if debug:
log.setLevel(debug)
if handler:
# if not fmt:
# fmt = __LOG_FMT
if fmt:
handler.setFormatter(fmt)
log.addHandler(handler) | Add a handler to a :class:`logging.Logger` instance.
:param Logger log: The :class:`logging.Logger` instance to process.
:param Handler handler: A :class:`logging.Handler` instance.
:param int debug: The debug level.
:param str fmt: The formatter for the handler. |
def _sumDiceRolls(self, rollList):
if isinstance(rollList, RollList):
self.rolls.append(rollList)
return rollList.sum()
else:
return rollList | convert from dice roll structure to a single integer result |
def _re_flatten(p):
''' Turn all capturing groups in a regular expression pattern into
non-capturing groups. '''
if '(' not in p: return p
return re.sub(r'(\\*)(\(\?P<[^>]*>|\((?!\?))',
lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:', p) | Turn all capturing groups in a regular expression pattern into
non-capturing groups. |
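A quick demonstration of the flattening, assuming the module-level import of ``re`` that the function relies on; the patterns are illustrative:

print(_re_flatten(r'(foo)(?P<name>bar)'))  # (?:foo)(?:bar)
print(_re_flatten(r'\(literal\)'))         # unchanged: escaped parens are kept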
def cookies(self):
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE','')).values()
if len(cookies) > self.MAX_PARAMS:
raise HTTPError(413, 'Too many cookies')
return FormsDict((c.key, c.value) for c in cookies) | Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. |
def query(self):
''' The :attr:`query_string` parsed into a :class:`FormsDict`. These
values are sometimes called "URL arguments" or "GET parameters", but
not to be confused with "URL wildcards" as they are provided by the
:class:`Router`. '''
get = self.environ['bottle.get'] = FormsDict()
pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
if len(pairs) > self.MAX_PARAMS:
raise HTTPError(413, 'Too many parameters')
for key, value in pairs:
get[key] = value
return get | The :attr:`query_string` parsed into a :class:`FormsDict`. These
values are sometimes called "URL arguments" or "GET parameters", but
not to be confused with "URL wildcards" as they are provided by the
:class:`Router`. |
def copy(self):
''' Returns a copy of self. '''
# TODO
copy = Response()
copy.status = self.status
copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
return copy | Returns a copy of self. |
def annotated_references(obj):
references = KeyTransformDict(transform=id, default_factory=list)
for type_ in type(obj).__mro__:
if type_ in type_based_references:
type_based_references[type_](obj, references)
add_attr(obj, "__dict__", references)
add_attr(obj, "__class__", references)
if isinstance(obj, type):
add_attr(obj, "__mro__", references)
return references | Return known information about references held by the given object.
Returns a mapping from referents to lists of descriptions. Note that there
may be more than one edge leading to any particular referent; hence the
need for a list. Descriptions are currently strings. |
def disttar(target, source, env):
import tarfile
env_dict = env.Dictionary()
if env_dict.get("DISTTAR_FORMAT") in ["gz", "bz2"]:
tar_format = env_dict["DISTTAR_FORMAT"]
else:
tar_format = ""
# split the target directory, filename, and suffix
base_name = str(target[0]).split('.tar')[0]
(target_dir, dir_name) = os.path.split(base_name)
# create the target directory if it does not exist
if target_dir and not os.path.exists(target_dir):
os.makedirs(target_dir)
# open our tar file for writing
print >> sys.stderr, 'DistTar: Writing %s' % str(target[0])
print >> sys.stderr, ' with contents: %s' % [str(s) for s in source]
tar = tarfile.open(str(target[0]), "w:%s" % tar_format)
# write sources to our tar file
for item in source:
item = str(item)
sys.stderr.write(".")
#print "Adding to TAR file: %s/%s" % (dir_name,item)
tar.add(item,'%s/%s' % (dir_name,item))
# all done
sys.stderr.write("\n") #print "Closing TAR file"
tar.close() | tar archive builder |
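The heart of the builder is tarfile's ability to store a file under a different archive path via the second argument to tar.add. A minimal sketch of that pattern with hypothetical file names:

import tarfile

# Store src/main.c inside the archive as myproj-1.0/src/main.c, mirroring
# the '%s/%s' % (dir_name, item) arcname used above:
tar = tarfile.open('myproj-1.0.tar.gz', 'w:gz')
tar.add('src/main.c', 'myproj-1.0/src/main.c')
tar.close()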
def disttar_suffix(env, sources):
env_dict = env.Dictionary()
if env_dict.has_key("DISTTAR_FORMAT") and env_dict["DISTTAR_FORMAT"] in ["gz", "bz2"]:
return ".tar." + env_dict["DISTTAR_FORMAT"]
else:
return ".tar" | tar archive suffix generator |
def generate(env):
disttar_action=SCons.Action.Action(disttar, disttar_string)
env['BUILDERS']['DistTar'] = Builder(
action=disttar_action
, emitter=disttar_emitter
, suffix = disttar_suffix
, target_factory = env.fs.Entry
)
env.AppendUnique(
DISTTAR_FORMAT = 'gz'
) | Add builders and construction variables for the DistTar builder. |
def ensure_table(self, cls):
id_len = len(uuid())
index_names = cls.index_names() or []
cols = [
'id char(%d) primary key' % (id_len,),
'value jsonb'
] + [
name + ' text' for name in index_names
]
table_name = cls.get_table_name()
with self._conn() as conn:
with conn.cursor() as cur:
cur.execute('create table if not exists %s (%s);' % (
table_name,
','.join(cols)
))
for name in index_names:
cur.execute('create index if not exists %s on %s(%s);' % (
table_name + '_' + name + '_idx',
table_name,
name
)) | Ensure table's existence - as per the gludb spec. |
def find_one(self, cls, id):
found = self.find_by_index(cls, 'id', id)
return found[0] if found else None | Find single keyed row - as per the gludb spec. |
def find_by_index(self, cls, index_name, value):
cur = self._conn().cursor()
# psycopg2 supports using Python formatters for queries
# we also request our JSON as a string for the from_data calls
query = 'select id, value::text from {0} where {1} = %s;'.format(
cls.get_table_name(),
index_name
)
found = []
with self._conn() as conn:
with conn.cursor() as cur:
cur.execute(query, (value,))
for row in cur:
id, data = str(row[0]).strip(), row[1]
obj = cls.from_data(data)
assert id == obj.id
found.append(obj)
return found | Find all rows matching index query - as per the gludb spec. |
def save(self, obj):
cur = self._conn().cursor()
tabname = obj.__class__.get_table_name()
index_names = obj.__class__.index_names() or []
col_names = ['id', 'value'] + index_names
value_holders = ['%s'] * len(col_names)
updates = ['%s = EXCLUDED.%s' % (cn, cn) for cn in col_names[1:]]
if not obj.id:
id = uuid()
obj.id = id
query = 'insert into {0} ({1}) values ({2}) on conflict(id) do update set {3};'.format(
tabname,
','.join(col_names),
','.join(value_holders),
','.join(updates),
)
values = [obj.id, obj.to_data()]
index_vals = obj.indexes() or {}
values += [index_vals.get(name) for name in index_names]  # None binds as SQL NULL
with self._conn() as conn:
with conn.cursor() as cur:
cur.execute(query, tuple(values)) | Save current instance - as per the gludb spec. |
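The generated statement uses PostgreSQL's INSERT ... ON CONFLICT upsert form. A sketch of how the query renders for a hypothetical class with table name 'Video' and one index column 'user_id' (before psycopg2 binds the %s placeholders):

col_names = ['id', 'value', 'user_id']  # hypothetical index column
updates = ['%s = EXCLUDED.%s' % (cn, cn) for cn in col_names[1:]]
query = 'insert into {0} ({1}) values ({2}) on conflict(id) do update set {3};'.format(
    'Video', ','.join(col_names), ','.join(['%s'] * len(col_names)), ','.join(updates))
print(query)
# insert into Video (id,value,user_id) values (%s,%s,%s)
# on conflict(id) do update set value = EXCLUDED.value,user_id = EXCLUDED.user_id;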
def delete(self, obj):
del_id = obj.get_id()
if not del_id:
return
cur = self._conn().cursor()
tabname = obj.__class__.get_table_name()
query = 'delete from {0} where id = %s;'.format(tabname)
with self._conn() as conn:
with conn.cursor() as cur:
cur.execute(query, (del_id,)) | Required functionality. |
def authenticated_get(username, password, url, verify=True):
try:
response = requests.get(url, auth=(username, password), verify=verify)
if response.status_code == 401:
raise BadCredentialsException(
"Unable to authenticate user %s to %s with password provided!"
% (username, url))
except requests.exceptions.SSLError:
raise CertificateException("Unable to verify certificate at %s!" % url)
return response.content | Perform an authorized query to the url, and return the result |
def cleaned_request(request_type, *args, **kwargs):
s = requests.Session()
# this removes netrc checking
s.trust_env = False
return s.request(request_type, *args, **kwargs) | Perform a cleaned requests request |
def download_to_bytesio(url):
logger.info("Downloading url: {0}".format(url))
r = cleaned_request('get', url, stream=True)
stream = io.BytesIO()
total_length = int(r.headers.get('content-length', 0))
for chunk in progress.bar(r.iter_content(chunk_size=1024), expected_size=(total_length/1024) + 1):
if chunk:
stream.write(chunk)
stream.seek(0)
return stream | Return a bytesio object with a download bar |
def add(one, two=4, three=False):
''' This function adds two numbers.
:param one: first number to add
:param two: second number to add
:rtype: str
'''
s = str(int(one) + int(two))
logging.debug('logging sum from hello.py:' + s)
print 'printing sum from hello.py:', s
return s | This function adds two numbers.
:param one: first number to add
:param two: second number to add
:rtype: str |
def queue(self):
with self.connection_pool.acquire(block=True) as conn:
return Q(
self.routing_key,
exchange=self.exchange,
routing_key=self.routing_key
)(conn) | The underlying message queue. |
def exists(self):
try:
queue = self.queue
queue.queue_declare(passive=True)
except NotFound:
return False
except ChannelError as e:
if e.reply_code == '404':
return False
raise e
return True | Test if this queue exists in the AMQP store.
Note: This doesn't work with redis as declaring queues has no effect
except creating the exchange.
:returns: True if the queue exists, else False.
:rtype: bool |
def producer(self, conn):
return Producer(
conn,
exchange=self.exchange,
routing_key=self.routing_key,
auto_declare=True,
) | Get a producer for a connection. |
def consumer(self, conn):
return Consumer(
connection=conn,
queue=self.queue.name,
exchange=self.exchange.name,
exchange_type=self.exchange.type,
durable=self.exchange.durable,
auto_delete=self.exchange.auto_delete,
routing_key=self.routing_key,
no_ack=self.no_ack,
) | Get a consumer for a connection. |
def create_producer(self):
with self.connection_pool.acquire(block=True) as conn:
yield self.producer(conn) | Context manager that yields an instance of ``Producer``. |
def create_consumer(self):
with self.connection_pool.acquire(block=True) as conn:
yield self.consumer(conn) | Context manager that yields an instance of ``Consumer``. |
def publish(self, events):
assert len(events) > 0
with self.create_producer() as producer:
for event in events:
producer.publish(event) | Publish events. |
def consume(self, payload=True):
with self.create_consumer() as consumer:
for msg in consumer.iterqueue():
yield msg.payload if payload else msg | Consume events. |
def get_initial(self, *args, **kwargs):
initial = {}
for field in self.fields:
value = None
if hasattr(self.user, field):
value = getattr(self.user, field)
if hasattr(self.profile, field):
value = getattr(self.profile, field)
if value:
initial.update({
field: value
})
if hasattr(self.profile, 'dob'):
dob = self.profile.dob
if dob:
if 'dob_day' in self.fields:
initial.update({
'dob_day': dob.day
})
if 'dob_month' in self.fields:
initial.update({
'dob_month': dob.month
})
if 'dob_year' in self.fields:
initial.update({
'dob_year': dob.year
})
return initial | Gathers initial form values from user and profile objects
suitable for use as the form's initial data. |
def save(self, *args, **kwargs):
for key, value in self.cleaned_data.items():
if value is not None:
if hasattr(self.user, key):
setattr(self.user, key, value)
if hasattr(self.profile, key):
setattr(self.profile, key, value)
# set password
if 'password1' in self.cleaned_data:
if self.cleaned_data['password1']:
self.user.set_password(self.cleaned_data['password1'])
# set dob
if 'dob_day' in self.cleaned_data and 'dob_month' in self.\
cleaned_data and 'dob_year' in self.cleaned_data:
self.profile.dob = self._gen_dob()
self.user.save()
self.profile.save() | This method should be called when is_valid is true to save
relevant fields to user and profile models. |
def clean_username(self):
user = None
try:
user = User.objects.get(username__iexact=self.\
cleaned_data['username'])
except User.DoesNotExist:
return self.cleaned_data['username']
if user:
if user.username == self.user.username:
return self.cleaned_data['username']
raise forms.ValidationError(_(\
"A user with that username already exists.")) | Validate that the username is alphanumeric and is not already
in use. Don't fail if the user's own username is provided. |
def clean(self):
if 'dob_day' in self.cleaned_data and 'dob_month' in \
self.cleaned_data and 'dob_year' in self.cleaned_data:
try:
self._gen_dob()
except ValueError:
self._errors['dob_day'] = (_(\
"You provided an invalid date."),)
if 'password1' in self.cleaned_data and 'password2' in \
self.cleaned_data:
if self.cleaned_data['password1'] != \
self.cleaned_data['password2']:
raise forms.ValidationError(_(\
"The two password fields didn't match."))
return self.cleaned_data | Verify that the values entered into the two password fields
match. Note that an error here will end up in
``non_field_errors()`` because it doesn't apply to a single
field. |