gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
class bandwidth_constraints(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-isn/neighbors/neighbor/subTLVs/subTLVs/bandwidth-constraints. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: This container defines bandwidth-constraints. For DS-TE, the
    existing Maximum Reservable link bandwidth parameter is retained,
    but its semantics is generalized and interpreted as the aggregate
    bandwidth constraint across all Class-Types
    """

    # Restrict instances to pyangbind's bookkeeping attributes plus the one
    # child element ("__state" is name-mangled to _bandwidth_constraints__state).
    __slots__ = ("_path_helper", "_extmethods", "__state")

    _yang_name = "bandwidth-constraints"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # The single read-only (is_config=False) 'state' child container.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

        load = kwargs.pop("load", None)
        if args:
            # Copy-constructor form: a single object exposing the same
            # pyangbind elements may be supplied.
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    # Only copy elements that differ from their defaults.
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # When attached to a tree, derive the path from the parent;
        # otherwise return this container's absolute schema path.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "isis",
                "levels",
                "level",
                "link-state-database",
                "lsp",
                "tlvs",
                "tlv",
                "mt-isn",
                "neighbors",
                "neighbor",
                "subTLVs",
                "subTLVs",
                "bandwidth-constraints",
            ]

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/bandwidth_constraints/state (container)

        YANG Description: State parameters of IS Extended Reachability sub-TLV 22.
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/bandwidth_constraints/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.

        YANG Description: State parameters of IS Extended Reachability sub-TLV 22.
        """
        if hasattr(v, "_utype"):
            # Unwrap a pyangbind typed value back to its native type first.
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
                }
            )
        self.__state = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        # Restore the child to a fresh default instance.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

    # Expose 'state' read-only; writes must go through _set_state().
    state = __builtin__.property(_get_state)

    _pyangbind_elements = OrderedDict([("state", state)])
from . import state
class bandwidth_constraints(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-isn/neighbors/neighbor/subTLVs/subTLVs/bandwidth-constraints. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: This container defines bandwidth-constraints. For DS-TE, the
    existing Maximum Reservable link bandwidth parameter is retained,
    but its semantics is generalized and interpreted as the aggregate
    bandwidth constraint across all Class-Types
    """

    # NOTE(review): this generated class rebinds the module-level name bound
    # by the structurally identical class defined earlier in this file.
    __slots__ = ("_path_helper", "_extmethods", "__state")

    _yang_name = "bandwidth-constraints"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # The single read-only (is_config=False) 'state' child container.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

        load = kwargs.pop("load", None)
        if args:
            # Copy-constructor form: a single object exposing the same
            # pyangbind elements may be supplied.
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    # Only copy elements that differ from their defaults.
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # When attached to a tree, derive the path from the parent;
        # otherwise return this container's absolute schema path.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "isis",
                "levels",
                "level",
                "link-state-database",
                "lsp",
                "tlvs",
                "tlv",
                "mt-isn",
                "neighbors",
                "neighbor",
                "subTLVs",
                "subTLVs",
                "bandwidth-constraints",
            ]

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/bandwidth_constraints/state (container)

        YANG Description: State parameters of IS Extended Reachability sub-TLV 22.
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/bandwidth_constraints/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.

        YANG Description: State parameters of IS Extended Reachability sub-TLV 22.
        """
        if hasattr(v, "_utype"):
            # Unwrap a pyangbind typed value back to its native type first.
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
                }
            )
        self.__state = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        # Restore the child to a fresh default instance.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=False,
        )

    # Expose 'state' read-only; writes must go through _set_state().
    state = __builtin__.property(_get_state)

    _pyangbind_elements = OrderedDict([("state", state)])
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
import logging
import unittest
from collections import OrderedDict
import numpy as np
from parameterized import parameterized
from past.builtins import unicode
from apache_beam.testing import datatype_inference
from apache_beam.typehints import typehints
try:
import pyarrow as pa
except ImportError:
pa = None
# Shared test cases: each carries input rows plus the expected typehints,
# pyarrow, and Avro schemas for those rows.
TEST_DATA = [
    {
        "name": "empty",
        "data": [],
        "type_schema": OrderedDict([]),
        # pyarrow may be absent; its schema is then None and the pyarrow
        # test is skipped via @unittest.skipIf.
        "pyarrow_schema": pa.schema([]) if pa is not None else None,
        "avro_schema": {
            "namespace": "example.avro",
            "name": "User",
            "type": "record",
            "fields": [],
        },
    },
    {
        "name": "main",
        # Rows deliberately omit some columns to exercise schema merging.
        "data": [
            OrderedDict([
                ("a", 1),
                ("b", 0.12345),
                ("c", u"Hello World!!"),
                ("d", np.array([1, 2, 3])),
                ("e", b"some bytes"),
            ]),
            OrderedDict([
                ("a", -5),
                ("b", 1234.567),
                ("e", b"more bytes"),
            ]),
            OrderedDict([
                ("a", 100000),
                ("c", u"XoXoX"),
                ("d", np.array([4, 5, 6])),
                ("e", b""),
            ]),
        ],
        "type_schema": OrderedDict([
            ("a", int),
            ("b", float),
            ("c", unicode),
            ("d", np.ndarray),
            ("e", bytes),
        ]),
        "pyarrow_schema": pa.schema([
            ("a", pa.int64()),
            ("b", pa.float64()),
            ("c", pa.string()),
            ("d", pa.list_(pa.int64())),
            ("e", pa.binary()),
        ]) if pa is not None else None,
        "avro_schema": {
            "namespace": "example.avro",
            "name": "User",
            "type": "record",
            "fields": [
                {
                    "name": "a", "type": "int"
                },
                {
                    "name": "b", "type": "double"
                },
                {
                    "name": "c", "type": "string"
                },
                {
                    "name": "d", "type": "bytes"
                },
                {
                    "name": "e", "type": "bytes"
                },
            ],
        },
    },
]
def nullify_data_and_schemas(test_data):
    """Build "_nullified" variants of the given test cases.

    For every non-empty test case, append one row with every known column set
    to None, and widen the type and Avro schemas so each field also accepts
    null. Returns only the new cases; the input list is not modified.
    (The "pyarrow_schema" entry is carried over unchanged.)
    """
    def nullify_avro_schema(schema):
        """Add a 'null' type alternative to every field of an Avro schema."""
        schema = schema.copy()
        new_fields = []
        for field in schema["fields"]:
            if isinstance(field["type"], str):
                # Single type -> union of the type and "null"; sorted so the
                # tests can compare unions deterministically.
                new_fields.append({
                    "name": field["name"], "type": sorted([field["type"], "null"])
                })
            else:
                # Already a union (list of types) -> extend it with "null".
                new_fields.append({
                    "name": field["name"], "type": sorted(field["type"] + ["null"])
                })
        schema["fields"] = new_fields
        return schema

    def get_columns_in_order(test_data):
        """Get a list of columns while trying to maintain original order.

        .. note::
            Columns which do not appear until later rows are added to the end,
            even if they precede some columns which have already been added.
        """
        _seen = set()
        # set.add() returns None (falsy), so `not _seen.add(c)` marks c as
        # seen while keeping the filter condition truthy.
        columns = [
            c for test_case in test_data for row in test_case["data"] for c in row
            if c not in _seen and not _seen.add(c)
        ]
        return columns

    nullified_test_data = []
    columns = get_columns_in_order(test_data)
    for test_case in test_data:
        if not test_case["data"]:
            # Nothing to nullify in an empty case.
            continue
        test_case = test_case.copy()
        test_case["name"] = test_case["name"] + "_nullified"
        test_case["data"] = test_case["data"] + [
            OrderedDict([(c, None) for c in columns])
        ]
        test_case["type_schema"] = OrderedDict([
            (k, typehints.Union[v, type(None)])
            for k, v in test_case["type_schema"].items()
        ])
        test_case["avro_schema"] = nullify_avro_schema(test_case["avro_schema"])
        nullified_test_data.append(test_case)
    return nullified_test_data


# Extend the shared cases with their nullified variants.
TEST_DATA += nullify_data_and_schemas(TEST_DATA)
class DatatypeInferenceTest(unittest.TestCase):
    """Parameterized tests for datatype_inference schema inference."""

    @parameterized.expand([(d["name"], d["data"], d["type_schema"])
                           for d in TEST_DATA])
    def test_infer_typehints_schema(self, _, data, schema):
        typehints_schema = datatype_inference.infer_typehints_schema(data)
        self.assertEqual(typehints_schema, schema)

    @parameterized.expand([(d["name"], d["data"], d["pyarrow_schema"])
                           for d in TEST_DATA])
    @unittest.skipIf(pa is None, "PyArrow is not installed")
    def test_infer_pyarrow_schema(self, _, data, schema):
        pyarrow_schema = datatype_inference.infer_pyarrow_schema(data)
        self.assertEqual(pyarrow_schema, schema)

    @parameterized.expand([(d["name"], d["data"], d["avro_schema"])
                           for d in TEST_DATA])
    def test_infer_avro_schema(self, _, data, schema):
        schema = schema.copy()  # Otherwise, it would be mutated by `.pop()`
        avro_schema = datatype_inference.infer_avro_schema(data, use_fastavro=False)
        avro_schema = avro_schema.to_json()
        # Compare the fields pairwise so a failure points at one field.
        fields1 = avro_schema.pop("fields")
        fields2 = schema.pop("fields")
        self.assertDictEqual(avro_schema, schema)
        for field1, field2 in zip(fields1, fields2):
            self.assertDictEqual(field1, field2)

    @parameterized.expand([(d["name"], d["data"], d["avro_schema"])
                           for d in TEST_DATA])
    def test_infer_fastavro_schema(self, _, data, schema):
        # fastavro is imported lazily so the rest of the suite runs without it.
        from fastavro import parse_schema
        schema = parse_schema(schema)
        avro_schema = datatype_inference.infer_avro_schema(data, use_fastavro=True)
        fields1 = avro_schema.pop("fields")
        fields2 = schema.pop("fields")
        self.assertDictEqual(avro_schema, schema)
        for field1, field2 in zip(fields1, fields2):
            self.assertDictEqual(field1, field2)


if __name__ == '__main__':
    logging.getLogger().setLevel(logging.INFO)
    unittest.main()
| |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six import PY3, text_type as unicode, unichr
import sys
from six.moves.html_parser import HTMLParser
from six.moves.html_entities import entitydefs
NON_BREAKING_SPACE = u'\xA0'  # U+00A0; replaced with a plain space in cell data
class HtmlReader(HTMLParser):
    """HTML parser that extracts table content and feeds it to a populator."""

    # Parser states: skip everything / inside a table whose first row decides
    # acceptance / inside an accepted table whose rows are forwarded.
    IGNORE = 0
    INITIAL = 1
    PROCESS = 2

    def __init__(self):
        HTMLParser.__init__(self)
        # Default encoding; may be overridden by a <meta> tag or an XML PI.
        self._encoding = 'ISO-8859-1'
        # Dispatch table for start/end tag events; <th> is handled as <td>.
        self._handlers = {'table_start' : self.table_start,
                          'table_end' : self.table_end,
                          'tr_start' : self.tr_start,
                          'tr_end' : self.tr_end,
                          'td_start' : self.td_start,
                          'td_end' : self.td_end,
                          'th_start' : self.td_start,
                          'th_end' : self.td_end,
                          'br_start' : self.br_start,
                          'meta_start' : self.meta_start}

    def read(self, htmlfile, populator):
        """Parse `htmlfile` line by line, feeding table rows to `populator`."""
        self.populator = populator
        self.state = self.IGNORE
        self.current_row = None
        self.current_cell = None
        for line in htmlfile.readlines():
            # Only decode if not already unicode (Python 3 str).
            # 2to3 changes `unicode` to `str`.
            if type(line) is not unicode:
                line = self._decode(line)
            self.feed(line)
        # Calling close is required by the HTMLParser but may cause problems
        # if the same instance of our HtmlParser is reused. Currently it's
        # used only once so there's no problem.
        self.close()
        self.populator.eof()

    def _decode(self, line):
        # Decode raw bytes using the most recently detected encoding.
        return line.decode(self._encoding)

    def handle_starttag(self, tag, attrs):
        handler = self._handlers.get(tag+'_start')
        if handler is not None:
            handler(attrs)

    def handle_endtag(self, tag):
        handler = self._handlers.get(tag+'_end')
        if handler is not None:
            handler()

    def handle_data(self, data):
        # Text only matters inside a cell of a non-ignored table.
        if self.state == self.IGNORE or self.current_cell is None:
            return
        if NON_BREAKING_SPACE in data:
            data = data.replace(NON_BREAKING_SPACE, ' ')
        self.current_cell.append(data)

    def handle_entityref(self, name):
        value = self._handle_entityref(name)
        self.handle_data(value)

    def _handle_entityref(self, name):
        if name == 'apos':    # missing from entitydefs
            return "'"
        try:
            value = entitydefs[name]
        except KeyError:
            # Unknown entity: pass it through verbatim.
            return '&'+name+';'
        if value.startswith('&#'):
            # entitydefs may map a name to a numeric character reference.
            return unichr(int(value[2:-1]))
        # Only decode if not already unicode (Python 3 str).
        # 2to3 changes `unicode` to `str`.
        if type(value) is not unicode:
            value = value.decode('ISO-8859-1')
        return value

    def handle_charref(self, number):
        value = self._handle_charref(number)
        self.handle_data(value)

    def _handle_charref(self, number):
        # &#xHH; is hexadecimal, &#NN; is decimal.
        if number.startswith(('x', 'X')):
            base = 16
            number = number[1:]
        else:
            base = 10
        try:
            return unichr(int(number, base))
        except ValueError:
            # Invalid reference: pass it through verbatim.
            return '&#'+number+';'

    def unknown_decl(self, data):
        # Ignore everything even if it's invalid. This kind of stuff comes
        # at least from MS Excel
        pass

    def table_start(self, attrs=None):
        # A new table: its first row will decide whether it is processed.
        self.state = self.INITIAL
        self.current_row = None
        self.current_cell = None

    def table_end(self):
        if self.current_row is not None:
            # Close a row left open when the table ended.
            self.tr_end()
        self.state = self.IGNORE

    def tr_start(self, attrs=None):
        if self.current_row is not None:
            # Implicitly close the previous row (missing </tr>).
            self.tr_end()
        self.current_row = []

    def tr_end(self):
        if self.current_row is None:
            return
        if self.current_cell is not None:
            self.td_end()
        if self.state == self.INITIAL:
            # The first row decides whether this table is accepted at all.
            accepted = self.populator.start_table(self.current_row)
            self.state = self.PROCESS if accepted else self.IGNORE
        elif self.state == self.PROCESS:
            self.populator.add(self.current_row)
        self.current_row = None

    def td_start(self, attrs=None):
        if self.current_cell is not None:
            # Implicitly close the previous cell (missing </td>).
            self.td_end()
        if self.current_row is None:
            # Implicitly open a row (missing <tr>).
            self.tr_start()
        self.current_cell = []

    def td_end(self):
        if self.current_cell is not None and self.state != self.IGNORE:
            cell = ''.join(self.current_cell)
            self.current_row.append(cell)
        self.current_cell = None

    def br_start(self, attrs=None):
        # Line breaks inside cells become newlines in the cell text.
        self.handle_data('\n')

    def meta_start(self, attrs):
        encoding = self._get_encoding_from_meta(attrs)
        if encoding:
            self._encoding = encoding

    def _get_encoding_from_meta(self, attrs):
        # Supports both <meta charset="..."> (HTML5) and the older
        # <meta http-equiv="Content-Type" content="...; charset=..."> form.
        valid_http_equiv = False
        encoding = None
        for name, value in attrs:
            name = name.lower()
            if name == 'charset':   # html5
                return value
            if name == 'http-equiv' and value.lower() == 'content-type':
                valid_http_equiv = True
            if name == 'content':
                encoding = self._get_encoding_from_content_attr(value)
        return encoding if valid_http_equiv else None

    def _get_encoding_from_content_attr(self, value):
        # Returns None implicitly when no charset token is present.
        for token in value.split(';'):
            token = token.strip()
            if token.lower().startswith('charset='):
                return token[8:]

    def handle_pi(self, data):
        # An XML processing instruction may also declare the encoding.
        encoding = self._get_encoding_from_pi(data)
        if encoding:
            self._encoding = encoding

    def _get_encoding_from_pi(self, data):
        data = data.strip()
        if not data.lower().startswith('xml '):
            return None
        if data.endswith('?'):
            data = data[:-1]
        for token in data.split():
            if token.lower().startswith('encoding='):
                encoding = token[9:]
                if encoding.startswith("'") or encoding.startswith('"'):
                    # Strip the surrounding quotes.
                    encoding = encoding[1:-1]
                return encoding
        return None
| |
import copy
import os
from bgwgen import qe
# Directory holding the expected-output fixture files for these tests.
fixtures_dir = os.path.join('tests', 'fixtures', '1-qe')

# Quantum ESPRESSO input configuration shared by every test below
# (MoS2 example: namelists, cards, k-point grids and pp_in settings).
config = {
    '&control': {
        'prefix': '\'MoS2\'',
        'pseudo_dir': '\'../pseudo\'',
    },
    '&system': {
        'ibrav': '4',
        'celldm(1)': '3.169',
        'celldm(3)': '20.0',
        'ecutwfc': '45.0',
        'nbnd': '100',
    },
    'ATOMIC_POSITIONS': {
        'option': 'angstrom',
        'value': ('\n'
                  'Mo 1.5845 0.9148 3.0810\n'
                  'S 0.0000 1.8296 1.5158\n'
                  'S 0.0000 1.8296 4.6461\n'
                  )
    },
    'ATOMIC_SPECIES': {
        'value': ('\n'
                  'Mo 95.95 Mo.UPF\n'
                  'S 32.06 S.UPF'
                  )
    },
    'K_POINTS': {
        'value': '12 12 1 1 1 0',
    },
    'K_POINTS_bands': {
        'option': 'crystal_b',
        'value': ('\n'
                  '4\n'
                  '0.000 0.000 0.000 50\n'
                  '0.500 0.000 0.000 50\n'
                  '0.333 0.333 0.000 50\n'
                  '0.000 0.000 0.000 0'
                  )
    },
    'kgrid': {
        'q-shift': '0.001 0.0 0.0',
        'cell': ('1.0 0.0 0.0\n'
                 '0.0 1.0 0.0\n'
                 '0.0 0.0 1.0'
                 )
    },
    'pp_in': {
        'vxc_diag_nmin': '1',
        'vxc_diag_nmax': '44',
    },
}
def _read_file(path):
    """Return the full content of the file at *path*."""
    with open(path, 'r') as f:
        return f.read()


def _assert_dir_files(d, expected_dir, names):
    """Assert each generated file in *d* matches its '<name>.expected' fixture."""
    for name in names:
        assert d.join(name).read() == _read_file(
            os.path.join(expected_dir, name + '.expected'))


def test_namelist_block():
    """Returns the correct string for the control namelist."""
    expect = ('&control\n'
              'prefix = \'MoS2\'\n'
              'pseudo_dir = \'../pseudo\'\n'
              '/\n'
              )
    assert qe.namelist_block(config, '&control') == expect


def test_card_block():
    """Returns the correct string for the specified card or an empty string if
    card does not exists in the config."""
    card = 'ATOMIC_POSITIONS'
    expected = '{} angstrom\n{}\n'.format(card, config[card]['value'].strip())
    assert qe.card_block(config, card) == expected
    assert qe.card_block({}, 'foo') == ''


def test_create_link_files(tmpdir):
    """Creates a 'link-files' executable bash script."""
    qe.create_link_files(config, tmpdir.realpath())
    assert tmpdir.join('link-files').read() == _read_file(
        os.path.join(fixtures_dir, 'link-files.expected'))


def test_create_kgrid_in(tmpdir):
    """Creates an 'kgrid.in' input file with the correct config."""
    qe.create_kgrid_in(config, tmpdir.realpath())
    assert tmpdir.join('kgrid.in').read() == _read_file(
        os.path.join(fixtures_dir, 'kgrid.in.expected'))


def test_create_in(tmpdir):
    """Creates an 'in' input file with the correct config."""
    c = copy.deepcopy(config)
    # create_in is exercised with an automatic k-point grid, not a band path.
    c['K_POINTS']['option'] = 'automatic'
    qe.create_in(c, tmpdir.realpath())
    assert tmpdir.join('in').read() == _read_file(
        os.path.join(fixtures_dir, 'create_in.expected'))


def test_create_pp_in(tmpdir):
    """Creates an 'pp_in' input file with the correct config."""
    qe.create_pp_in(config, tmpdir.realpath())
    assert tmpdir.join('pp_in').read() == _read_file(
        os.path.join(fixtures_dir, 'create_pp_in.expected'))


def test_create_scf(tmpdir):
    """Creates a new directory '1-scf' and all its input files."""
    dirname = '1-scf'
    qe.create_scf(config, tmpdir.realpath())
    _assert_dir_files(tmpdir.join(dirname),
                      os.path.join(fixtures_dir, dirname),
                      ['in', 'clean'])


def test_create_wfn(tmpdir):
    """Creates a new directory '2-wfn' and all its input files."""
    dirname = '2-wfn'
    qe.create_wfn(config, tmpdir.realpath())
    _assert_dir_files(tmpdir.join(dirname),
                      os.path.join(fixtures_dir, dirname),
                      ['kgrid.in', 'in', 'pp_in', 'get-kgrid', 'clean'])


def test_create_wfnq(tmpdir):
    """Creates a new directory '3-wfnq' and all its input files."""
    dirname = '3-wfnq'
    qe.create_wfnq(config, tmpdir.realpath())
    _assert_dir_files(tmpdir.join(dirname),
                      os.path.join(fixtures_dir, dirname),
                      ['kgrid.in', 'in', 'pp_in'])


def test_create_wfn_co(tmpdir):
    """Creates a new directory '4-wfn_co' and all its input files."""
    dirname = '4-wfn_co'
    qe.create_wfn_co(config, tmpdir.realpath())
    _assert_dir_files(tmpdir.join(dirname),
                      os.path.join(fixtures_dir, dirname),
                      ['kgrid.in', 'in', 'pp_in'])


def test_create_wfn_fi(tmpdir):
    """Creates a new directory '5-wfn_fi' and all its input files."""
    dirname = '5-wfn_fi'
    qe.create_wfn_fi(config, tmpdir.realpath())
    _assert_dir_files(tmpdir.join(dirname),
                      os.path.join(fixtures_dir, dirname),
                      ['kgrid.in', 'in', 'pp_in'])


def test_create_wfnq_fi(tmpdir):
    """Creates a new directory '6-wfnq_fi' and all its input files."""
    dirname = '6-wfnq_fi'
    qe.create_wfnq_fi(config, tmpdir.realpath())
    _assert_dir_files(tmpdir.join(dirname),
                      os.path.join(fixtures_dir, dirname),
                      ['kgrid.in', 'in', 'pp_in'])


def test_create_bands(tmpdir):
    """Creates a new directory '7-bands' and all its input files."""
    dirname = '7-bands'
    qe.create_bands(config, tmpdir.realpath())
    _assert_dir_files(tmpdir.join(dirname),
                      os.path.join(fixtures_dir, dirname),
                      ['in', 'pp_in'])


def test_create_qe(tmpdir):
    """Create a new directory '1-qe' and all its directories."""
    qedir = tmpdir.join('1-qe')
    qe.create_qe(config, tmpdir.realpath())
    assert os.path.isdir(qedir)
    assert os.path.isfile(qedir.join('link-files'))
    # One sub-directory per calculation step, in pipeline order.
    for sub in ('1-scf', '2-wfn', '3-wfnq', '4-wfn_co',
                '5-wfn_fi', '6-wfnq_fi', '7-bands'):
        assert os.path.isdir(qedir.join(sub))
| |
#!/usr/bin/env python

__author__ = "Gabriel Bassett"

# Template header; this bare string is evaluated and discarded at import
# time (it is not the module docstring, since it is not the first statement).
"""
AUTHOR: {0}
DATE: <DATE>
DEPENDENCIES: <a list of modules requiring installation>
Copyright <YEAR> {0}
LICENSE:
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
DESCRIPTION:
<ENTER DESCRIPTION>
""".format(__author__)

# PRE-USER SETUP
pass

########### NOT USER EDITABLE ABOVE THIS POINT #################

# USER VARIABLES
# Plugin metadata file read at import time, and the default plugin name
# (NAME may be overridden by the [Core] section of that file).
PLUGIN_CONFIG_FILE = "page_rank_2.yapsy-plugin"
NAME = "PageRank2"

########### NOT USER EDITABLE BELOW THIS POINT #################
## IMPORTS
from yapsy.IPlugin import IPlugin
import logging
import networkx as nx
from datetime import datetime # timedelta imported above
import uuid
import ConfigParser
import inspect
## SETUP
# Resolve the directory containing this module so the .yapsy-plugin config
# file can be opened relative to it.
loc = inspect.getfile(inspect.currentframe())
ind = loc.rfind("/")
loc = loc[:ind+1]
# NOTE(review): Python 2-era API (SafeConfigParser/readfp are deprecated),
# and the file handle passed to readfp() is never closed.
config = ConfigParser.SafeConfigParser()
config.readfp(open(loc + PLUGIN_CONFIG_FILE))
# Allow the config file to override the plugin name and logging settings.
if config.has_section('Core'):
    if 'name' in config.options('Core'):
        NAME = config.get('Core', 'name')
if config.has_section('Log'):
    if 'level' in config.options('Log'):
        LOGLEVEL = config.get('Log', 'level')
    if 'file' in config.options('Log'):
        LOGFILE = config.get('Log', 'file')
class PluginOne(IPlugin):
# TODO: The init should contain anything to load modules or data files that should be variables of the plugin object
def __init__(self):
    # Nothing to load at construction time; setup happens in configure().
    pass
# TODO: Configuration needs to set the values needed to identify the plugin in the plugin database as well as ensure everyhing loaded correctly
# TODO: Current layout is for an enrichment plugin
# TODO: enrichment [type, successful_load, name, description, inputs to enrichment such as 'ip', cost, speed]
# TODO: interface [type, successful_load, name]
# TODO: query [TBD]
# TODO: minion [TBD]
def configure(self):
    """Read this plugin's metadata from the module-level config parser.

    :return: list of [plugin type, successful load (bool), name, description,
             resource cost (1-10, 1=low), speed (1-10, 1=fast)].  On failure
             the type (and possibly the description) entry is None and the
             success flag is False.
    """
    config_options = config.options("Configuration")
    # Cost and speed fall back to a large sentinel when not configured.
    if 'cost' in config_options:
        cost = config.get('Configuration', 'cost')
    else:
        cost = 9999
    if 'speed' in config_options:
        speed = config.get('Configuration', 'speed')
    else:
        speed = 9999
    if config.has_section('Documentation') and 'description' in config.options('Documentation'):
        description = config.get('Documentation', 'description')
    else:
        # Fixed mismatched quote in the original message
        # ("'Description not in config file.").
        logging.error("'Description' not in config file.")
        return [None, False, NAME, None, cost, speed]
    if 'type' in config_options:
        plugin_type = config.get('Configuration', 'type')
    else:
        logging.error("'Type' not specified in config file.")
        return [None, False, NAME, description, cost, speed]
    return [plugin_type, True, NAME, description, cost, speed]
def score(self, sg, topic, personalization=None):  # get_pagerank_probability_2
    """
    :param sg: egocentric subgraph around topic in networkx format
    :param topic: networkx graph of the topic nodes; the walk always restarts
        at (and dangling mass is redistributed to) these nodes
    :param personalization: Dictionary with key of a node and value of a node
        weight. If none specified, defaults to the linear weight of the
        'topic_distance' feature of the nodes. The topic_distance is the
        topic for which the subgraph was generated.
    :return: Dictionary of probabilities keyed by node
    """
    # pagerank requires a (weighted) digraph; collapse parallel edges first.
    if sg.is_multigraph():
        sg = self.multigraph_to_digraph(sg)
    if personalization is None:
        # INSERT WEIGHTING FUNCTION BELOW (default: linear_weight of the
        # node's 'topic_distance' attribute).
        personalization = {
            node: self.linear_weight(sg.node[node]['topic_distance'])
            for node in sg.nodes()
        }
    # Build topic weights to start topic with all weight and always jump to
    # topic.  (float() keeps the division true under Python 2.)
    topic_weight = 1 / float(len(topic.nodes()))
    topic_weighted = {k: (topic_weight if k in topic.nodes() else 0)
                      for k in sg.nodes()}
    # return the pagerank scores
    return nx.pagerank(sg,
                       personalization=personalization,
                       weight='confidence',
                       nstart=topic_weighted,
                       dangling=topic_weighted)
def multigraph_to_digraph(self, g):
"""
:param g: takes a networkx mulitgraph
:return: returns a networkx digraph with edge weights representing the number of edges
NOTE: This butchers duplicate edge properties. If converting to score, use original edges in output.
"""
G = nx.DiGraph()
edge_attributes = {}
# if g isn't really a multigraph, just return it
if not g.is_multigraph():
return g
# collapse down to a diagraph
G.add_nodes_from(g.nodes(data=True))
G.add_edges_from(g.edges(data=True))
# for each edge, weight the confidence by the number of edges
'''
# captures a multiple of the confidence on the edge in the output graph
for edge in G.edges():
count = g.edges().count(edge)
if "count" > 1:
if "confidence" in G.edge[edge[0]][edge[1]]:
G.edge[edge[0]][edge[1]]['confidence'] *= count
else:
G.edge[edge[0]][edge[1]]["confidence"] = count
'''
# Captures every confidence
for edge in G.edges():
confidence = 0
for src_edge in g.edge[edge[0]][edge[1]].values():
confidence += src_edge.get('confidence', 1)
G.edge[edge[0]][edge[1]]['confidence'] = confidence
# # collapse down to a diagraph
# G.add_nodes_from(g.nodes(data=True))
# G.add_edges_from(g.edges(data=True))
return G
### DISTANCE WEIGHTS ###
def linear_weight(self, distance, ddp=.2):
"""
:param distance: distance from topic
:param ddp: percentage to degrade
:return: Linear weighting factor as float
"""
return 1 - (distance * ddp)
def log_weight(self, distance, a=1, b=1, n=3, pwr=1):
"""
:param distance: distance: distance from topic
:param a: constant to shape graph. Adjusts hight at 0 = a / (1 + b)
:param b: constant to shape graph.
:param n: constant to shape graph.
:param pwr: constant to shape graph.
:return: log weighting factor as float
"""
return a / (1 + b*np.exp((distance-n) * pwr))
    def exponential_weight(self, distance, b=2):
        """Exponentially degrade weight with distance: exp(-distance/b).

        :param distance: distance from topic
        :param b: decay constant; larger b = slower decay
        :return: exponential weighting factor
        """
        # NOTE(review): '-distance/b' floor-divides when both operands are ints
        # under Python 2 — confirm callers pass floats (or that py3 is assumed).
        return np.exp(-distance/b)
    def normal_weight(self, distance, pwr=2, a=1.1, b=10, c=1):
        """Gaussian-like weight that degrades with distance from the topic.

        :param distance: distance from topic
        :param pwr: constant to shape graph. Higher = steeper decline
        :param a: constant to shape graph; scales the overall height
        :param b: constant to shape graph. lower = greater spread
        :param c: constant to shape graph; shifts the curve along the distance axis
        :return: normal weighting factor as float
        (alternate shape used previously: pwr = 2.5, a = 1, c = 0, b = 30)
        """
        return a * np.exp(-(distance + c)**pwr/b)
| |
"""Directory-related unit tests for AD Connect Directory Services.
The logic to check the details of VPCs and Subnets is shared between the
"create directory" APIs, so it will not be repeated here.
"""
from datetime import datetime, timezone
import boto3
from botocore.exceptions import ClientError
import pytest
from moto import mock_ds
from moto.core.utils import get_random_hex
from moto.ec2 import mock_ec2
from .test_ds_simple_ad_directory import TEST_REGION, create_vpc, create_subnets
def create_test_ad_connector(
    ds_client,
    ec2_client,
    vpc_settings=None,
    customer_dns_ips=None,
    customer_user_name="Admin",
    tags=None,
):  # pylint: disable=too-many-arguments
    """Create a valid AD Connector directory and return its directory ID."""
    # Provision a VPC and subnets only when the caller didn't supply them.
    if not vpc_settings:
        vpc_id = create_vpc(ec2_client)
        vpc_settings = {
            "VpcId": vpc_id,
            "SubnetIds": create_subnets(ec2_client, vpc_id),
        }
    connect_settings = {
        "VpcId": vpc_settings["VpcId"],
        "SubnetIds": vpc_settings["SubnetIds"],
        "CustomerDnsIps": customer_dns_ips or ["1.2.3.4", "5.6.7.8"],
        "CustomerUserName": customer_user_name,
    }
    response = ds_client.connect_directory(
        Name=f"test-{get_random_hex(6)}.test",
        Password="4ADConnectPassword",
        Size="Small",
        ConnectSettings=connect_settings,
        Tags=tags or [],
    )
    return response["DirectoryId"]
@mock_ds
def test_ds_connect_directory_validations():
    """Test validation errors that aren't caught by botocore.

    Most of this validation is shared with the Simple AD directory, but
    this verifies that it is invoked from connect_directory().
    """
    client = boto3.client("ds", region_name=TEST_REGION)
    random_num = get_random_hex(6)
    # Verify ValidationException error messages are accumulated properly.
    bad_name = f"bad_name_{random_num}"
    bad_password = "bad_password"
    bad_size = "foo"
    ok_connect_settings = {
        "VpcId": f"vpc-{random_num}",
        "SubnetIds": [f"subnet-{random_num}01", f"subnet-{random_num}02"],
        "CustomerUserName": "foo",
        "CustomerDnsIps": ["1.2.3.4"],
    }
    with pytest.raises(ClientError) as exc:
        client.connect_directory(
            Name=bad_name,
            Password=bad_password,
            Size=bad_size,
            ConnectSettings=ok_connect_settings,
        )
    err = exc.value.response["Error"]
    assert err["Code"] == "ValidationException"
    assert "3 validation errors detected" in err["Message"]
    # Password must match the AWS complexity regex.
    assert (
        r"Value at 'password' failed to satisfy constraint: "
        r"Member must satisfy regular expression pattern: "
        r"^(?=^.{8,64}$)((?=.*\d)(?=.*[A-Z])(?=.*[a-z])|"
        r"(?=.*\d)(?=.*[^A-Za-z0-9\s])(?=.*[a-z])|"
        r"(?=.*[^A-Za-z0-9\s])(?=.*[A-Z])(?=.*[a-z])|"
        r"(?=.*\d)(?=.*[A-Z])(?=.*[^A-Za-z0-9\s]))^.*$" in err["Message"]
    )
    assert (
        f"Value '{bad_size}' at 'size' failed to satisfy constraint: "
        f"Member must satisfy enum value set: [Small, Large]" in err["Message"]
    )
    assert (
        fr"Value '{bad_name}' at 'name' failed to satisfy constraint: "
        fr"Member must satisfy regular expression pattern: "
        fr"^([a-zA-Z0-9]+[\.-])+([a-zA-Z0-9])+$" in err["Message"]
    )
    # Over-long description plus an invalid short name -> two accumulated errors.
    too_long = (
        "Test of directory service 0123456789 0123456789 0123456789 "
        "0123456789 0123456789 0123456789 0123456789 0123456789 0123456789 "
        "0123456789 0123456789"
    )
    short_name = "a:b.c"
    with pytest.raises(ClientError) as exc:
        client.connect_directory(
            Name=f"test{random_num}.test",
            Password="TESTfoobar1",
            ConnectSettings=ok_connect_settings,
            Description=too_long,
            ShortName=short_name,
            Size="Small",
        )
    err = exc.value.response["Error"]
    assert err["Code"] == "ValidationException"
    assert "2 validation errors detected" in err["Message"]
    assert (
        f"Value '{too_long}' at 'description' failed to satisfy constraint: "
        f"Member must have length less than or equal to 128" in err["Message"]
    )
    pattern = r'^[^\/:*?"<>|.]+[^\/:*?"<>|]*$'
    assert (
        f"Value '{short_name}' at 'shortName' failed to satisfy constraint: "
        f"Member must satisfy regular expression pattern: " + pattern
    ) in err["Message"]
    # Malformed subnet IDs inside ConnectSettings.
    bad_connect_settings = {
        "VpcId": f"vpc-{random_num}",
        "SubnetIds": ["foo"],
        "CustomerUserName": "foo",
        "CustomerDnsIps": ["1.2.3.4"],
    }
    with pytest.raises(ClientError) as exc:
        client.connect_directory(
            Name=f"test{random_num}.test",
            Password="TESTfoobar1",
            ConnectSettings=bad_connect_settings,
            Size="Small",
        )
    err = exc.value.response["Error"]
    assert err["Code"] == "ValidationException"
    assert "1 validation error detected" in err["Message"]
    assert (
        fr"Value '['{bad_connect_settings['SubnetIds'][0]}']' at "
        fr"'connectSettings.vpcSettings.subnetIds' failed to satisfy "
        fr"constraint: Member must satisfy regular expression pattern: "
        fr"^(subnet-[0-9a-f]{{8}}|subnet-[0-9a-f]{{17}})$" in err["Message"]
    )
@mock_ec2
@mock_ds
def test_ds_connect_directory_good_args():
    """Test creation of an AD Connector directory using good arguments."""
    ds_client = boto3.client("ds", region_name=TEST_REGION)
    ec2_client = boto3.client("ec2", region_name=TEST_REGION)
    # A valid connect_directory() call returns a directory ID.
    assert create_test_ad_connector(ds_client, ec2_client).startswith("d-")
    # Fill up to the connected-directories limit, then expect one more to fail.
    limit = ds_client.get_directory_limits()["DirectoryLimits"][
        "ConnectedDirectoriesLimit"
    ]
    for _ in range(limit):
        create_test_ad_connector(ds_client, ec2_client)
    with pytest.raises(ClientError) as exc:
        create_test_ad_connector(ds_client, ec2_client)
    error = exc.value.response["Error"]
    assert error["Code"] == "DirectoryLimitExceededException"
    assert (
        f"Directory limit exceeded. A maximum of {limit} "
        f"directories may be created" in error["Message"]
    )
@mock_ec2
@mock_ds
def test_ds_connect_directory_bad_args():
    """Test validation of non-VPC-related ConnectSettings values."""
    client = boto3.client("ds", region_name=TEST_REGION)
    ec2_client = boto3.client("ec2", region_name=TEST_REGION)
    # Bad CustomerUserName ('$' is outside the allowed character class).
    bad_username = "oops$"
    with pytest.raises(ClientError) as exc:
        create_test_ad_connector(client, ec2_client, customer_user_name=bad_username)
    err = exc.value.response["Error"]
    assert err["Code"] == "ValidationException"
    assert "1 validation error detected" in err["Message"]
    assert (
        fr"Value '{bad_username}' at 'connectSettings.customerUserName' "
        fr"failed to satisfy constraint: Member must satisfy regular "
        fr"expression pattern: ^[a-zA-Z0-9._-]+$" in err["Message"]
    )
    # Bad CustomerDnsIps (450 is out of range for an IPv4 octet).
    bad_dns_ip = ["1.2.3.450"]
    with pytest.raises(ClientError) as exc:
        create_test_ad_connector(client, ec2_client, customer_dns_ips=bad_dns_ip)
    err = exc.value.response["Error"]
    assert err["Code"] == "ValidationException"
    assert "1 validation error detected" in err["Message"]
    assert (
        fr"Value '{bad_dns_ip}' at 'connectSettings.customerDnsIps' "
        fr"failed to satisfy constraint: Member must satisfy regular "
        fr"expression pattern: ^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.)"
        fr"{{3}}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$" in err["Message"]
    )
@mock_ec2
@mock_ds
def test_ds_connect_directory_delete():
    """Test deletion of an AD Connector directory."""
    ds_client = boto3.client("ds", region_name=TEST_REGION)
    ec2_client = boto3.client("ec2", region_name=TEST_REGION)
    # Deleting an existing AD Connector echoes back its directory ID.
    directory_id = create_test_ad_connector(ds_client, ec2_client)
    deletion = ds_client.delete_directory(DirectoryId=directory_id)
    assert deletion["DirectoryId"] == directory_id
@mock_ec2
@mock_ds
def test_ds_connect_directory_describe():
    """Test describe_directories() for an AD Connector directory."""
    client = boto3.client("ds", region_name=TEST_REGION)
    ec2_client = boto3.client("ec2", region_name=TEST_REGION)
    # Test that if no directory IDs are specified, all are returned.
    directory_id = create_test_ad_connector(client, ec2_client)
    result = client.describe_directories()
    directory = result["DirectoryDescriptions"][0]
    assert len(result["DirectoryDescriptions"]) == 1
    assert directory["DesiredNumberOfDomainControllers"] == 0
    assert not directory["SsoEnabled"]
    assert directory["DirectoryId"] == directory_id
    assert directory["Name"].startswith("test-")
    assert directory["Alias"] == directory_id
    assert directory["AccessUrl"] == f"{directory_id}.awsapps.com"
    assert directory["Stage"] == "Active"
    # Timestamps should never be in the future.
    assert directory["LaunchTime"] <= datetime.now(timezone.utc)
    assert directory["StageLastUpdatedDateTime"] <= datetime.now(timezone.utc)
    assert directory["Type"] == "ADConnector"
    # ConnectSettings reflect the VPC/subnets created by the helper.
    assert directory["ConnectSettings"]["VpcId"].startswith("vpc-")
    assert len(directory["ConnectSettings"]["SubnetIds"]) == 2
    assert directory["ConnectSettings"]["CustomerUserName"] == "Admin"
    assert len(directory["ConnectSettings"]["ConnectIps"]) == 2
    assert directory["Size"] == "Small"
    # Default DNS IPs supplied by create_test_ad_connector().
    assert set(directory["DnsIpAddrs"]) == set(["1.2.3.4", "5.6.7.8"])
    assert "NextToken" not in result
@mock_ec2
@mock_ds
def test_ds_connect_directory_tags():
    """Test that directory tags can be added and retrieved."""
    ds_client = boto3.client("ds", region_name=TEST_REGION)
    ec2_client = boto3.client("ec2", region_name=TEST_REGION)
    expected_tags = [{"Key": str(x), "Value": str(x)} for x in range(10)]
    directory_id = create_test_ad_connector(ds_client, ec2_client, tags=expected_tags)
    # The tags come back exactly as they were supplied at creation time.
    listed = ds_client.list_tags_for_resource(ResourceId=directory_id)["Tags"]
    assert len(listed) == 10
    assert listed == expected_tags
@mock_ec2
@mock_ds
def test_ds_get_connect_directory_limits():
    """Test return value for AD Connector directory limits."""
    ds_client = boto3.client("ds", region_name=TEST_REGION)
    ec2_client = boto3.client("ec2", region_name=TEST_REGION)
    # Saturate the connected-directories quota and verify the reported counts.
    initial = ds_client.get_directory_limits()["DirectoryLimits"]
    for _ in range(initial["ConnectedDirectoriesLimit"]):
        create_test_ad_connector(ds_client, ec2_client)
    limits = ds_client.get_directory_limits()["DirectoryLimits"]
    assert (
        limits["ConnectedDirectoriesCurrentCount"]
        == limits["ConnectedDirectoriesLimit"]
    )
    assert limits["ConnectedDirectoriesLimitReached"]
    # Connected directories must not bleed into the cloud-only counters.
    assert not limits["CloudOnlyDirectoriesCurrentCount"]
    assert not limits["CloudOnlyMicrosoftADCurrentCount"]
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cinderclient import exceptions
from cinderclient import service_catalog
from cinderclient.tests.unit import utils
# Taken directly from keystone/content/common/samples/auth.json
# Do not edit this structure. Instead, grab the latest from there.
SERVICE_CATALOG = {
    "access": {
        "token": {
            "id": "ab48a9efdfedb23ty3494",
            "expires": "2010-11-01T03:32:15-05:00",
            "tenant": {
                "id": "345",
                "name": "My Project"
            }
        },
        "user": {
            "id": "123",
            "name": "jqsmith",
            "roles": [
                {
                    "id": "234",
                    "name": "compute:admin",
                },
                {
                    "id": "235",
                    "name": "object-store:admin",
                    "tenantId": "1",
                }
            ],
            "roles_links": [],
        },
        "serviceCatalog": [
            # Two 'compute' endpoints (tenants 1 and 2): url_for() without a
            # filter raises AmbiguousEndpoints in the tests below.
            {
                "name": "Cloud Servers",
                "type": "compute",
                "endpoints": [
                    {
                        "tenantId": "1",
                        "publicURL": "https://compute1.host/v1/1234",
                        "internalURL": "https://compute1.host/v1/1234",
                        "region": "North",
                        "versionId": "1.0",
                        "versionInfo": "https://compute1.host/v1/",
                        "versionList": "https://compute1.host/"
                    },
                    {
                        "tenantId": "2",
                        "publicURL": "https://compute1.host/v1/3456",
                        "internalURL": "https://compute1.host/v1/3456",
                        "region": "North",
                        "versionId": "1.1",
                        "versionInfo": "https://compute1.host/v1/",
                        "versionList": "https://compute1.host/"
                    },
                ],
                "endpoints_links": [],
            },
            {
                "name": "Cinder Volume Service",
                "type": "volume",
                "endpoints": [
                    {
                        "tenantId": "1",
                        "publicURL": "https://volume1.host/v1/1234",
                        "internalURL": "https://volume1.host/v1/1234",
                        "region": "South",
                        "versionId": "1.0",
                        "versionInfo": "uri",
                        "versionList": "uri"
                    },
                    {
                        "tenantId": "2",
                        "publicURL": "https://volume1.host/v1/3456",
                        "internalURL": "https://volume1.host/v1/3456",
                        "region": "South",
                        "versionId": "1.1",
                        "versionInfo": "https://volume1.host/v1/",
                        "versionList": "https://volume1.host/"
                    },
                ],
                "endpoints_links": [
                    {
                        "rel": "next",
                        "href": "https://identity1.host/v2.0/endpoints"
                    },
                ],
            },
            {
                "name": "Cinder Volume Service V2",
                "type": "volumev2",
                "endpoints": [
                    {
                        "tenantId": "1",
                        "publicURL": "https://volume1.host/v2/1234",
                        "internalURL": "https://volume1.host/v2/1234",
                        "region": "South",
                        "versionId": "2.0",
                        "versionInfo": "uri",
                        "versionList": "uri"
                    },
                    {
                        "tenantId": "2",
                        "publicURL": "https://volume1.host/v2/3456",
                        "internalURL": "https://volume1.host/v2/3456",
                        "region": "South",
                        "versionId": "1.1",
                        "versionInfo": "https://volume1.host/v2/",
                        "versionList": "https://volume1.host/"
                    },
                ],
                "endpoints_links": [
                    {
                        "rel": "next",
                        "href": "https://identity1.host/v2.0/endpoints"
                    },
                ],
            },
        ],
        "serviceCatalog_links": [
            {
                "rel": "next",
                "href": "https://identity.host/v2.0/endpoints?session=2hfh8Ar",
            },
        ],
    },
}
# Variant catalog exercised by test_compatibility_service_type(): the v2
# volume endpoints are published under the legacy "volume" service type
# (there is no separate "volumev2" entry).
SERVICE_COMPATIBILITY_CATALOG = {
    "access": {
        "token": {
            "id": "ab48a9efdfedb23ty3494",
            "expires": "2010-11-01T03:32:15-05:00",
            "tenant": {
                "id": "345",
                "name": "My Project"
            }
        },
        "user": {
            "id": "123",
            "name": "jqsmith",
            "roles": [
                {
                    "id": "234",
                    "name": "compute:admin",
                },
                {
                    "id": "235",
                    "name": "object-store:admin",
                    "tenantId": "1",
                }
            ],
            "roles_links": [],
        },
        "serviceCatalog": [
            {
                "name": "Cloud Servers",
                "type": "compute",
                "endpoints": [
                    {
                        "tenantId": "1",
                        "publicURL": "https://compute1.host/v1/1234",
                        "internalURL": "https://compute1.host/v1/1234",
                        "region": "North",
                        "versionId": "1.0",
                        "versionInfo": "https://compute1.host/v1/",
                        "versionList": "https://compute1.host/"
                    },
                    {
                        "tenantId": "2",
                        "publicURL": "https://compute1.host/v1/3456",
                        "internalURL": "https://compute1.host/v1/3456",
                        "region": "North",
                        "versionId": "1.1",
                        "versionInfo": "https://compute1.host/v1/",
                        "versionList": "https://compute1.host/"
                    },
                ],
                "endpoints_links": [],
            },
            {
                "name": "Cinder Volume Service V2",
                "type": "volume",
                "endpoints": [
                    {
                        "tenantId": "1",
                        "publicURL": "https://volume1.host/v2/1234",
                        "internalURL": "https://volume1.host/v2/1234",
                        "region": "South",
                        "versionId": "2.0",
                        "versionInfo": "uri",
                        "versionList": "uri"
                    },
                    {
                        "tenantId": "2",
                        "publicURL": "https://volume1.host/v2/3456",
                        "internalURL": "https://volume1.host/v2/3456",
                        "region": "South",
                        "versionId": "1.1",
                        "versionInfo": "https://volume1.host/v2/",
                        "versionList": "https://volume1.host/"
                    },
                ],
                "endpoints_links": [
                    {
                        "rel": "next",
                        "href": "https://identity1.host/v2.0/endpoints"
                    },
                ],
            },
        ],
        "serviceCatalog_links": [
            {
                "rel": "next",
                "href": "https://identity.host/v2.0/endpoints?session=2hfh8Ar",
            },
        ],
    },
}
class ServiceCatalogTest(utils.TestCase):
    """Tests for ServiceCatalog endpoint resolution (url_for)."""

    def test_building_a_service_catalog(self):
        sc = service_catalog.ServiceCatalog(SERVICE_CATALOG)
        # Two 'compute' endpoints exist, so a filter is required.
        self.assertRaises(exceptions.AmbiguousEndpoints, sc.url_for,
                          service_type='compute')
        self.assertEqual("https://compute1.host/v1/1234",
                         sc.url_for('tenantId', '1', service_type='compute'))
        self.assertEqual("https://compute1.host/v1/3456",
                         sc.url_for('tenantId', '2', service_type='compute'))
        # No compute endpoint lives in the 'South' region.
        self.assertRaises(exceptions.EndpointNotFound, sc.url_for,
                          "region", "South", service_type='compute')

    def test_alternate_service_type(self):
        sc = service_catalog.ServiceCatalog(SERVICE_CATALOG)
        self.assertRaises(exceptions.AmbiguousEndpoints, sc.url_for,
                          service_type='volume')
        self.assertEqual("https://volume1.host/v1/1234",
                         sc.url_for('tenantId', '1', service_type='volume'))
        self.assertEqual("https://volume1.host/v1/3456",
                         sc.url_for('tenantId', '2', service_type='volume'))
        # Fixed: the tenant-'2' volumev2 assertion was duplicated verbatim;
        # check the tenant-'1' endpoint as well instead of repeating it.
        self.assertEqual("https://volume1.host/v2/1234",
                         sc.url_for('tenantId', '1', service_type='volumev2'))
        self.assertEqual("https://volume1.host/v2/3456",
                         sc.url_for('tenantId', '2', service_type='volumev2'))
        # No volume endpoint lives in the 'North' region.
        self.assertRaises(exceptions.EndpointNotFound, sc.url_for,
                          "region", "North", service_type='volume')

    def test_compatibility_service_type(self):
        # v2 endpoints published under the legacy 'volume' type still resolve.
        sc = service_catalog.ServiceCatalog(SERVICE_COMPATIBILITY_CATALOG)
        self.assertEqual("https://volume1.host/v2/1234",
                         sc.url_for('tenantId', '1', service_type='volume'))
        self.assertEqual("https://volume1.host/v2/3456",
                         sc.url_for('tenantId', '2', service_type='volume'))
| |
# -*- coding: UTF-8 -*-
import os, sys
import datetime
import time
from redis_model.redis_client import RedisClient
from redis_model.models.mongo_utils import *
class MAttribute(object):
    # Shared client: every attribute descriptor talks to the same redis instance.
    redis = RedisClient.getInstance().redis

    def __init__(self):
        """Initialize the descriptor: base object, reference name and description."""
        self.bo = None
        self.ref = None
        # NOTE(review): 'descrpition' is a typo for 'description'; left unchanged
        # because other modules may already reference this attribute name.
        self.descrpition = ""

    @property
    def ref_klass(self):
        """
        Resolve self.ref (a class name) to its mongoengine Document class.
        return:
            the referenced Document subclass, or None when not found
        """
        from mongoengine.document import Document, EmbeddedDocument
        if self.ref:
            _known_models = {}
            # Walk Document subclasses up to three levels deep and index every
            # concrete model (those exposing an 'objects' manager) by name.
            for klass in Document.__subclasses__():
                if hasattr(klass, "objects"):
                    _known_models[klass.__name__] = klass
                for sub in klass.__subclasses__():
                    if hasattr(sub, "objects"):
                        _known_models[sub.__name__] = sub
                    for _sub in sub.__subclasses__():
                        if hasattr(_sub, "objects"):
                            _known_models[_sub.__name__] = _sub
            return _known_models.get(self.ref, None)

    def set(self, instance, val):
        """
        Set this attribute's value on the given instance.
        param:
            instance: model instance to mutate
            val: value to store under self.name
        """
        setattr(instance, self.name, val)

    def __set__(self, instance, val):
        """
        Descriptor protocol: store the value in the private '_<name>' slot.
        param:
            instance: model instance to mutate
            val: value to store
        """
        setattr(instance, "_" + self.name, val)

    def acceptable_types(self):
        """
        Types accepted by this attribute (Python 2 basestring).
        return:
            basestring
        """
        return basestring

    def validate(self, instance):
        """
        Validate the attribute, appending errors to instance._errors.
        param:
            instance: model instance being validated
        """
        if self.required:
            # NOTE(review): 'if not self' tests the descriptor itself, which is
            # always truthy here; probably meant to test the value stored on
            # 'instance' — verify before relying on this check.
            if not self:
                instance._errors.append("%s require" % self)

    @operGet
    def delete(self, key, **kwargs ):
        """ Delete the redis key and its companion '<key>_field' hash. """
        pipe = self.redis.pipeline()
        pipe.delete(key)
        pipe.delete(self.get_field_key(key))
        pipe.execute()
        print "delete key", key, self.get_field_key(key)
        #pipe.delete(self.get_member_key(key, "save_high_score"))
        #print "delete key" , key ,self.get_field_key(key)

    def get_field_key(self, key):
        """Return the companion hash key used for per-member metadata."""
        return "%s_field" % key

    def get_member_key(self, member, field):
        """Return the composite member key '<member>_<field>'."""
        return "%s_%s" % (member, field)
class MSortSetField(MAttribute):
    def __init__(self, ref=None, required=False, name=None, limit=0):
        """
        Initialize a redis sorted-set field descriptor.
        param:
            ref: name of the referenced model class
            required: True or False
            name: field name, default None
            limit: maximum number of members kept; 0 means unlimited
        """
        # NOTE(review): super(MAttribute, self) skips MAttribute.__init__ and
        # resolves to object.__init__, so bo/descrpition are never initialized
        # here; probably meant super(MSortSetField, self) — verify.
        super(MAttribute, self).__init__()
        self.name = name
        self.ref = ref
        self.required = required
        self.limit = limit
        # Trimming only triggers once the set grows past limit * delete_factor.
        self.delete_factor = 1.5

    @operSet
    def zadd(self, key, member, score, **kwargs):
        """
        Add the member to the sorted set under the given score; if the member
        already exists its score is updated.
        param:
            key: string
            member: string
            score: rank integer
            **kwargs: may include
                save_high_score: only persist when the score beats the best so far
                sort_order: True = higher is better, False = lower is better
        return:
            True or False
        """
        try:
            save_high_score = kwargs.get("save_high_score", False)
            if save_high_score:
                sort_order = kwargs.get("sort_order", True)
                hkey = self.get_field_key(key)
                hmember = self.get_member_key(member, "save_high_score")
                # Seed with the worst possible score when none is stored yet.
                if sort_order:
                    last_score = self.redis.hget(hkey, hmember) or 0
                else:
                    last_score = self.redis.hget(hkey, hmember) or sys.maxint
                last_score = int(last_score)
                if (sort_order and (score > last_score)) or (not sort_order and (score < last_score)):
                    self.redis.hset(hkey, hmember, score)
                else:
                    # Not a new best score: nothing to write.
                    return False
            pipe = self.redis.pipeline()
            # NOTE(review): redis-py 2.x positional order (key, member, score);
            # newer clients take a mapping — verify on client upgrade.
            pipe.zadd(key, member, score)
            pipe.execute()
            if self.limit > 0:
                # Trim the set back down once it overshoots the soft limit.
                zcard = self.redis.zcard(key)
                if zcard > self.limit * self.delete_factor:
                    delete_to = zcard - self.limit
                    self.redis.zremrangebyrank(key, 0, delete_to)
            return True
        except Exception, e:
            print e
            # NOTE(review): 'pipe' is unbound if the exception was raised before
            # the pipeline was created — this line would then raise NameError.
            pipe.reset()
            return False

    @operGet
    def zrank(self, key, member_id, **kwargs):
        """
        Get the 1-based rank of the member, lowest score first.
        return:
            integer, or None when the member is absent
        """
        r = self.redis.zrank(key, member_id)
        if r != None:
            # Convert redis's 0-based rank to a 1-based position.
            r += 1
        return r

    @operGet
    def zrevrank( self, key, member_id, **kwargs):
        """
        Get the 1-based rank of the member, highest score first.
        return:
            integer, or None when the member is absent
        """
        r = self.redis.zrevrank(key, member_id)
        if r != None:
            # Convert redis's 0-based rank to a 1-based position.
            r = r + 1
        return r

    @operGet
    def zrange(self, key, start=0, end=10, withscores=False, **kwargs):
        """
        Get the members between start and end, lowest score first.
        return:
            list of member ids (only_ids=True) or hydrated model objects
        """
        data = self.redis.zrange(key, start, end, withscores=withscores) or []
        if withscores:
            pks = []
            scores = {}
            for d in data:
                # Members may be stored as 'id_suffix'; keep only the id part.
                if d[0].find("_") > 0:
                    key = str(d[0]).split("_")[0]
                else:
                    key = d[0]
                pks.append(key)
                scores[key] = d[1]
        else:
            pks = data
            scores = {}
        if kwargs.get("only_ids", False):
            return pks
        else:
            return find_include(self.ref_klass, tuple(pks), scores, withscores)

    @operGet
    def zrevrange(self, key, start=0, end=10, **kwargs):
        """
        Get the members between start and end, highest score first.
        return:
            list of member ids (only_ids=True) or hydrated model objects
        """
        withscores = kwargs.get("withscores", True)
        data = self.redis.zrevrange(key, start, end, withscores=withscores) or []
        scores = {}
        if withscores:
            pks = []
            for d in data:
                # Members may be stored as 'id_suffix'; keep only the id part.
                if d[0].find("_") > 0:
                    key = str(d[0]).split("_")[0]
                else:
                    key = d[0]
                pks.append(key)
                scores[key] = d[1]
        else:
            pks = data
        if kwargs.get("only_ids", False):
            return pks
        else:
            return find_include(self.ref_klass, tuple(pks), scores, withscores)

    @operGet
    def zscore(self, key, member, **kwargs):
        """
        Get the score of the member (a model instance; its .id is the key).
        return:
            score
        """
        return self.redis.zscore(key, member.id)

    @operGet
    def zcard(self, key, **kwargs ):
        """
        Get the cardinality (member count) of the sorted set.
        return:
            count of list
        """
        return self.redis.zcard(key)

    @operSet
    def zrem(self, key, member_id, **kwargs):
        """
        Delete the member from the sorted set.
        return:
            True or False (errors are swallowed)
        """
        try:
            self.redis.zrem(key, member_id)
            return True
        except Exception, e:
            return False

    @operGet
    def zremrangebyrank(self, key, min_rank=0, max_rank=1, **kwargs):
        """
        Remove members between min_rank and max_rank to bound the set's size.
        return:
            True or False (errors are swallowed)
        """
        try:
            self.redis.zremrangebyrank(key, min_rank, max_rank)
            return True
        except Exception, e:
            return False
class MHashField(MAttribute):
    def __init__(self, ref=None, required=False, name=None):
        """
        Initialize a redis hash field descriptor.
        param:
            ref: name of the referenced model class
            required: True or False
            name: field name, default None
        """
        # NOTE(review): same super(MAttribute, ...) issue as MSortSetField —
        # MAttribute.__init__ is skipped; verify before changing.
        super(MAttribute, self).__init__()
        self.name = name
        self.ref = ref
        self.required = required

    @operSet
    def hset(self, key, field, **kwargs):
        """
        Store kwargs["value"] under the given hash field.
        usage: User.hset(self, user1, value=1)
        """
        self.redis.hset(key, field, kwargs["value"])
        return True

    @operGet
    def hget(self, key, field, **kwargs):
        """
        Read the hash value stored under str(field.id).
        usage: User.hget(self, user1)
        """
        return self.redis.hget(key, str(getattr(field, "id")))
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'valerio cosentino'
from util.db_util import DbUtil
class GitDao():
"""
This class handles the persistence and retrieval of Git data
"""
def __init__(self, config, logger):
"""
:type config: dict
:param config: the DB configuration file
:type logger: Object
:param logger: logger
"""
try:
self._config = config
self._logger = logger
self._db_util = DbUtil()
self._cnx = self._db_util.get_connection(self._config)
except:
self._logger.error("GitDao init failed")
raise
def check_connection_alive(self):
try:
cursor = self._cnx.cursor()
cursor.execute("SELECT VERSION()")
results = cursor.fetchone()
ver = results[0]
cursor.close()
if not ver:
self._cnx = self._db_util.restart_connection(self._config, self._logger)
except:
self._cnx = self._db_util.restart_connection(self._config, self._logger)
    def close_connection(self):
        """Close the underlying DB connection via DbUtil."""
        self._db_util.close_connection(self._cnx)

    def restart_connection(self):
        """Re-open the DB connection and store the new handle."""
        self._cnx = self._db_util.restart_connection(self._config, self._logger)

    def get_connection(self):
        """Return the raw DB connection object."""
        return self._cnx

    def get_cursor(self):
        """Return a new cursor on the current connection."""
        return self._cnx.cursor()

    def close_cursor(self, cursor):
        """Close *cursor* (returns whatever cursor.close() returns)."""
        return cursor.close()

    def fetchone(self, cursor):
        """Fetch the next row from *cursor* (None when exhausted)."""
        return cursor.fetchone()

    def execute(self, cursor, query, arguments):
        """Execute *query* on *cursor* with the bound *arguments*."""
        cursor.execute(query, arguments)
def array2string(self, array):
return ','.join(str(x) for x in array)
def line_detail_table_is_empty(self, repo_id):
"""
checks line detail table is empty
:type repo_id: int
:param repo_id: id of an existing repository in the DB
"""
cursor = self._cnx.cursor()
query = "SELECT COUNT(*) " \
"FROM commit c " \
"JOIN file_modification fm ON c.id = fm.commit_id " \
"JOIN line_detail l ON fm.id = l.file_modification_id " \
"WHERE l.content IS NOT NULL AND repo_id = %s"
arguments = [repo_id]
cursor.execute(query, arguments)
row = cursor.fetchone()
count = 0
if row:
count = int(row[0])
cursor.close()
return int(count > 0)
def file_modification_patch_is_empty(self, repo_id):
"""
checks patch column in file modification table is empty
:type repo_id: int
:param repo_id: id of an existing repository in the DB
"""
cursor = self._cnx.cursor()
query = "SELECT COUNT(*) " \
"FROM commit c " \
"JOIN file_modification fm ON c.id = fm.commit_id " \
"WHERE patch IS NOT NULL and repo_id = %s"
arguments = [repo_id]
cursor.execute(query, arguments)
row = cursor.fetchone()
count = 0
if row:
count = int(row[0])
cursor.close()
return int(count > 0)
def get_last_commit_id(self, repo_id):
"""
gets last commit id
:type repo_id: int
:param repo_id: id of an existing repository in the DB
"""
found = None
cursor = self._cnx.cursor()
query = "SELECT MAX(id) as last_commit_id " \
"FROM commit c " \
"WHERE repo_id = %s"
arguments = [repo_id]
cursor.execute(query, arguments)
row = cursor.fetchone()
if row:
found = row[0]
cursor.close()
return found
    def select_repo_id(self, repo_name):
        """
        selects id of a repository by its name (delegates to DbUtil)
        :type repo_name: str
        :param repo_name: name of an existing repository in the DB
        """
        return self._db_util.select_repo_id(self._cnx, repo_name, self._logger)

    def insert_repo(self, project_id, repo_name):
        """
        inserts repository to DB (delegates to DbUtil)
        :type project_id: int
        :param project_id: id of an existing project in the DB
        :type repo_name: str
        :param repo_name: name of a repository to insert
        """
        return self._db_util.insert_repo(self._cnx, project_id, repo_name, self._logger)

    def select_project_id(self, project_name):
        """
        selects id of a project by its name (delegates to DbUtil)
        :type project_name: str
        :param project_name: name of an existing project in the DB
        """
        return self._db_util.select_project_id(self._cnx, project_name, self._logger)
    def get_user_id(self, user_name, user_email):
        """
        gets id of a user, inserting the user first when unknown
        :type user_name: str
        :param user_name: name of the user
        :type user_email: str
        :param user_email: email of the user
        """
        # NOTE(review): "uknonwn_user" is a typo for "unknown_user"; left
        # unchanged because existing DB rows may already use this sentinel.
        if not user_email and not user_name:
            user_name = "uknonwn_user"
            user_email = "uknonwn_user"
        # Prefer email lookup; fall back to name lookup when no email is given.
        if user_email:
            user_id = self._db_util.select_user_id_by_email(self._cnx, user_email, self._logger)
        else:
            user_id = self._db_util.select_user_id_by_name(self._cnx, user_name, self._logger)
        if not user_id:
            self._db_util.insert_user(self._cnx, user_name, user_email, self._logger)
            # NOTE(review): the re-lookup after insert is always by email, even
            # when the miss came from a name lookup (user_email falsy) — verify
            # this finds the freshly inserted user in that case.
            user_id = self._db_util.select_user_id_by_email(self._cnx, user_email, self._logger)
        return user_id
def insert_commit_parents(self, parents, commit_id, sha, repo_id):
"""
inserts commit parents to DB, one by one
:type parents: list of Object
:param parents: parents of a commit
:type commit_id: int
:param commit_id: id of the commit
:type sha: str
:param sha: SHA of the commit
:type repo_id: int
:param repo_id: id of the repository
"""
cursor = self._cnx.cursor()
for parent in parents:
parent_id = self.select_commit_id(parent.hexsha, repo_id)
if not parent_id:
self._logger.warning("parent commit id not found! SHA parent " + str(parent.hexsha))
query = "INSERT IGNORE INTO commit_parent " \
"VALUES (%s, %s, %s, %s, %s)"
if parent_id:
arguments = [repo_id, commit_id, sha, parent_id, parent.hexsha]
else:
arguments = [repo_id, commit_id, sha, None, parent.hexsha]
cursor.execute(query, arguments)
self._cnx.commit()
cursor.close()
def insert_all_commit_parents(self, parents, commit_id, sha, repo_id):
"""
inserts commit parents to DB all together
:type parents: list of Object
:param parents: parents of a commit
:type commit_id: int
:param commit_id: id of the commit
:type sha: str
:param sha: SHA of the commit
:type repo_id: int
:param repo_id: id of the repository
"""
to_insert = []
for parent in parents:
parent_id = self.select_commit_id(parent.hexsha, repo_id)
if not parent_id:
self._logger.warning("parent commit id not found! SHA parent " + str(parent.hexsha))
if parent_id:
to_insert.append((repo_id, commit_id, sha, parent_id, parent.hexsha))
else:
to_insert.append((repo_id, commit_id, sha, None, parent.hexsha))
if to_insert:
cursor = self._cnx.cursor()
query = "INSERT IGNORE INTO commit_parent(repo_id, commit_id, commit_sha, parent_id, parent_sha) " \
"VALUES (%s, %s, %s, %s, %s)"
cursor.executemany(query, [i for i in to_insert])
self._cnx.commit()
cursor.close()
def insert_commits_in_reference(self, commits_data):
"""
inserts commits to DB all together
:type commits_data: list of Object
:param commits_data: commit data
"""
if commits_data:
cursor = self._cnx.cursor()
query = "INSERT IGNORE INTO commit_in_reference(repo_id, commit_id, ref_id) VALUES (%s, %s, %s)"
cursor.executemany(query, commits_data)
self._cnx.commit()
cursor.close()
def insert_commit_in_reference(self, repo_id, commit_id, ref_id):
"""
inserts commit to DB
:type repo_id: int
:param repo_id: id of the repository
:type commit_id: int
:param commit_id: id of the commit
:type ref_id: int
:param ref_id: id of the reference
"""
cursor = self._cnx.cursor()
query = "INSERT IGNORE INTO commit_in_reference " \
"VALUES (%s, %s, %s)"
arguments = [repo_id, commit_id, ref_id]
cursor.execute(query, arguments)
self._cnx.commit()
cursor.close()
def insert_line_details(self, file_modification_id, detail):
"""
inserts line details to DB
:type file_modification_id: int
:param file_modification_id: id of the file modification
:type detail: str
:param detail: line content
"""
cursor = self._cnx.cursor()
query = "INSERT IGNORE INTO line_detail " \
"VALUES (%s, %s, %s, %s, %s, %s, %s)"
arguments = [file_modification_id, detail[0], detail[1], detail[2], detail[3], detail[4], detail[5]]
cursor.execute(query, arguments)
self._cnx.commit()
cursor.close()
def select_file_modification_id(self, commit_id, file_id):
    """Returns the file-modification id for a (commit, file) pair, or None.

    :type commit_id: int
    :param commit_id: id of the commit
    :type file_id: int
    :param file_id: id of the file
    """
    cur = self._cnx.cursor()
    cur.execute(
        "SELECT id FROM file_modification WHERE commit_id = %s AND file_id = %s",
        (commit_id, file_id))
    row = cur.fetchone()
    cur.close()
    return row[0] if row else None
def insert_file_modification(self, commit_id, file_id, status, additions, deletions, changes, patch_content):
    """Inserts one file-modification row (auto-increment id).

    :type commit_id: int
    :param commit_id: id of the commit
    :type file_id: int
    :param file_id: id of the file
    :type status: str
    :param status: type of the modification
    :type additions: int
    :param additions: number of added lines
    :type deletions: int
    :param deletions: number of deleted lines
    :type changes: int
    :param changes: number of changed lines
    :type patch_content: str
    :param patch_content: content of the patch
    """
    cur = self._cnx.cursor()
    # NULL lets MySQL assign the auto-increment primary key
    cur.execute(
        "INSERT IGNORE INTO file_modification VALUES (NULL, %s, %s, %s, %s, %s, %s, %s)",
        (commit_id, file_id, status, additions, deletions, changes, patch_content))
    self._cnx.commit()
    cur.close()
def insert_file_renamed(self, repo_id, current_file_id, previous_file_id, file_modification_id):
    """Records a file rename.

    :type repo_id: int
    :param repo_id: id of the repository
    :type current_file_id: int
    :param current_file_id: id of the renamed file
    :type previous_file_id: int
    :param previous_file_id: id of the file before renaming
    :type file_modification_id: int
    :param file_modification_id: id of the file modification
    """
    cur = self._cnx.cursor()
    cur.execute(
        "INSERT IGNORE INTO file_renamed VALUES (%s, %s, %s, %s)",
        (repo_id, current_file_id, previous_file_id, file_modification_id))
    self._cnx.commit()
    cur.close()
def insert_file(self, repo_id, name, ext=None):
    """Inserts a file row (auto-increment id).

    :type repo_id: int
    :param repo_id: id of the repository
    :type name: str
    :param name: name of the file (full path)
    :type ext: str
    :param ext: extension of the file; derived from the name when omitted
    """
    # derive the extension from the path when not supplied; a name with
    # no dot yields the whole name, matching the previous behaviour
    if not ext:
        ext = name.rsplit('.', 1)[-1]
    cur = self._cnx.cursor()
    cur.execute(
        "INSERT IGNORE INTO file VALUES (%s, %s, %s, %s)",
        (None, repo_id, name, ext))
    self._cnx.commit()
    cur.close()
def select_file_id_before_date(self, repo_id, name, before_date):
    """
    selects id of the file added before a given date, or None

    :type repo_id: int
    :param repo_id: id of the repository

    :type name: str
    :param name: name of the file (full path)

    :type before_date: timestamp
    :param before_date: date
    """
    cursor = self._cnx.cursor()
    # before_date is now a bound parameter: the previous string
    # concatenation was vulnerable to SQL injection and quoting bugs
    query = "SELECT DISTINCT f.id " \
            "FROM file f JOIN file_modification fm ON f.id = fm.file_id " \
            "JOIN commit c ON c.id = fm.commit_id " \
            "WHERE f.name = %s AND f.repo_id = %s AND fm.status = 'added' " \
            "AND c.authored_date <= %s"
    arguments = [name, repo_id, str(before_date)]
    cursor.execute(query, arguments)
    # fetchone() returns None when no row matches; this replaces the
    # former bare except, which also swallowed unrelated DB errors
    row = cursor.fetchone()
    found = row[0] if row else None
    cursor.close()
    return found
def select_file_id(self, repo_id, name):
    """
    selects id of the file, or None when absent

    :type repo_id: int
    :param repo_id: id of the repository

    :type name: str
    :param name: name of the file (full path)
    """
    cursor = self._cnx.cursor()
    query = "SELECT id " \
            "FROM file " \
            "WHERE name = %s AND repo_id = %s"
    arguments = [name, repo_id]
    cursor.execute(query, arguments)
    # fetchone() returns None for an empty result set; this replaces the
    # former bare except, which also hid genuine database errors (and
    # shadowed the builtin `id`)
    row = cursor.fetchone()
    found = row[0] if row else None
    cursor.close()
    return found
def insert_reference(self, repo_id, ref_name, ref_type):
    """Inserts a reference row (auto-increment id).

    :type repo_id: int
    :param repo_id: id of the repository
    :type ref_name: str
    :param ref_name: name of the reference
    :type ref_type: str
    :param ref_type: type of the reference (branch or tag)
    """
    cur = self._cnx.cursor()
    cur.execute(
        "INSERT IGNORE INTO reference VALUES (%s, %s, %s, %s)",
        (None, repo_id, ref_name, ref_type))
    self._cnx.commit()
    cur.close()
def select_reference_name(self, repo_id, ref_id):
    """Returns the reference name for an id, or None.

    :type repo_id: int
    :param repo_id: id of the repository
    :type ref_id: int
    :param ref_id: id of the reference
    """
    cur = self._cnx.cursor()
    cur.execute(
        "SELECT name FROM reference WHERE id = %s and repo_id = %s",
        (ref_id, repo_id))
    row = cur.fetchone()
    cur.close()
    return row[0] if row else None
def select_reference_id(self, repo_id, ref_name):
    """Returns the reference id for a name, or None.

    :type repo_id: int
    :param repo_id: id of the repository
    :type ref_name: str
    :param ref_name: name of the reference
    """
    cur = self._cnx.cursor()
    cur.execute(
        "SELECT id FROM reference WHERE name = %s and repo_id = %s",
        (ref_name, repo_id))
    row = cur.fetchone()
    cur.close()
    return row[0] if row else None
def insert_commit(self, repo_id, sha, message, author_id, committer_id, authored_date, committed_date, size):
    """Inserts a commit row (auto-increment id).

    :type repo_id: int
    :param repo_id: id of the repository
    :type sha: str
    :param sha: SHA of the commit
    :type message: str
    :param message: commit message (stored stripped of surrounding whitespace)
    :type author_id: int
    :param author_id: author id of the commit
    :type committer_id: int
    :param committer_id: committer id of the commit
    :type authored_date: str
    :param authored_date: authored date of the commit
    :type committed_date: str
    :param committed_date: committed date of the commit
    :type size: int
    :param size: size of the commit
    """
    cur = self._cnx.cursor()
    cur.execute(
        "INSERT IGNORE INTO commit VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)",
        (None, repo_id, sha, message.strip(), author_id, committer_id,
         authored_date, committed_date, size))
    self._cnx.commit()
    cur.close()
def update_commit_parent(self, parent_id, parent_sha, repo_id):
    """Fills in the parent_id of commit_parent rows known only by SHA.

    :type parent_id: int
    :param parent_id: id of the commit parent
    :type parent_sha: str
    :param parent_sha: SHA of the commit parent
    :type repo_id: int
    :param repo_id: id of the repository
    """
    cur = self._cnx.cursor()
    cur.execute(
        "UPDATE commit_parent "
        "SET parent_id = %s "
        "WHERE parent_id IS NULL AND parent_sha = %s AND repo_id = %s ",
        (parent_id, parent_sha, repo_id))
    self._cnx.commit()
    cur.close()
def fix_commit_parent_table(self, repo_id):
    """Backfills missing parent ids in the commit_parent table.

    Finds rows whose parent_id is NULL, resolves each parent SHA to its
    commit id, and updates the row in place.

    :type repo_id: int
    :param repo_id: id of the repository
    """
    cur = self._cnx.cursor()
    cur.execute(
        "SELECT parent_sha "
        "FROM commit_parent "
        "WHERE parent_id IS NULL AND repo_id = %s",
        (repo_id,))
    row = cur.fetchone()
    while row:
        missing_sha = row[0]
        resolved_id = self.select_commit_id(missing_sha, repo_id)
        self.update_commit_parent(resolved_id, missing_sha, repo_id)
        row = cur.fetchone()
    cur.close()
def select_commit_id(self, sha, repo_id):
    """Returns the id of the commit with the given SHA, or None.

    :type sha: str
    :param sha: SHA of the commit
    :type repo_id: int
    :param repo_id: id of the repository
    """
    cur = self._cnx.cursor()
    cur.execute(
        "SELECT id FROM commit WHERE sha = %s AND repo_id = %s",
        (sha, repo_id))
    row = cur.fetchone()
    cur.close()
    return row[0] if row else None
def select_commit_id_before_date(self, sha, repo_id, before_date):
    """
    selects id of a commit by its SHA authored before a given date, or None

    :type sha: str
    :param sha: SHA of the commit

    :type repo_id: int
    :param repo_id: id of the repository

    :type before_date: timestamp
    :param before_date: date
    """
    found = None
    cursor = self._cnx.cursor()
    # before_date is now a bound parameter: the previous string
    # concatenation was vulnerable to SQL injection and quoting bugs
    query = "SELECT id " \
            "FROM commit " \
            "WHERE sha = %s AND repo_id = %s AND authored_date <= %s"
    arguments = [sha, repo_id, str(before_date)]
    cursor.execute(query, arguments)
    row = cursor.fetchone()
    if row:
        found = row[0]
    cursor.close()
    return found
def select_all_developer_ids(self, repo_id):
    """Returns ids of all developers (authors or committers) of a repo.

    Only users with both a name and an email are included.

    :type repo_id: int
    :param repo_id: id of the repository
    """
    cur = self._cnx.cursor()
    query = "SELECT c.author_id " \
            "FROM commit c JOIN repository r ON c.repo_id = r.id JOIN user u ON u.id = c.author_id " \
            "WHERE repo_id = %s AND u.name IS NOT NULL AND u.email IS NOT NULL " \
            "UNION " \
            "SELECT c.committer_id " \
            "FROM commit c JOIN repository r ON c.repo_id = r.id JOIN user u ON u.id = c.committer_id " \
            "WHERE repo_id = %s AND u.name IS NOT NULL AND u.email IS NOT NULL "
    cur.execute(query, (repo_id, repo_id))
    rows = cur.fetchall()
    cur.close()
    return [r[0] for r in rows]
def select_sha_commit_by_user(self, user_id, repo_id):
    """
    selects the SHA of one commit (authored or committed) by a given user id,
    or None when the user has none in the repository

    :type user_id: int
    :param user_id: id of the user

    :type repo_id: int
    :param repo_id: id of the repository
    """
    found = None
    cursor = self._cnx.cursor()
    query = "SELECT sha " \
            "FROM commit " \
            "WHERE (author_id = %s OR committer_id = %s) AND repo_id = %s " \
            "LIMIT 1"
    arguments = [user_id, user_id, repo_id]
    cursor.execute(query, arguments)
    row = cursor.fetchone()
    if row:
        found = row[0]
    # bug fix: the cursor was previously leaked (never closed)
    cursor.close()
    return found
| |
import filecmp
import os
import shutil
import tempfile
import unittest
from test import support
from test.support import os_helper
class FileCompareTestCase(unittest.TestCase):
    """Tests for filecmp.cmp() on individual files."""

    def setUp(self):
        # three temp files: two with identical contents, one with an
        # extra trailing line
        self.name = os_helper.TESTFN
        self.name_same = os_helper.TESTFN + '-same'
        self.name_diff = os_helper.TESTFN + '-diff'
        data = 'Contents of file go here.\n'
        for name in [self.name, self.name_same, self.name_diff]:
            with open(name, 'w', encoding="utf-8") as output:
                output.write(data)
        with open(self.name_diff, 'a+', encoding="utf-8") as output:
            output.write('An extra line.\n')
        self.dir = tempfile.gettempdir()

    def tearDown(self):
        os.unlink(self.name)
        os.unlink(self.name_same)
        os.unlink(self.name_diff)

    def test_matching(self):
        """Identical files compare equal in both shallow and deep mode."""
        self.assertTrue(filecmp.cmp(self.name, self.name),
                        "Comparing file to itself fails")
        self.assertTrue(filecmp.cmp(self.name, self.name, shallow=False),
                        "Comparing file to itself fails")
        self.assertTrue(filecmp.cmp(self.name, self.name_same),
                        "Comparing file to identical file fails")
        self.assertTrue(filecmp.cmp(self.name, self.name_same, shallow=False),
                        "Comparing file to identical file fails")

    def test_different(self):
        """Differing files, and a file vs. a directory, compare unequal."""
        self.assertFalse(filecmp.cmp(self.name, self.name_diff),
                         "Mismatched files compare as equal")
        self.assertFalse(filecmp.cmp(self.name, self.dir),
                         "File and directory compare as equal")

    def test_cache_clear(self):
        """clear_cache() empties filecmp's internal signature cache."""
        # the two cmp() calls populate the cache before it is cleared
        first_compare = filecmp.cmp(self.name, self.name_same, shallow=False)
        second_compare = filecmp.cmp(self.name, self.name_diff, shallow=False)
        filecmp.clear_cache()
        self.assertTrue(len(filecmp._cache) == 0,
                        "Cache not cleared after calling clear_cache")
class DirCompareTestCase(unittest.TestCase):
    """Tests for filecmp.cmpfiles() and filecmp.dircmp."""

    def setUp(self):
        # three temp trees: dir and dir-same hold the same file, dir-diff
        # additionally holds 'file2'; each tree also has an empty 'subdir'
        tmpdir = tempfile.gettempdir()
        self.dir = os.path.join(tmpdir, 'dir')
        self.dir_same = os.path.join(tmpdir, 'dir-same')
        self.dir_diff = os.path.join(tmpdir, 'dir-diff')

        # Another dir is created under dir_same, but it has a name from the
        # ignored list so it should not affect testing results.
        self.dir_ignored = os.path.join(self.dir_same, '.hg')

        self.caseinsensitive = os.path.normcase('A') == os.path.normcase('a')
        data = 'Contents of file go here.\n'
        for dir in (self.dir, self.dir_same, self.dir_diff, self.dir_ignored):
            shutil.rmtree(dir, True)
            os.mkdir(dir)
            subdir_path = os.path.join(dir, 'subdir')
            os.mkdir(subdir_path)
            if self.caseinsensitive and dir is self.dir_same:
                fn = 'FiLe'  # Verify case-insensitive comparison
            else:
                fn = 'file'
            with open(os.path.join(dir, fn), 'w', encoding="utf-8") as output:
                output.write(data)

        with open(os.path.join(self.dir_diff, 'file2'), 'w', encoding="utf-8") as output:
            output.write('An extra file.\n')

    def tearDown(self):
        # dir_ignored lives under dir_same, so removing the three roots
        # cleans up everything created in setUp
        for dir in (self.dir, self.dir_same, self.dir_diff):
            shutil.rmtree(dir)

    def test_default_ignores(self):
        self.assertIn('.hg', filecmp.DEFAULT_IGNORES)

    def test_cmpfiles(self):
        """cmpfiles() classifies names into (match, mismatch, errors)."""
        self.assertTrue(filecmp.cmpfiles(self.dir, self.dir, ['file']) ==
                        (['file'], [], []),
                        "Comparing directory to itself fails")
        self.assertTrue(filecmp.cmpfiles(self.dir, self.dir_same, ['file']) ==
                        (['file'], [], []),
                        "Comparing directory to same fails")

        # Try it with shallow=False
        self.assertTrue(filecmp.cmpfiles(self.dir, self.dir, ['file'],
                                         shallow=False) ==
                        (['file'], [], []),
                        "Comparing directory to itself fails")
        self.assertTrue(filecmp.cmpfiles(self.dir, self.dir_same, ['file'],
                                         shallow=False),
                        "Comparing directory to same fails")

        # Add different file2
        with open(os.path.join(self.dir, 'file2'), 'w', encoding="utf-8") as output:
            output.write('Different contents.\n')

        self.assertFalse(filecmp.cmpfiles(self.dir, self.dir_same,
                                          ['file', 'file2']) ==
                         (['file'], ['file2'], []),
                         "Comparing mismatched directories fails")

    def _assert_lists(self, actual, expected):
        """Assert that two lists are equal, up to ordering."""
        self.assertEqual(sorted(actual), sorted(expected))

    def test_dircmp(self):
        """dircmp attributes and report() for identical/differing trees."""
        # Check attributes for comparison of two identical directories
        left_dir, right_dir = self.dir, self.dir_same
        d = filecmp.dircmp(left_dir, right_dir)
        self.assertEqual(d.left, left_dir)
        self.assertEqual(d.right, right_dir)
        if self.caseinsensitive:
            self._assert_lists(d.left_list, ['file', 'subdir'])
            self._assert_lists(d.right_list, ['FiLe', 'subdir'])
        else:
            self._assert_lists(d.left_list, ['file', 'subdir'])
            self._assert_lists(d.right_list, ['file', 'subdir'])
        self._assert_lists(d.common, ['file', 'subdir'])
        self._assert_lists(d.common_dirs, ['subdir'])
        self.assertEqual(d.left_only, [])
        self.assertEqual(d.right_only, [])
        self.assertEqual(d.same_files, ['file'])
        self.assertEqual(d.diff_files, [])
        expected_report = [
            "diff {} {}".format(self.dir, self.dir_same),
            "Identical files : ['file']",
            "Common subdirectories : ['subdir']",
        ]
        self._assert_report(d.report, expected_report)

        # Check attributes for comparison of two different directories (right)
        left_dir, right_dir = self.dir, self.dir_diff
        d = filecmp.dircmp(left_dir, right_dir)
        self.assertEqual(d.left, left_dir)
        self.assertEqual(d.right, right_dir)
        self._assert_lists(d.left_list, ['file', 'subdir'])
        self._assert_lists(d.right_list, ['file', 'file2', 'subdir'])
        self._assert_lists(d.common, ['file', 'subdir'])
        self._assert_lists(d.common_dirs, ['subdir'])
        self.assertEqual(d.left_only, [])
        self.assertEqual(d.right_only, ['file2'])
        self.assertEqual(d.same_files, ['file'])
        self.assertEqual(d.diff_files, [])
        expected_report = [
            "diff {} {}".format(self.dir, self.dir_diff),
            "Only in {} : ['file2']".format(self.dir_diff),
            "Identical files : ['file']",
            "Common subdirectories : ['subdir']",
        ]
        self._assert_report(d.report, expected_report)

        # Check attributes for comparison of two different directories (left)
        # -- moving file2 from the right tree to the left tree flips the
        # left_only/right_only expectations
        left_dir, right_dir = self.dir, self.dir_diff
        shutil.move(
            os.path.join(self.dir_diff, 'file2'),
            os.path.join(self.dir, 'file2')
        )
        d = filecmp.dircmp(left_dir, right_dir)
        self.assertEqual(d.left, left_dir)
        self.assertEqual(d.right, right_dir)
        self._assert_lists(d.left_list, ['file', 'file2', 'subdir'])
        self._assert_lists(d.right_list, ['file', 'subdir'])
        self._assert_lists(d.common, ['file', 'subdir'])
        self.assertEqual(d.left_only, ['file2'])
        self.assertEqual(d.right_only, [])
        self.assertEqual(d.same_files, ['file'])
        self.assertEqual(d.diff_files, [])
        expected_report = [
            "diff {} {}".format(self.dir, self.dir_diff),
            "Only in {} : ['file2']".format(self.dir),
            "Identical files : ['file']",
            "Common subdirectories : ['subdir']",
        ]
        self._assert_report(d.report, expected_report)

        # Add different file2 -- now both sides have file2 with different
        # contents, so it must show up in diff_files
        with open(os.path.join(self.dir_diff, 'file2'), 'w', encoding="utf-8") as output:
            output.write('Different contents.\n')
        d = filecmp.dircmp(self.dir, self.dir_diff)
        self.assertEqual(d.same_files, ['file'])
        self.assertEqual(d.diff_files, ['file2'])
        expected_report = [
            "diff {} {}".format(self.dir, self.dir_diff),
            "Identical files : ['file']",
            "Differing files : ['file2']",
            "Common subdirectories : ['subdir']",
        ]
        self._assert_report(d.report, expected_report)

    def test_dircmp_subdirs_type(self):
        """Check that dircmp.subdirs respects subclassing."""
        class MyDirCmp(filecmp.dircmp):
            pass
        d = MyDirCmp(self.dir, self.dir_diff)
        sub_dirs = d.subdirs
        self.assertEqual(list(sub_dirs.keys()), ['subdir'])
        sub_dcmp = sub_dirs['subdir']
        self.assertEqual(type(sub_dcmp), MyDirCmp)

    def test_report_partial_closure(self):
        """report_partial_closure() also reports immediate subdirectories."""
        left_dir, right_dir = self.dir, self.dir_same
        d = filecmp.dircmp(left_dir, right_dir)
        left_subdir = os.path.join(left_dir, 'subdir')
        right_subdir = os.path.join(right_dir, 'subdir')
        expected_report = [
            "diff {} {}".format(self.dir, self.dir_same),
            "Identical files : ['file']",
            "Common subdirectories : ['subdir']",
            '',
            "diff {} {}".format(left_subdir, right_subdir),
        ]
        self._assert_report(d.report_partial_closure, expected_report)

    def test_report_full_closure(self):
        """report_full_closure() recurses through all common subdirectories."""
        left_dir, right_dir = self.dir, self.dir_same
        d = filecmp.dircmp(left_dir, right_dir)
        left_subdir = os.path.join(left_dir, 'subdir')
        right_subdir = os.path.join(right_dir, 'subdir')
        expected_report = [
            "diff {} {}".format(self.dir, self.dir_same),
            "Identical files : ['file']",
            "Common subdirectories : ['subdir']",
            '',
            "diff {} {}".format(left_subdir, right_subdir),
        ]
        self._assert_report(d.report_full_closure, expected_report)

    def _assert_report(self, dircmp_report, expected_report_lines):
        # run the report method with stdout captured and compare its
        # output line by line
        with support.captured_stdout() as stdout:
            dircmp_report()
            report_lines = stdout.getvalue().strip().split('\n')
            self.assertEqual(report_lines, expected_report_lines)
def test_main():
    """Entry point used by CPython's regression-test framework."""
    support.run_unittest(FileCompareTestCase, DirCompareTestCase)

if __name__ == "__main__":
    test_main()
| |
"""This is the main collection of functions that redrain uses to do basically
everything.
"""
import time
import re
import os
import urllib
from feedparser import parse
from re import search, match
from datetime import datetime
import sys
# globals
CONFIG = dict()     # basic config file, for bootstrapping everything else
PODCASTS = list()   # podcast list
OLD_URLS = set()    # urls of old episodes
OLD_GUIDS = set()   # guids of old episodes
NEW_URLS = set()    # urls of episodes that need to be committed to file
NEW_GUIDS = set()   # guids of episodes that need to be committed to file

# default paths used when no config file exists yet (see make_config)
DEFAULT_CONFIG = { \
    'f_oldshows': '~/.redrain/oldshows', \
    'f_podcasts': '~/.redrain/podcasts', \
    'd_download_dir': '~/.redrain/download/', \
    'f_lastrun': '~/.redrain/lastrun'}

# placeholder "last run" timestamp; overwritten by load_config
LASTRUN = datetime(2013, 8, 24, 0, 0)

# Small hack to make sure that redrain identifies itself by user-agent
class RRopener(urllib.FancyURLopener):
    """Hack -- improve later."""
    version = "rrlib/0.4.5"

# install the custom opener globally so urlretrieve uses our user-agent
urllib._urlopener = RRopener()
def load_config(cfg_name='~/.redrain/config'):
    """Loads all needed config files for the program to run

    Arguments -- cfg_name (default='~/.redrain/config')

    Reads the base configuration file, then loads the oldshows and lastrun
    files.  Populates the module-level CONFIG dict and LASTRUN timestamp,
    creating any missing config files and directories along the way.
    """
    global LASTRUN
    path = fixpath(cfg_name)

    # if we don't have a config file, create it
    if os.path.exists(path) == False:
        make_config()

    # open and load the config file; each non-comment line is key=value
    f_config = open(path, 'rU')

    for line in f_config.readlines():
        # match a comment
        rex = match(r'#', line)
        if rex is not None:
            continue

        rex = match(r'(.+)=(.+)', line)
        if rex is not None:
            CONFIG[rex.group(1)] = rex.group(2)

    # straighten up paths in the config: keys starting with 'f_' hold
    # file paths that need ~ expansion and normalisation
    for path in CONFIG.keys():
        rex = match(r'(f_)', path)
        if rex is not None:
            CONFIG[path] = fixpath(CONFIG[path])

    # check for the 'oldshows' file; if it's not there, create it.
    if os.path.exists(CONFIG['f_oldshows']) == False:
        # create an empty file
        open(CONFIG['f_oldshows'], 'w').close()

    load_oldshows(CONFIG['f_oldshows'])

    # check for the lastrun file and load or create it
    if os.path.exists(CONFIG['f_lastrun']) == False:
        f_last = open(CONFIG['f_lastrun'], 'w')
        LASTRUN = datetime(2013, 8, 24, 0, 0)
        # write year, month, day, hour, minute -- one per line
        for k in range(5):
            f_last.write(str(LASTRUN.timetuple()[k]) + '\n')
        f_last.flush()
        f_last.close()

    # load up the lastrun file (five lines: year, month, day, hour, minute)
    f_last = open(CONFIG['f_lastrun'], 'rU')
    dnt = list()

    for k in range(5):
        dnt.append(int(f_last.readline()))

    LASTRUN = datetime(dnt[0], dnt[1], dnt[2], dnt[3], dnt[4])

    # make sure that any directories in the configuration actually exist.
    # if they don't exist, create them.  Keys starting with 'd_' hold
    # directory paths.
    for directory in CONFIG.keys():
        rex = match(r'd_', directory)
        if rex is not None:
            path = fixpath(CONFIG[directory])
            if os.path.exists(path) == False:
                print path + " not found, creating path."
                os.makedirs(path)
def make_config():
    """Create the default config directory, download dir and config file.

    Arguments -- none.

    Ensures ~/.redrain and the default download directory exist, then
    writes every key=value pair from DEFAULT_CONFIG to ~/.redrain/config.
    """
    # ensure the ~/.redrain directory exists
    if not os.path.exists(fixpath('~/.redrain')):
        os.mkdir(fixpath('~/.redrain/'))

    # ensure the default download directory exists
    if not os.path.exists(DEFAULT_CONFIG['d_download_dir']):
        os.mkdir(fixpath(DEFAULT_CONFIG['d_download_dir']) + '/')

    # dump the defaults into the core config file
    cfg = open(fixpath('~/.redrain/config'), 'w')
    for key in DEFAULT_CONFIG.keys():
        cfg.write(key + '=' + DEFAULT_CONFIG[key] + '\n')
    cfg.flush()
    cfg.close()
def fixpath(user):
    """Normalize a path, expanding a leading ~ to the user's home.

    Arguments - a string that should point to a file or directory.

    Thin wrapper around os.path.expanduser followed by os.path.normpath.
    """
    expanded = os.path.expanduser(user)
    return os.path.normpath(expanded)
def load_oldshows(filename):
    """Populate OLD_URLS and OLD_GUIDS from an oldshows file.

    Arguments -- a filename.

    Lines starting with '#' are ignored; lines of the form 'url=...' or
    'guid=...' feed the corresponding module-level set.
    """
    handle = open(filename, 'rU')

    for line in handle.readlines():
        # discard a comment
        if match(r'#', line) is not None:
            continue

        rex = match(r'(guid|url)=(.+)', line)
        if rex is None:
            continue

        kind, value = rex.group(1), rex.group(2)
        if kind == 'url':
            OLD_URLS.add(value)
        if kind == 'guid':
            OLD_GUIDS.add(value)
def load_podcasts():
    """Scans the podcasts file in the config and loads it.

    Arguments -- none.

    Scans the file in CONFIG['f_podcasts'] for entries.  Each entry is a
    series of key=value pairs, and each entry is seperated by a percent
    sign ('%').  At an absolute minimum, an entry needs to contain a feedurl
    key.  At present, the only other keys supported are 'skip' and 'nicename'.
    Valid entries are appended to the module-level PODCASTS list.
    """
    # silently do nothing when the podcasts file does not exist
    if os.path.exists(CONFIG['f_podcasts']) == False:
        return

    f_pods = open(CONFIG['f_podcasts'], 'rU')
    show = dict()

    for line in f_pods.readlines():
        # match a key=value line (non-greedy key so the first '=' splits)
        rex = match(r'(.+?)=(.+)', line)
        if rex is not None:
            show[rex.group(1)] = rex.group(2)
            continue

        # match a comment
        rex = match(r'#', line)
        if rex is not None:
            continue

        # match a % and start the next show
        rex = match(r'%', line)
        if rex is not None:
            # skip the show if the entry contains "skip=true"
            if show.get('skip', 'false') == 'true':
                show = dict()
                continue

            # if there is a feedurl, we can use it.  append it.
            if 'feedurl' in show:
                PODCASTS.append(show)
            # if there isn't, warn the user
            elif not 'feedurl' in show:
                print 'Error: show did not have a feedurl.'
            show = dict()
            continue
def scrape_feed_url(url, nicename='NoneProvided'):
    """Downloads a given URL and scrapes it for episodes.

    Arguments - a url (or even a file) that points to a XML feed.
    Optionally, the 'nicename' parameter is passed along here.

    Uses feedparser to examine a given feed and take the relevant bits of the
    'entries' array and turn it into a list of dictionaries that is
    returned to the end user.  Six keys are in each 'episode' :
    'url', 'title', 'guid', 'date', 'showname', and 'nicename'.
    """
    showlist = []
    fp_data = parse(url)

    # This warning is badly placed; shouldn't print to console in redrain.py
    # (feedparser sets bozo=1 when the feed was malformed)
    if fp_data.bozo == 1:
        print '[error]',

    # iterate over the entries within the feed
    for entry in fp_data.entries:
        tmp = dict()
        tmp['title'] = entry.title
        tmp['guid'] = entry.guid
        tmp['showname'] = fp_data.feed.title
        tmp['nicename'] = nicename

        # prep updated_parsed for conversion datetime object
        # (first five fields: year, month, day, hour, minute)
        dnt = list(entry.published_parsed[0:5])
        tmp['date'] = datetime(dnt[0], dnt[1], dnt[2], dnt[3], dnt[4])

        # within each entry is a list of enclosures (hopefully of length 1)
        # NOTE(review): when several enclosures exist only the last href
        # survives -- presumably intentional, but verify
        for enclosure in entry.enclosures:
            tmp['url'] = enclosure['href']

        # temp hack, but this fixes enclosures that lack certain attributes.
        if valid_item(tmp) == True:
            showlist.append(tmp)

    return showlist
def valid_item(item):
    """Return True when *item* carries every key an episode dict needs.

    Arguments -- item, a dictionary describing one episode.

    Uses a direct membership test instead of the former get(key, 'FAIL')
    sentinel comparison, which wrongly rejected items whose value happened
    to equal the sentinel string.
    """
    required = ('title', 'guid', 'showname', 'nicename', 'date', 'url')
    return all(key in item for key in required)
def filter_list(item):
    """Determines if a given episode is new enough to be downloaded.

    Arguments - a dict. containing at least three keys: guid, url, and date.

    Three staleness checks are applied: known guid, known url, and a date
    not newer than the last run.  The episode is rejected (False) only
    when at least two of the three checks fire.
    """
    hits = 0

    # check guids
    if item['guid'] in OLD_GUIDS:
        hits += 1

    # check urls
    if item['url'] in OLD_URLS:
        hits += 1

    # compare date against the previous run
    if (LASTRUN - item['date']).days >= 0:
        hits += 1

    return hits <= 1
def save_state():
    """Dumps urls and guids to the oldshow file and updates the lastrun file.

    Arguments -- None.

    Appends every entry in NEW_URLS and NEW_GUIDS to the oldshows file as
    'url=...' / 'guid=...' lines, writes the current UTC time to the
    lastrun file, then resets both "new" sets.
    """
    global NEW_URLS
    global NEW_GUIDS

    # append the pending urls and guids to 'oldshows'
    f_old = open(CONFIG['f_oldshows'], 'a')
    for entry in NEW_URLS:
        f_old.write('url=' + entry + '\n')
    for entry in NEW_GUIDS:
        f_old.write('guid=' + entry + '\n')
    f_old.flush()
    f_old.close()

    # record the current time: year, month, day, hour, minute -- one per line
    f_last = open(CONFIG['f_lastrun'], 'w')
    for part in time.gmtime()[0:5]:
        f_last.write(str(part) + '\n')
    f_last.flush()
    f_last.close()

    # everything pending has been persisted
    NEW_URLS = set()
    NEW_GUIDS = set()
def sanitize_filename(fname):
    """Makes a given name safe for FAT32

    Arguments : fname -- a string or unicode string.

    Since FAT32 is the "lowest common denominator" of filesystems and is the
    most likely one to be found on a mp3 player, this function changes unicode
    strings to plain strings, truncates them to 250 characters and strips
    "bad" characters out.
    """
    # if fname is unicode, strip it first: keep only printable ASCII
    # (codepoints 32..128)
    if type(fname) == unicode:
        fname = ''.join([x for x in fname if ord(x) > 31 and ord(x) < 129])

    # turn into a string, reduce to 250 characters
    fname = str(fname)[0:250]

    # clean 'naughty' characters -- note the string contains both a
    # backslash and a forward slash (\/ is not an escape sequence)
    naughty = ':;*?"|\/<>'
    # map each forbidden character to the empty string and delete them all
    trans = dict(zip([x for x in naughty], ['' for x in xrange(len(naughty))]))
    for key, value in trans.iteritems():
        fname = fname.replace(key, value)

    return fname
def dl_progress(count, blockSize, totalSize):
    """Progress hook for urlretrieve: redraws a percentage on one line."""
    downloaded = count * blockSize
    percent = int(downloaded * 100 / totalSize)
    sys.stdout.write("\r%d%%" % percent)
    sys.stdout.flush()
def download_episode(episode, custom=None):
    """Downloads a podcast episode to the download directory.

    Arguments : episode -- a small dictionary that contains the keys 'url'
    and 'title'.  custom -- optional pre-built file name; when provided it
    is used instead of the name derived from the episode title.

    Simply downloads a specified episode to the configured download directory.
    Makes a call to sanitize_filename to make the file safe to save anywhere.
    """
    # construct filename
    # - get extension from url
    ext = sanitize_filename(search('(.+)(\..+?)$', episode['url']).group(2))

    # clean up title, concatenate with extension and use it as the filename
    fname = sanitize_filename(episode['title']) + ext

    # skip downloading and bail if the user asked for it
    if CONFIG.get('skipdl', 'false') == 'true':
        mark_as_old(episode)
        return

    # Bug fix: the old code branched on episode['dl_file_name'] but then
    # used `custom`, raising TypeError whenever the key was present while
    # custom was None.  Branch on `custom` itself instead.
    if custom is not None:
        target = fixpath(CONFIG['d_download_dir'] + custom)
    else:
        target = fixpath(CONFIG['d_download_dir'] + fname)

    # download the file
    urllib.urlretrieve(episode['url'], target, dl_progress)

    # mark episode as old
    mark_as_old(episode)

    # save the state so we don't redownload a show if the program is terminated early.
    save_state()
def mark_as_old(episode):
    """Registers a specified episode as "old".

    Arguments : episode -- A small dictionary that contains at least two
    keys : 'url', and 'guid'.

    The url and guid are added to both the "old" sets (so the episode is
    never downloaded twice) and the "new" sets (so they get written to
    file on the next save_state call).
    """
    for bucket in (OLD_URLS, NEW_URLS):
        bucket.add(episode['url'])
    for bucket in (OLD_GUIDS, NEW_GUIDS):
        bucket.add(episode['guid'])
def custom_name(podcast, fstring):
    """Creates a custom episode name for a downloaded show.

    Agruments : podcast -- a dict with particular keys and string - the string
    that will be used to create the filename.

    The string should contain items to be replaced as marked by percent signs
    with braces indicate what the token should be replaced with.  An example:

    '%{show}-%{episode}.mp3' -- might come out as 'Metalcast- pisode 19.mp3'
    """
    # copy the original hash so the caller's dict is not mutated
    replacements = podcast.copy()

    # expand the hash and make sure that everything in it is a string
    # - create filename from url (the extension, dot included)
    replacements['ext'] = search('(.+)(\..+?)$', podcast['url']).group(2)

    # - replace date and time with strings
    tmp = replacements['date']
    replacements['date'] = replacements['date'].strftime('%Y-%m-%d')
    replacements['time'] = tmp.strftime('%H%M')

    # - today's date and time strings (as opposed to 'updated' time/date)
    tmp = time.localtime()
    now = datetime(tmp[0], tmp[1], tmp[2], tmp[3], tmp[4])
    replacements['ltime'] = now.strftime('%H%M')
    replacements['ldate'] = now.strftime('%Y-%m-%d')

    # construct the regular expression from the keys of 'replacements':
    # the result looks like %{(key1|key2|...)}
    allkeys = '%{('
    for key in replacements.keys():
        allkeys = allkeys + key + '|'
    allkeys = allkeys[:-1] + ')}'

    # replace the user-specified tokens; one substitution pass per '%'
    # found in the format string
    for _ in xrange(fstring.count('%')):
        result = search(allkeys, fstring)
        if result is not None:
            fstring = re.sub('%{' + result.group(1) + '}', \
            replacements[result.group(1)], fstring)

    # clean it up, just in case
    fstring = sanitize_filename(fstring)

    # add in the extension
    #fstring = fstring + replacements['ext']

    # we're done, return the string
    return fstring
| |
# Copyright 2013 The Swarming Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0 that
# can be found in the LICENSE file.
"""Classes and functions for generic network communication over HTTP."""
import cookielib
import cStringIO as StringIO
import datetime
import httplib
import itertools
import json
import logging
import math
import os
import random
import re
import socket
import ssl
import threading
import time
import urllib
import urlparse
from third_party import requests
from third_party.requests import adapters
from third_party.requests import structures
from utils import oauth
from utils import tools
# TODO(vadimsh): Remove this once we don't have to support python 2.6 anymore.
def monkey_patch_httplib():
  """Patch httplib.HTTPConnection to have '_tunnel_host' attribute.

  'requests' library (>= v2) accesses 'HTTPConnection._tunnel_host' attribute
  added only in python 2.6.3. This function patches HTTPConnection to have it
  on python 2.6.2 as well.
  """
  probe = httplib.HTTPConnection('example.com')
  if not hasattr(probe, '_tunnel_host'):
    # Define it at class level so every future instance inherits it.
    httplib.HTTPConnection._tunnel_host = None
# Install the patch at import time, before 'requests' first uses HTTPConnection.
monkey_patch_httplib()
# Default maximum number of attempts to trying opening a url before aborting.
URL_OPEN_MAX_ATTEMPTS = 30
# Default timeout when retrying, in seconds (compared against wall clock
# deltas in retry_loop).
URL_OPEN_TIMEOUT = 6*60.
# Default timeout when reading from open HTTP connection, in seconds.
URL_READ_TIMEOUT = 60
# Content type for url encoded POST body.
URL_ENCODED_FORM_CONTENT_TYPE = 'application/x-www-form-urlencoded'
# Content type for JSON body.
JSON_CONTENT_TYPE = 'application/json; charset=UTF-8'
# Default content type for POST body.
DEFAULT_CONTENT_TYPE = URL_ENCODED_FORM_CONTENT_TYPE
# Content type -> function that encodes a request body.
CONTENT_ENCODERS = {
  URL_ENCODED_FORM_CONTENT_TYPE:
      urllib.urlencode,
  JSON_CONTENT_TYPE:
      lambda x: json.dumps(x, sort_keys=True, separators=(',', ':')),
}
# Google Storage URL regular expression.
GS_STORAGE_HOST_URL_RE = re.compile(r'https://.*\.storage\.googleapis\.com')
# Global (for now) map: server URL (http://example.com) -> HttpService instance.
# Used by get_http_service to cache HttpService instances.
_http_services = {}
_http_services_lock = threading.Lock()
# This lock ensures that user won't be confused with multiple concurrent
# login prompts.
_auth_lock = threading.Lock()
# Set in 'set_oauth_config'. If 'set_oauth_config' is not called before the
# first request, will be set to oauth.make_oauth_config().
_auth_config = None
# A class to use to send HTTP requests. Can be changed by 'set_engine_class'.
# Default is RequestsLibEngine.
_request_engine_cls = None
class NetError(IOError):
  """Generic network related error."""
  def __init__(self, inner_exc=None):
    # Fall back to the class docstring when no inner exception is given, so
    # every subclass gets a readable default message for free.
    message = str(inner_exc or self.__doc__)
    super(NetError, self).__init__(message)
    self.inner_exc = inner_exc
    # Cache slot for the detailed description built by
    # HttpService._format_error (headers/body of the failed response).
    self.verbose_info = None
# NOTE: the docstring doubles as the default error message (NetError uses
# self.__doc__ when no inner exception is passed), so keep it user-readable.
class TimeoutError(NetError):
  """Timeout while reading HTTP response."""
# NOTE(review): this name shadows Python 3's builtin ConnectionError; harmless
# under Python 2, but worth renaming if the file is ever ported.
class ConnectionError(NetError):
  """Failed to connect to the server."""
class HttpError(NetError):
  """Server returned HTTP error code."""
  def __init__(self, code, content_type, inner_exc):
    # |code| - integer HTTP status (e.g. 404, 500).
    # |content_type| - value of the response 'Content-Type' header (may be
    #   None); used by HttpService.is_transient_http_error.
    # |inner_exc| - original exception raised by the transport layer.
    super(HttpError, self).__init__(inner_exc)
    self.code = code
    self.content_type = content_type
def set_engine_class(engine_cls):
  """Globally changes a class to use to execute HTTP requests.
  Default engine is RequestsLibEngine that uses 'requests' library. Changing the
  engine on the fly is not supported. It must be set before the first request.
  Custom engine class should support same public interface as RequestsLibEngine.
  """
  global _request_engine_cls
  # Enforce the 'set once, before first use' contract.
  assert _request_engine_cls is None
  _request_engine_cls = engine_cls
def get_engine_class():
  """Returns a class to use to execute HTTP requests."""
  # Falls back to RequestsLibEngine when set_engine_class was never called.
  return _request_engine_cls or RequestsLibEngine
def url_open(url, **kwargs):  # pylint: disable=W0621
  """Attempts to open the given url multiple times.
  |data| can be either:
    - None for a GET request
    - str for pre-encoded data
    - list for data to be encoded
    - dict for data to be encoded
  See HttpService.request for a full list of arguments.
  Returns HttpResponse object, where the response may be read from, or None
  if it was unable to connect.
  """
  # Split the absolute URL into the service host and the relative path, then
  # delegate to the (possibly cached) HttpService for that host.
  urlhost, urlpath = split_server_request_url(url)
  return get_http_service(urlhost).request(urlpath, **kwargs)
def url_read(url, **kwargs):
  """Attempts to open the given url multiple times and read all data from it.
  Accepts same arguments as url_open function.
  Returns all data read or None if it was unable to connect or read the data.
  """
  # Force buffering of the whole body: callers get the data, not a stream.
  kwargs['stream'] = False
  result = None
  response = url_open(url, **kwargs)
  if response:
    try:
      result = response.read()
    except TimeoutError:
      result = None
  return result
def url_read_json(url, **kwargs):
  """Attempts to open the given url multiple times and read all data from it.
  Accepts same arguments as url_open function.
  Returns deserialized JSON or None if it was unable to connect or read it.
  """
  urlhost, urlpath = split_server_request_url(url)
  try:
    # json_request already retries and deserializes; only timeouts escape it.
    return get_http_service(urlhost).json_request(urlpath, **kwargs)
  except TimeoutError:
    return None
def url_retrieve(filepath, url, **kwargs):
  """Downloads an URL to a file. Returns True on success."""
  response = url_open(url, **kwargs)
  if not response:
    return False
  try:
    # Stream the body to disk in 64KiB chunks.
    with open(filepath, 'wb') as out:
      chunk = response.read(65536)
      while chunk:
        out.write(chunk)
        chunk = response.read(65536)
    return True
  except (IOError, OSError, TimeoutError):
    # Delete the partially written file; best effort, ignore delete failures.
    try:
      os.remove(filepath)
    except IOError:
      pass
    return False
def split_server_request_url(url):
  """Splits the url into scheme+netloc and path+params+query+fragment."""
  parts = list(urlparse.urlparse(url))
  # parts[0] is the scheme, parts[1] the netloc; the rest is the relative URL.
  host = '%s://%s' % (parts[0], parts[1])
  relative = urlparse.urlunparse(['', ''] + parts[2:])
  return host, relative
def fix_url(url):
  """Fixes an url to https."""
  parsed = urlparse.urlparse(url, 'https')
  if parsed.query:
    raise ValueError('doesn\'t support query parameter.')
  if parsed.fragment:
    raise ValueError('doesn\'t support fragment in the url.')
  scheme, netloc, path, params, query, fragment = parsed
  # urlparse('foo.com') will result in netloc='', path='foo.com', which is not
  # what is desired here: move the host out of the path component.
  if path and not netloc:
    netloc = path.rstrip('/')
    path = ''
  path = path.rstrip('/')
  return urlparse.urlunparse((scheme, netloc, path, params, query, fragment))
def get_http_service(urlhost, allow_cached=True):
  """Returns existing or creates new instance of HttpService that can send
  requests to given base urlhost.
  """
  def new_service():
    # Create separate authenticator only if engine is not providing
    # authentication already. Also we use signed URLs for Google Storage, no
    # need for special authentication.
    authenticator = None
    engine_cls = get_engine_class()
    is_gs = GS_STORAGE_HOST_URL_RE.match(urlhost)
    conf = get_oauth_config()
    if not engine_cls.provides_auth and not is_gs and not conf.disabled:
      authenticator = OAuthAuthenticator(urlhost, conf)
    return HttpService(
        urlhost,
        engine=engine_cls(),
        authenticator=authenticator)
  # Ensure consistency in url naming.
  urlhost = str(urlhost).lower().rstrip('/')
  if not allow_cached:
    return new_service()
  # Create-on-first-use cache guarded by a lock: the first caller for a host
  # builds the service, later callers get the shared instance.
  with _http_services_lock:
    service = _http_services.get(urlhost)
    if not service:
      service = new_service()
      _http_services[urlhost] = service
    return service
def set_oauth_config(config):
  """Defines what OAuth configuration to use for authentication.
  If request engine (see get_engine_class) provides authentication already (as
  indicated by its 'provides_auth=True' class property) this setting is ignored.
  Arguments:
    config: oauth.OAuthConfig instance.
  """
  global _auth_config
  _auth_config = config
def get_oauth_config():
  """Returns global OAuthConfig as set by 'set_oauth_config' or default one."""
  # NOTE: the default is built fresh on each call until set_oauth_config runs.
  return _auth_config or oauth.make_oauth_config()
def get_case_insensitive_dict(original):
  """Given a dict with string keys returns new CaseInsensitiveDict.

  Accepts None as an alias for an empty dict.
  Raises ValueError if there are duplicate keys, i.e. keys that differ only in
  case and therefore collapse into one entry of the CaseInsensitiveDict.
  """
  # Normalize None up front: the original code passed 'original or {}' to the
  # constructor but then called len(original), raising TypeError for None.
  original = original or {}
  normalized = structures.CaseInsensitiveDict(original)
  if len(normalized) != len(original):
    raise ValueError('Duplicate keys in: %s' % repr(original))
  return normalized
class HttpService(object):
  """Base class for a class that provides an API to HTTP based service:
    - Provides 'request' method.
    - Supports automatic request retries.
    - Thread safe.
  """
  def __init__(self, urlhost, engine, authenticator=None):
    # |urlhost| - base URL of the service, e.g. 'https://example.com'.
    # |engine| - object that actually executes requests (RequestsLibEngine-like
    #   interface: perform_request, timeout_exception_classes, ...).
    # |authenticator| - optional Authenticator consulted on 401/403 replies.
    self.urlhost = urlhost
    self.engine = engine
    self.authenticator = authenticator
  @staticmethod
  def is_transient_http_error(code, retry_404, retry_50x, suburl, content_type):
    """Returns True if given HTTP response code is a transient error."""
    # Google Storage can return this and it should be retried.
    if code == 408:
      return True
    if code == 404:
      # Retry 404 if allowed by the caller.
      if retry_404:
        return retry_404
      # Transparently retry 404 IIF it is a CloudEndpoints API call *and* the
      # result is not JSON. This assumes that we only use JSON encoding.
      return (
          suburl.startswith('/_ah/api/') and
          not content_type.startswith('application/json'))
    # All other 4** errors are fatal.
    if code < 500:
      return False
    # Retry >= 500 error only if allowed by the caller.
    return retry_50x
  @staticmethod
  def encode_request_body(body, content_type):
    """Returns request body encoded according to its content type."""
    # No body or it is already encoded.
    if body is None or isinstance(body, str):
      return body
    # Any body should have content type set.
    assert content_type, 'Request has body, but no content type'
    encoder = CONTENT_ENCODERS.get(content_type)
    assert encoder, ('Unknown content type %s' % content_type)
    return encoder(body)
  def login(self, allow_user_interaction):
    """Runs authentication flow to refresh short lived access token.
    Authentication flow may need to interact with the user (read username from
    stdin, open local browser for OAuth2, etc.). If interaction is required and
    |allow_user_interaction| is False, the login will silently be considered
    failed (i.e. this function returns False).
    'request' method always uses non-interactive login, so long-lived
    authentication tokens (OAuth2 refresh token, etc) have to be set up
    manually by developer (by calling 'auth.py login' perhaps) prior running
    any swarming or isolate scripts.
    """
    # Use global lock to ensure two authentication flows never run in parallel.
    with _auth_lock:
      if self.authenticator:
        return self.authenticator.login(allow_user_interaction)
      return False
  def logout(self):
    """Purges access credentials from local cache."""
    if self.authenticator:
      self.authenticator.logout()
  def request(
      self,
      urlpath,
      data=None,
      content_type=None,
      max_attempts=URL_OPEN_MAX_ATTEMPTS,
      retry_404=False,
      retry_50x=True,
      timeout=URL_OPEN_TIMEOUT,
      read_timeout=URL_READ_TIMEOUT,
      stream=True,
      method=None,
      headers=None,
      follow_redirects=True):
    """Attempts to open the given url multiple times.
    |urlpath| is relative to the server root, i.e. '/some/request?param=1'.
    |data| can be either:
      - None for a GET request
      - str for pre-encoded data
      - list for data to be form-encoded
      - dict for data to be form-encoded
    - Optionally retries HTTP 404 and 50x.
    - Retries up to |max_attempts| times. If None or 0, there's no limit in the
      number of retries.
    - Retries up to |timeout| duration in seconds. If None or 0, there's no
      limit in the time taken to do retries.
    - If both |max_attempts| and |timeout| are None or 0, this functions retries
      indefinitely.
    If |method| is given it can be 'DELETE', 'GET', 'POST' or 'PUT' and it will
    be used when performing the request. By default it's GET if |data| is None
    and POST if |data| is not None.
    If |headers| is given, it should be a dict with HTTP headers to append
    to request. Caller is responsible for providing headers that make sense.
    If |follow_redirects| is True, will transparently follow HTTP redirects,
    otherwise redirect response will be returned as is. It can be recognized
    by the presence of 'Location' response header.
    If |read_timeout| is not None will configure underlying socket to
    raise TimeoutError exception whenever there's no response from the server
    for more than |read_timeout| seconds. It can happen during any read
    operation so once you pass non-None |read_timeout| be prepared to handle
    these exceptions in subsequent reads from the stream.
    Returns a file-like object, where the response may be read from, or None
    if it was unable to connect. If |stream| is False will read whole response
    into memory buffer before returning file-like object that reads from this
    memory buffer.
    """
    assert urlpath and urlpath[0] == '/', urlpath
    if data is not None:
      assert method in (None, 'DELETE', 'POST', 'PUT')
      method = method or 'POST'
      content_type = content_type or DEFAULT_CONTENT_TYPE
      body = self.encode_request_body(data, content_type)
    else:
      assert method in (None, 'DELETE', 'GET')
      method = method or 'GET'
      body = None
      assert not content_type, 'Can\'t use content_type on %s' % method
    # Prepare request info.
    parsed = urlparse.urlparse('/' + urlpath.lstrip('/'))
    resource_url = urlparse.urljoin(self.urlhost, parsed.path)
    query_params = urlparse.parse_qsl(parsed.query)
    # Prepare headers.
    headers = get_case_insensitive_dict(headers or {})
    if body is not None:
      headers['Content-Length'] = len(body)
      if content_type:
        headers['Content-Type'] = content_type
    last_error = None
    auth_attempted = False
    for attempt in retry_loop(max_attempts, timeout):
      # Log non-first attempt.
      if attempt.attempt:
        logging.warning(
            'Retrying request %s, attempt %d/%d...',
            resource_url, attempt.attempt, max_attempts)
      try:
        # Prepare and send a new request.
        request = HttpRequest(
            method, resource_url, query_params, body,
            headers, read_timeout, stream, follow_redirects)
        if self.authenticator:
          self.authenticator.authorize(request)
        response = self.engine.perform_request(request)
        # Hand the engine's timeout exception classes to the response so its
        # read() can translate them into this module's TimeoutError.
        response._timeout_exc_classes = self.engine.timeout_exception_classes()
        logging.debug('Request %s succeeded', request.get_full_url())
        return response
      except (ConnectionError, TimeoutError) as e:
        # Transport-level failure: always worth another attempt.
        last_error = e
        logging.warning(
            'Unable to open url %s on attempt %d.\n%s',
            request.get_full_url(), attempt.attempt, self._format_error(e))
        continue
      except HttpError as e:
        last_error = e
        # Access denied -> authenticate.
        if e.code in (401, 403):
          logging.warning(
              'Authentication is required for %s on attempt %d.\n%s',
              request.get_full_url(), attempt.attempt, self._format_error(e))
          # Try forcefully refresh the token. If it doesn't help, then server
          # does not support authentication or user doesn't have required
          # access.
          if not auth_attempted:
            auth_attempted = True
            if self.login(allow_user_interaction=False):
              # Success! Run request again immediately.
              attempt.skip_sleep = True
              continue
          # Authentication attempt was unsuccessful.
          logging.error(
              'Unable to authenticate to %s (%s).',
              self.urlhost, self._format_error(e))
          if self.authenticator:
            logging.error(
                'Use auth.py to login: python auth.py login --service=%s',
                self.urlhost)
          return None
        # Hit a error that can not be retried -> stop retry loop.
        if not self.is_transient_http_error(
            e.code, retry_404, retry_50x, parsed.path, e.content_type):
          # This HttpError means we reached the server and there was a problem
          # with the request, so don't retry.
          logging.warning(
              'Able to connect to %s but an exception was thrown.\n%s',
              request.get_full_url(), self._format_error(e, verbose=True))
          return None
        # Retry all other errors.
        logging.warning(
            'Server responded with error on %s on attempt %d.\n%s',
            request.get_full_url(), attempt.attempt, self._format_error(e))
        continue
    logging.error(
        'Unable to open given url, %s, after %d attempts.\n%s',
        request.get_full_url(), max_attempts,
        self._format_error(last_error, verbose=True))
    return None
  def json_request(self, urlpath, data=None, **kwargs):
    """Sends JSON request to the server and parses JSON response it get back.
    Arguments:
      urlpath: relative request path (e.g. '/auth/v1/...').
      data: object to serialize to JSON and sent in the request.
    See self.request() for more details.
    Returns:
      Deserialized JSON response on success, None on error or timeout.
    """
    # Only set Content-Type when a body is actually sent.
    content_type = JSON_CONTENT_TYPE if data is not None else None
    response = self.request(
        urlpath, content_type=content_type, data=data, stream=False, **kwargs)
    if not response:
      return None
    try:
      text = response.read()
      if not text:
        return None
    except TimeoutError:
      return None
    try:
      return json.loads(text)
    except ValueError as e:
      logging.error('Not a JSON response when calling %s: %s; full text: %s',
                    urlpath, e, text)
      return None
  def _format_error(self, exc, verbose=False):
    """Returns readable description of a NetError."""
    if not isinstance(exc, NetError):
      return str(exc)
    if not verbose:
      return str(exc.inner_exc or exc)
    # Avoid making multiple calls to parse_request_exception since they may
    # have side effects on the exception, e.g. urllib2 based exceptions are in
    # fact file-like objects that can not be read twice.
    if exc.verbose_info is None:
      out = [str(exc.inner_exc or exc)]
      headers, body = self.engine.parse_request_exception(exc.inner_exc)
      if headers or body:
        out.append('----------')
        if headers:
          for header, value in headers:
            # Skip service-internal 'x-' headers; keep the standard ones.
            if not header.startswith('x-'):
              out.append('%s: %s' % (header.capitalize(), value))
          out.append('')
        out.append(body or '<empty body>')
        out.append('----------')
      exc.verbose_info = '\n'.join(out)
    return exc.verbose_info
class HttpRequest(object):
  """Request to HttpService."""
  def __init__(
      self, method, url, params, body,
      headers, timeout, stream, follow_redirects):
    """Arguments:
      |method| - HTTP method to use
      |url| - relative URL to the resource, without query parameters
      |params| - list of (key, value) pairs to put into GET parameters
      |body| - encoded body of the request (None or str)
      |headers| - dict with request headers
      |timeout| - socket read timeout (None to disable)
      |stream| - True to stream response from socket
      |follow_redirects| - True to follow HTTP redirects.
    """
    self.method = method
    self.url = url
    # Defensive copies: later mutation by the caller must not affect this
    # request (it may be retried multiple times by HttpService).
    self.params = params[:]
    self.body = body
    self.headers = headers.copy()
    self.timeout = timeout
    self.stream = stream
    self.follow_redirects = follow_redirects
    # Cookie jar, created lazily by the 'cookies' property.
    self._cookies = None
  @property
  def cookies(self):
    """CookieJar object that will be used for cookies in this request."""
    if self._cookies is None:
      self._cookies = cookielib.CookieJar()
    return self._cookies
  def get_full_url(self):
    """Resource URL with url-encoded GET parameters."""
    if not self.params:
      return self.url
    else:
      return '%s?%s' % (self.url, urllib.urlencode(self.params))
  def make_fake_response(self, content='', headers=None):
    """Makes new fake HttpResponse to this request, useful in tests."""
    return HttpResponse.get_fake_response(content, self.get_full_url(), headers)
class HttpResponse(object):
  """Response from HttpService."""
  def __init__(self, stream, url, headers):
    # |stream| - file-like object to read the response body from.
    # |url| - full URL the response came from, used in error messages.
    # |headers| - response headers, stored case-insensitively.
    self._stream = stream
    self._url = url
    self._headers = get_case_insensitive_dict(headers)
    # Number of bytes read so far, reported on timeout errors.
    self._read = 0
    # Assigned by HttpService.request with the engine-specific exception
    # classes that read() converts into this module's TimeoutError.
    self._timeout_exc_classes = ()
  @property
  def content_length(self):
    """Total length to the response or None if not known in advance."""
    length = self.get_header('Content-Length')
    return int(length) if length is not None else None
  def get_header(self, header):
    """Returns response header (as str) or None if no such header."""
    return self._headers.get(header)
  def read(self, size=None):
    """Reads up to |size| bytes from the stream and returns them.
    If |size| is None reads all available bytes.
    Raises TimeoutError on read timeout.
    """
    assert isinstance(self._timeout_exc_classes, tuple)
    assert all(issubclass(e, Exception) for e in self._timeout_exc_classes)
    try:
      # cStringIO has a bug: stream.read(None) is not the same as stream.read().
      data = self._stream.read() if size is None else self._stream.read(size)
      self._read += len(data)
      return data
    except self._timeout_exc_classes as e:
      logging.error('Timeout while reading from %s, read %d of %s: %s',
                    self._url, self._read, self.content_length, e)
      raise TimeoutError(e)
  @classmethod
  def get_fake_response(cls, content, url, headers=None):
    """Returns HttpResponse with predefined content, useful in tests."""
    headers = dict(headers or {})
    headers['Content-Length'] = len(content)
    return cls(StringIO.StringIO(content), url, headers)
class Authenticator(object):
  """Base class for objects that know how to authenticate into http services."""
  def authorize(self, request):
    """Add authentication information to the request."""
    # Default implementation is a no-op; subclasses mutate |request| headers.
  def login(self, allow_user_interaction):
    """Run interactive authentication flow refreshing the token."""
    raise NotImplementedError()
  def logout(self):
    """Purges access credentials from local cache."""
    # Default implementation is a no-op.
class RequestsLibEngine(object):
  """Class that knows how to execute HttpRequests via requests library."""
  # This engine doesn't know how to authenticate requests on transport level.
  provides_auth = False
  @classmethod
  def parse_request_exception(cls, exc):
    """Extracts HTTP headers and body from inner exceptions put in HttpError."""
    if isinstance(exc, requests.HTTPError):
      return exc.response.headers.items(), exc.response.content
    return None, None
  @classmethod
  def timeout_exception_classes(cls):
    """A tuple of exception classes that represent timeout.
    Will be caught while reading a streaming response in HttpResponse.read and
    transformed to TimeoutError.
    """
    return (socket.timeout, ssl.SSLError, requests.Timeout)
  def __init__(self):
    super(RequestsLibEngine, self).__init__()
    self.session = requests.Session()
    # Configure session.
    # Ignore environment proxies/certs; use the bundled CA certificates.
    self.session.trust_env = False
    self.session.verify = tools.get_cacerts_bundle()
    # Configure connection pools.
    for protocol in ('https://', 'http://'):
      self.session.mount(protocol, adapters.HTTPAdapter(
          pool_connections=64,
          pool_maxsize=64,
          # Retries are handled one level up, by HttpService.request.
          max_retries=0,
          pool_block=False))
  def perform_request(self, request):
    """Sends a HttpRequest to the server and reads back the response.
    Returns HttpResponse.
    Raises:
      ConnectionError - failed to establish connection to the server.
      TimeoutError - timeout while connecting or reading response.
      HttpError - server responded with >= 400 error code.
    """
    try:
      # response is a requests.models.Response.
      response = self.session.request(
          method=request.method,
          url=request.url,
          params=request.params,
          data=request.body,
          headers=request.headers,
          cookies=request.cookies,
          timeout=request.timeout,
          stream=request.stream,
          allow_redirects=request.follow_redirects)
      # Turn >= 400 status codes into requests.HTTPError, handled below.
      response.raise_for_status()
      if request.stream:
        stream = response.raw
      else:
        stream = StringIO.StringIO(response.content)
      return HttpResponse(stream, request.get_full_url(), response.headers)
    except requests.Timeout as e:
      raise TimeoutError(e)
    except requests.HTTPError as e:
      raise HttpError(
          e.response.status_code, e.response.headers.get('Content-Type'), e)
    except (requests.ConnectionError, socket.timeout, ssl.SSLError) as e:
      raise ConnectionError(e)
class OAuthAuthenticator(Authenticator):
  """Uses OAuth Authorization header to authenticate requests."""
  def __init__(self, urlhost, config):
    super(OAuthAuthenticator, self).__init__()
    assert isinstance(config, oauth.OAuthConfig)
    self.urlhost = urlhost
    self.config = config
    # Guards lazy loading and refreshing of |_access_token|.
    self._lock = threading.Lock()
    self._access_token = None
  def authorize(self, request):
    with self._lock:
      # Load from cache on a first access.
      if not self._access_token:
        self._access_token = oauth.load_access_token(self.urlhost, self.config)
      # Refresh if expired.
      need_refresh = True
      if self._access_token:
        if self._access_token.expires_at is not None:
          # Allow 5 min of clock skew.
          now = datetime.datetime.utcnow() + datetime.timedelta(seconds=300)
          need_refresh = now >= self._access_token.expires_at
        else:
          # Token without expiration time never expired.
          need_refresh = False
      if need_refresh:
        # Non-interactive refresh (False): must not block on user input here.
        self._access_token = oauth.create_access_token(
            self.urlhost, self.config, False)
      if self._access_token:
        request.headers['Authorization'] = (
            'Bearer %s' % self._access_token.token)
  def login(self, allow_user_interaction):
    with self._lock:
      # Forcefully refresh the token.
      self._access_token = oauth.create_access_token(
          self.urlhost, self.config, allow_user_interaction)
      return self._access_token is not None
  def logout(self):
    with self._lock:
      self._access_token = None
      oauth.purge_access_token(self.urlhost, self.config)
class RetryAttempt(object):
  """Describes a single attempt of a retry_loop iteration.

  Yielded from retry_loop.

  Attributes:
    attempt: zero based index of the attempt.
    remaining: how much time is left before the retry loop stops retrying
        (None when the loop has no time limit).
    skip_sleep: set to True by the consumer to run the next attempt
        immediately, without sleeping.
  """
  def __init__(self, attempt, remaining):
    self.skip_sleep = False
    self.attempt = attempt
    self.remaining = remaining
def calculate_sleep_before_retry(attempt, max_duration):
  """How long to sleep before retrying an attempt in retry_loop."""
  # Cap on any single sleep. We're hammering a cloud-distributed service, it'll
  # survive.
  MAX_SLEEP = 10.
  # Exponential backoff (base 1.5) plus up to 1.5s of random jitter; starts
  # with relatively short waiting time (1.5/2 + 1.5^-1 median offset).
  backoff = math.pow(1.5, (attempt - 1))
  jitter = random.random() * 1.5
  duration = jitter + backoff
  assert duration > 0.1
  duration = min(MAX_SLEEP, duration)
  if max_duration:
    duration = min(max_duration, duration)
  return duration
def sleep_before_retry(attempt, max_duration):
  """Sleeps for some amount of time when retrying the attempt in retry_loop.
  To be mocked in tests.
  """
  duration = calculate_sleep_before_retry(attempt, max_duration)
  time.sleep(duration)
def current_time():
  """Used by retry loop to get current time.
  To be mocked in tests.
  """
  # Indirection exists solely so tests can stub the clock.
  return time.time()
def retry_loop(max_attempts=None, timeout=None):
  """Yields whenever new attempt to perform some action is needed.

  Yields instances of RetryAttempt class that contains information about current
  attempt. Setting |skip_sleep| attribute of RetryAttempt to True will cause
  retry loop to run next attempt immediately.

  Arguments:
    max_attempts: total number of attempts; None or 0 means unlimited.
    timeout: total retry budget in seconds; None or 0 means unlimited.
  """
  start = current_time()
  for attempt in itertools.count():
    # Too many attempts?
    if max_attempts and attempt == max_attempts:
      break
    # Retried for too long?
    remaining = (timeout - (current_time() - start)) if timeout else None
    if remaining is not None and remaining < 0:
      break
    # Kick next iteration.
    attempt_obj = RetryAttempt(attempt, remaining)
    yield attempt_obj
    if attempt_obj.skip_sleep:
      continue
    # Only sleep if we are going to try again. When |max_attempts| is not set
    # the loop is unbounded, so there is always a next attempt; the previous
    # 'max_attempts and ...' condition skipped sleeping entirely in that case,
    # retrying in a busy loop.
    if not max_attempts or attempt != max_attempts - 1:
      remaining = (timeout - (current_time() - start)) if timeout else None
      if remaining is not None and remaining < 0:
        break
      sleep_before_retry(attempt, remaining)
| |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.contrib.hooks.gcp_pubsub_hook import PubSubHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class PubSubTopicCreateOperator(BaseOperator):
    """Create a PubSub topic.
    By default, if the topic already exists, this operator will
    not cause the DAG to fail. ::
        with DAG('successful DAG') as dag:
            (
                dag
                >> PubSubTopicCreateOperator(project='my-project',
                                             topic='my_new_topic')
                >> PubSubTopicCreateOperator(project='my-project',
                                             topic='my_new_topic')
            )
    The operator can be configured to fail if the topic already exists. ::
        with DAG('failing DAG') as dag:
            (
                dag
                >> PubSubTopicCreateOperator(project='my-project',
                                             topic='my_new_topic')
                >> PubSubTopicCreateOperator(project='my-project',
                                             topic='my_new_topic',
                                             fail_if_exists=True)
            )
    Both ``project`` and ``topic`` are templated so you can use
    variables in them.
    """
    template_fields = ['project', 'topic']
    ui_color = '#0273d4'
    @apply_defaults
    def __init__(
            self,
            project,
            topic,
            fail_if_exists=False,
            gcp_conn_id='google_cloud_default',
            delegate_to=None,
            *args,
            **kwargs):
        """
        :param project: the GCP project ID where the topic will be created
        :type project: string
        :param topic: the topic to create. Do not include the
            full topic path. In other words, instead of
            ``projects/{project}/topics/{topic}``, provide only
            ``{topic}``. (templated)
        :type topic: string
        :param fail_if_exists: if set, raise an exception if the topic
            already exists
        :type fail_if_exists: bool
        :param gcp_conn_id: The connection ID to use connecting to
            Google Cloud Platform.
        :type gcp_conn_id: string
        :param delegate_to: The account to impersonate, if any.
            For this to work, the service account making the request
            must have domain-wide delegation enabled.
        :type delegate_to: string
        """
        super(PubSubTopicCreateOperator, self).__init__(*args, **kwargs)
        self.project = project
        self.topic = topic
        self.fail_if_exists = fail_if_exists
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
    def execute(self, context):
        """Create the topic through PubSubHook, honoring ``fail_if_exists``."""
        hook = PubSubHook(gcp_conn_id=self.gcp_conn_id,
                          delegate_to=self.delegate_to)
        hook.create_topic(self.project, self.topic,
                          fail_if_exists=self.fail_if_exists)
class PubSubSubscriptionCreateOperator(BaseOperator):
    """Create a PubSub subscription.
    By default, the subscription will be created in ``topic_project``. If
    ``subscription_project`` is specified and the GCP credentials allow, the
    Subscription can be created in a different project from its topic.
    By default, if the subscription already exists, this operator will
    not cause the DAG to fail. However, the topic must exist in the project. ::
        with DAG('successful DAG') as dag:
            (
                dag
                >> PubSubSubscriptionCreateOperator(
                    topic_project='my-project', topic='my-topic',
                    subscription='my-subscription')
                >> PubSubSubscriptionCreateOperator(
                    topic_project='my-project', topic='my-topic',
                    subscription='my-subscription')
            )
    The operator can be configured to fail if the subscription already exists.
    ::
        with DAG('failing DAG') as dag:
            (
                dag
                >> PubSubSubscriptionCreateOperator(
                    topic_project='my-project', topic='my-topic',
                    subscription='my-subscription')
                >> PubSubSubscriptionCreateOperator(
                    topic_project='my-project', topic='my-topic',
                    subscription='my-subscription', fail_if_exists=True)
            )
    Finally, subscription is not required. If not passed, the operator will
    generate a universally unique identifier for the subscription's name. ::
        with DAG('DAG') as dag:
            (
                dag >> PubSubSubscriptionCreateOperator(
                    topic_project='my-project', topic='my-topic')
            )
    ``topic_project``, ``topic``, ``subscription``, and
    ``subscription_project`` are templated so you can use variables in them.
    """
    template_fields = ['topic_project', 'topic', 'subscription',
                       'subscription_project']
    ui_color = '#0273d4'
    @apply_defaults
    def __init__(
            self,
            topic_project,
            topic,
            subscription=None,
            subscription_project=None,
            ack_deadline_secs=10,
            fail_if_exists=False,
            gcp_conn_id='google_cloud_default',
            delegate_to=None,
            *args,
            **kwargs):
        """
        :param topic_project: the GCP project ID where the topic exists
        :type topic_project: string
        :param topic: the topic to create. Do not include the
            full topic path. In other words, instead of
            ``projects/{project}/topics/{topic}``, provide only
            ``{topic}``. (templated)
        :type topic: string
        :param subscription: the Pub/Sub subscription name. If empty, a random
            name will be generated using the uuid module
        :type subscription: string
        :param subscription_project: the GCP project ID where the subscription
            will be created. If empty, ``topic_project`` will be used.
        :type subscription_project: string
        :param ack_deadline_secs: Number of seconds that a subscriber has to
            acknowledge each message pulled from the subscription
        :type ack_deadline_secs: int
        :param fail_if_exists: if set, raise an exception if the subscription
            already exists
        :type fail_if_exists: bool
        :param gcp_conn_id: The connection ID to use connecting to
            Google Cloud Platform.
        :type gcp_conn_id: string
        :param delegate_to: The account to impersonate, if any.
            For this to work, the service account making the request
            must have domain-wide delegation enabled.
        :type delegate_to: string
        """
        super(PubSubSubscriptionCreateOperator, self).__init__(*args, **kwargs)
        self.topic_project = topic_project
        self.topic = topic
        self.subscription = subscription
        self.subscription_project = subscription_project
        self.ack_deadline_secs = ack_deadline_secs
        self.fail_if_exists = fail_if_exists
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
    def execute(self, context):
        """Create the subscription via PubSubHook and return its result."""
        hook = PubSubHook(gcp_conn_id=self.gcp_conn_id,
                          delegate_to=self.delegate_to)
        return hook.create_subscription(
            self.topic_project, self.topic, self.subscription,
            self.subscription_project, self.ack_deadline_secs,
            self.fail_if_exists)
class PubSubTopicDeleteOperator(BaseOperator):
    """Delete a PubSub topic.
    By default, if the topic does not exist, this operator will
    not cause the DAG to fail. ::
        with DAG('successful DAG') as dag:
            (
                dag
                >> PubSubTopicDeleteOperator(project='my-project',
                                             topic='non_existing_topic')
            )
    The operator can be configured to fail if the topic does not exist. ::
        with DAG('failing DAG') as dag:
            (
                dag
                >> PubSubTopicDeleteOperator(project='my-project',
                                             topic='non_existing_topic',
                                             fail_if_not_exists=True)
            )
    Both ``project`` and ``topic`` are templated so you can use
    variables in them.
    """
    template_fields = ['project', 'topic']
    ui_color = '#cb4335'
    @apply_defaults
    def __init__(
            self,
            project,
            topic,
            fail_if_not_exists=False,
            gcp_conn_id='google_cloud_default',
            delegate_to=None,
            *args,
            **kwargs):
        """
        :param project: the GCP project ID in which to work (templated)
        :type project: string
        :param topic: the topic to delete. Do not include the
            full topic path. In other words, instead of
            ``projects/{project}/topics/{topic}``, provide only
            ``{topic}``. (templated)
        :type topic: string
        :param fail_if_not_exists: If True and the topic does not exist, fail
            the task
        :type fail_if_not_exists: bool
        :param gcp_conn_id: The connection ID to use connecting to
            Google Cloud Platform.
        :type gcp_conn_id: string
        :param delegate_to: The account to impersonate, if any.
            For this to work, the service account making the request
            must have domain-wide delegation enabled.
        :type delegate_to: string
        """
        super(PubSubTopicDeleteOperator, self).__init__(*args, **kwargs)
        self.project = project
        self.topic = topic
        self.fail_if_not_exists = fail_if_not_exists
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
    def execute(self, context):
        """Delete the topic through PubSubHook, honoring ``fail_if_not_exists``."""
        hook = PubSubHook(gcp_conn_id=self.gcp_conn_id,
                          delegate_to=self.delegate_to)
        hook.delete_topic(self.project, self.topic,
                          fail_if_not_exists=self.fail_if_not_exists)
class PubSubSubscriptionDeleteOperator(BaseOperator):
    """Delete a PubSub subscription.

    By default, if the subscription does not exist, this operator will
    not cause the DAG to fail. ::

        with DAG('successful DAG') as dag:
            (
                dag
                >> PubSubSubscriptionDeleteOperator(project='my-project',
                                                    subscription='non-existing')
            )

    The operator can be configured to fail if the subscription does not
    exist. ::

        with DAG('failing DAG') as dag:
            (
                dag
                >> PubSubSubscriptionDeleteOperator(
                    project='my-project', subscription='non-existing',
                    fail_if_not_exists=True)
            )

    ``project``, and ``subscription`` are templated so you can use
    variables in them.
    """
    # NOTE: the original docstring said the operator could "fail if the
    # subscription already exists" — for a delete operator the failure mode
    # is the subscription NOT existing (see ``fail_if_not_exists``); fixed.
    template_fields = ['project', 'subscription']
    ui_color = '#cb4335'

    @apply_defaults
    def __init__(
            self,
            project,
            subscription,
            fail_if_not_exists=False,
            gcp_conn_id='google_cloud_default',
            delegate_to=None,
            *args,
            **kwargs):
        """
        :param project: the GCP project ID in which to work (templated)
        :type project: string
        :param subscription: the subscription to delete. Do not include the
            full subscription path. In other words, instead of
            ``projects/{project}/subscription/{subscription}``, provide only
            ``{subscription}``. (templated)
        :type subscription: string
        :param fail_if_not_exists: If True and the subscription does not exist,
            fail the task
        :type fail_if_not_exists: bool
        :param gcp_conn_id: The connection ID to use connecting to
            Google Cloud Platform.
        :type gcp_conn_id: string
        :param delegate_to: The account to impersonate, if any.
            For this to work, the service account making the request
            must have domain-wide delegation enabled.
        :type delegate_to: string
        """
        super(PubSubSubscriptionDeleteOperator, self).__init__(*args, **kwargs)
        self.project = project
        self.subscription = subscription
        self.fail_if_not_exists = fail_if_not_exists
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to

    def execute(self, context):
        hook = PubSubHook(gcp_conn_id=self.gcp_conn_id,
                          delegate_to=self.delegate_to)
        hook.delete_subscription(self.project, self.subscription,
                                 fail_if_not_exists=self.fail_if_not_exists)
class PubSubPublishOperator(BaseOperator):
    """Publish messages to a PubSub topic.

    Each Task publishes all provided messages to the same topic
    in a single GCP project. If the topic does not exist, this
    task will fail. ::

        from base64 import b64encode as b64e

        m1 = {'data': b64e('Hello, World!'),
              'attributes': {'type': 'greeting'}
             }
        m2 = {'data': b64e('Knock, knock')}
        m3 = {'attributes': {'foo': ''}}

        t1 = PubSubPublishOperator(
            project='my-project', topic='my_topic',
            messages=[m1, m2, m3],
            dag=dag)

    ``project`` , ``topic``, and ``messages`` are templated so you can use
    variables in them.
    """
    # NOTE: the original docstring example passed ``create_topic=True``,
    # which is not a parameter of this operator (see __init__); removed.
    template_fields = ['project', 'topic', 'messages']
    ui_color = '#0273d4'

    @apply_defaults
    def __init__(
            self,
            project,
            topic,
            messages,
            gcp_conn_id='google_cloud_default',
            delegate_to=None,
            *args,
            **kwargs):
        """
        :param project: the GCP project ID in which to work (templated)
        :type project: string
        :param topic: the topic to which to publish. Do not include the
            full topic path. In other words, instead of
            ``projects/{project}/topics/{topic}``, provide only
            ``{topic}``. (templated)
        :type topic: string
        :param messages: a list of messages to be published to the
            topic. Each message is a dict with one or more of the
            following keys-value mappings:
            * 'data': a base64-encoded string
            * 'attributes': {'key1': 'value1', ...}
            Each message must contain at least a non-empty 'data' value
            or an attribute dict with at least one key. See
            https://cloud.google.com/pubsub/docs/reference/rest/v1/PubsubMessage
            (templated)
        :type messages: list
        :param gcp_conn_id: The connection ID to use connecting to
            Google Cloud Platform.
        :type gcp_conn_id: string
        :param delegate_to: The account to impersonate, if any.
            For this to work, the service account making the request
            must have domain-wide delegation enabled.
        :type delegate_to: string
        """
        super(PubSubPublishOperator, self).__init__(*args, **kwargs)
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.project = project
        self.topic = topic
        self.messages = messages

    def execute(self, context):
        hook = PubSubHook(gcp_conn_id=self.gcp_conn_id,
                          delegate_to=self.delegate_to)
        hook.publish(self.project, self.topic, self.messages)
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
def parseargs(argv, longopts, longflags, required, usesrest):
    """Parse command line arguments according to an argspec.

    :param argv: argument list to parse. Mutated: recognised options and
        flags (and option values) are deleted as they are consumed.
    :param longopts: dict mapping (short, long) name tuples to
        (default, help) tuples. The default's type coerces the supplied
        value; a default of None marks the option as mandatory here —
        a missing mandatory option prints an error and exits.
    :param longflags: list of (short, long, help) boolean flags.
    :param required: unused here (callers check it via checkArgs);
        kept for signature compatibility.
    :param usesrest: unused here; kept for signature compatibility.
    :returns: dict of {longname: value}, plus key "__anon__" holding the
        leftover non-option arguments.
    """
    args = {}
    # Boolean flags: present under either spelling -> True, absent -> False.
    for f, flag, help_text in longflags:
        try:
            i = argv.index("--" + flag)
            args[flag] = True
            del argv[i]
        except ValueError:
            try:
                i = argv.index("-" + f)
                args[flag] = True
                del argv[i]
            except ValueError:
                args[flag] = False
    # Valued options: coerce the token following the option to the type of
    # the option's default value.
    # NOTE(review): an option given as the last token with no value still
    # raises IndexError (argv[i+1]) — pre-existing behaviour, not changed.
    for k, key in longopts:
        try:
            i = argv.index("--" + key)
            value = longopts[k, key][0].__class__(argv[i + 1])
            args[key] = value
            del argv[i + 1]
            del argv[i]
        except ValueError:
            try:
                i = argv.index("-" + k)
                value = longopts[k, key][0].__class__(argv[i + 1])
                args[key] = value
                del argv[i + 1]
                del argv[i]
            except ValueError:
                if longopts[k, key][0] is None:
                    # Mandatory option missing (unless help was requested).
                    if not args.get("help", args.get("h", False)):
                        print ("missing argument: --" + key + " -" + k)
                        sys.exit(0)
                args[key] = longopts[k, key][0]
    # Whatever is left and doesn't look like an option is anonymous input.
    # Fixed: the old test `len(argv)>0 and a[0] != "-"` was redundant
    # (the comprehension never runs on an empty argv) and crashed with
    # IndexError on empty-string arguments.
    args["__anon__"] = [a for a in argv if not a.startswith("-")]
    return args
def showHelp(argspec):
    """Pretty-print usage/help text derived from *argspec*.

    argspec is (longopts, longflags, required, usesrest) as used by
    parseargs. Required options are listed first and marked '**'; the
    left column is padded to the widest option/flag label.
    """
    longopts, longflags, required, usesrest = argspec
    # Options as (left-label, help, default), required ones collected first.
    rlines, olines = [], []
    for k, key in longopts:
        default, description = longopts[k, key]
        left = " ** " if key in required else " "
        if k:
            left += "-" + k + " "
        if key:
            left += "--" + key
        right = description if description else ""
        (rlines if key in required else olines).append((left, right, default))
    lines = rlines + olines
    # Flags as (left-label, help).
    flaglines = []
    for f, flag, help in longflags:
        left = " "
        if f:
            left += "-" + f + " "
        if flag:
            left += "--" + flag + " "
        flaglines.append((left, help if help else ""))
    # Width of the left column across both options and flags.
    w = max([len(l) for l, _, _ in lines] +
            [len(l) for l, _ in flaglines] + [0])
    # Header.
    print ("")
    print ("Usage:")
    print ("\t" + sys.argv[0] + " [options] [flags]")
    # How the unnamed (rest) arguments are used, if the program uses them.
    if usesrest:
        print (usesrest)
    else:
        print ("")
    # Options (required first), then flags, each in two padded columns.
    print ("")
    print ("Flags/options marked '**' below are required.")
    print ("")
    print ("Options:")
    for left, right, default in lines:
        print (left.ljust(w) + " " + right)
        if default and default != '':
            print (w * " " + " Default: " + str(default))
    print ("")
    print ("Flags:")
    for left, right in flaglines:
        print (left.ljust(w) + " " + right)
if __name__ == "__main__":
    import os
    import pprint
    # Argspec for parseargs/showHelp: (options-dict, flags-list,
    # required-names, rest-usage-string).
    # NOTE(review): the short option "p" is used for both "path" and
    # "password"; -p on the command line resolves to whichever is matched
    # first during iteration — confirm this is intended.
    argspec = [
        { ("p", "path" ): (".", "Directory to rummage around below for files to transcode & upload."),
          ("e", "exclude-pattern"): ("(done|encoded|unsorted|transcode.log|to_upload)", "Pattern/filespecs to exclude from walk"),
          ("u", "username"): ("", "Username for ftp server"),
          ("p", "password"): ("", "Password for ftp server"),
          ("s", "server"): ("ftp.blip.tv", "FTP Server to upload to"),
        },
        [("h","help", "Show some help on how to use this")],
        ["username", "password"],
        ""
    ]
    #
    # Handle a reading a json encoded config file. Probably something nicer that
    # this would be good, but this will do for now.
    # NOTE(review): depends on the third-party, Python-2-era cjson module.
    #
    if os.path.exists("transcode.conf"):
        import cjson
        g = open("transcode.conf", "rb")
        Y_ = g.read()
        g.close()
        conf_args = cjson.decode(Y_)
    else:
        conf_args = {}
    args = parseargs( sys.argv[1:], *argspec)
    # FIXME: unify args & conf_args in a nicer, more sensible way.
    # (As written, values from transcode.conf override the command line.)
    args.update(conf_args)
    #
    # This can probably be automated base on "required" part of argspec.
    # Not yet done though!
    #
    if args["help"] or args["username"]=="" or args["password"]=="":
        if not args["help"]:
            print ("USAGE ERROR:")
            if args["username"] == "":
                print ("\tusername must be given")
            if args["password"] == "":
                print ("\tpassword must be given")
            print ("")
        showHelp(argspec)
        sys.exit(0)
    pprint.pprint(args)
#
# Handle a reading a json encoded config file. Probably something nicer that
# this would be good, but this will do for now.
#
def readJSONConfig(path):
    """Read a JSON-encoded config file and return its decoded content.

    Returns an empty dict when *path* does not exist, so callers can
    merge the result unconditionally.
    """
    # stdlib json replaces the unmaintained, Python-2-only cjson module;
    # the decoded result is identical for any valid JSON document.
    import json
    if not os.path.exists(path):
        return {}
    # 'with' guarantees the file handle is closed even on a decode error.
    with open(path, "rb") as g:
        return json.loads(g.read())
def checkArgs(args, argspec):
    """Return (ok, missing).

    'missing' lists every required option whose parsed value still equals
    its declared default; 'ok' is True when that list is empty.
    """
    longopts = argspec[0]
    missing = []
    for name in argspec[2]:
        # Find the declared default for this required option (None if the
        # option is not declared at all).
        default = None
        for k, key in longopts:
            if key == name:
                default = longopts[k, key][0]
                break
        if args[name] == default:
            missing.append(name)
    return (not missing, missing)
def needToShowUsage(args, argspec):
    """True when --help was requested or a required option is missing."""
    ok, _ = checkArgs(args, argspec)
    return args["help"] or not ok
def showUsageBasedOnHowUsed(args, argspec):
    """Print a usage-error preamble (unless --help was given), then the help."""
    _, missing = checkArgs(args, argspec)
    if not args["help"]:
        print ("USAGE ERROR:")
        for name in missing:
            print ("\t" + "--" + name + " must be given")
    showHelp(argspec)
| |
"""Translation helper functions."""
import functools
import gettext as gettext_module
import os
import re
import sys
import warnings
from collections import OrderedDict
from threading import local
from django.apps import apps
from django.conf import settings
from django.conf.locale import LANG_INFO
from django.core.exceptions import AppRegistryNotReady
from django.core.signals import setting_changed
from django.dispatch import receiver
from django.utils.safestring import SafeData, mark_safe
from django.utils.translation import LANGUAGE_SESSION_KEY
# Translations are cached in a dictionary for every language.
# The active translations are stored by threadid to make them thread local.
_translations = {}
_active = local()
# The default translation is based on the settings file.
_default = None
# magic gettext number to separate context from message
CONTEXT_SEPARATOR = "\x04"
# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9
# and RFC 3066, section 2.1
accept_language_re = re.compile(r'''
([A-Za-z]{1,8}(?:-[A-Za-z0-9]{1,8})*|\*) # "en", "en-au", "x-y-z", "es-419", "*"
(?:\s*;\s*q=(0(?:\.\d{,3})?|1(?:\.0{,3})?))? # Optional "q=1.00", "q=0.8"
(?:\s*,\s*|$) # Multiple accepts per header.
''', re.VERBOSE)
language_code_re = re.compile(
r'^[a-z]{1,8}(?:-[a-z0-9]{1,8})*(?:@[a-z0-9]{1,20})?$',
re.IGNORECASE
)
language_code_prefix_re = re.compile(r'^/(\w+([@-]\w+)?)(/|$)')
@receiver(setting_changed)
def reset_cache(**kwargs):
    """
    Clear the cached language data whenever the LANGUAGES or LANGUAGE_CODE
    setting changes, since previously accepted languages may no longer be
    valid (and vice versa).
    """
    if kwargs['setting'] in {'LANGUAGES', 'LANGUAGE_CODE'}:
        check_for_language.cache_clear()
        get_languages.cache_clear()
        get_supported_language_variant.cache_clear()
def to_locale(language):
    """Turn a language name (en-us) into a locale name (en_US)."""
    pieces = language.lower().split('-')
    if len(pieces) == 1:
        # No country/script part at all: "en" stays "en".
        return pieces[0]
    # More than 2 characters after the dash: capitalize only the first
    # letter (sr-latn -> sr_Latn); exactly 2: uppercase both (en-us -> en_US).
    tag = pieces[1]
    pieces[1] = tag.title() if len(tag) > 2 else tag.upper()
    return pieces[0] + '_' + '-'.join(pieces[1:])
def to_language(locale):
    """Turn a locale name (en_US) into a language name (en-us)."""
    head, sep, tail = locale.partition('_')
    if sep:
        return head.lower() + '-' + tail.lower()
    return locale.lower()
class DjangoTranslation(gettext_module.GNUTranslations):
    """
    Set up the GNUTranslations context with regard to output charset.
    This translation object will be constructed out of multiple GNUTranslations
    objects by merging their catalogs. It will construct an object for the
    requested language and add a fallback to the default language, if it's
    different from the requested language.
    """
    # gettext domain; 'django' gets special treatment (module-level cache,
    # localedirs forbidden).
    domain = 'django'
    def __init__(self, language, domain=None, localedirs=None):
        """Create a GNUTranslations() using many locale directories"""
        gettext_module.GNUTranslations.__init__(self)
        if domain is not None:
            self.domain = domain
        self.__language = language
        self.__to_language = to_language(language)
        self.__locale = to_locale(language)
        self._catalog = None
        # If a language doesn't have a catalog, use the Germanic default for
        # pluralization: anything except one is pluralized.
        self.plural = lambda n: int(n != 1)
        if self.domain == 'django':
            if localedirs is not None:
                # A module-level cache is used for caching 'django' translations
                warnings.warn("localedirs is ignored when domain is 'django'.", RuntimeWarning)
                localedirs = None
            self._init_translation_catalog()
        if localedirs:
            for localedir in localedirs:
                translation = self._new_gnu_trans(localedir)
                self.merge(translation)
        else:
            # Merge order matters: later merges overwrite via dict.update(),
            # so LOCALE_PATHS catalogs override app-provided ones.
            self._add_installed_apps_translations()
            self._add_local_translations()
        if self.__language == settings.LANGUAGE_CODE and self.domain == 'django' and self._catalog is None:
            # default lang should have at least one translation file available.
            raise IOError("No translation files found for default language %s." % settings.LANGUAGE_CODE)
        self._add_fallback(localedirs)
        if self._catalog is None:
            # No catalogs found for this language, set an empty catalog.
            self._catalog = {}
    def __repr__(self):
        return "<DjangoTranslation lang:%s>" % self.__language
    def _new_gnu_trans(self, localedir, use_null_fallback=True):
        """
        Return a mergeable gettext.GNUTranslations instance.
        A convenience wrapper. By default gettext uses 'fallback=False'.
        Using param `use_null_fallback` to avoid confusion with any other
        references to 'fallback'.
        """
        return gettext_module.translation(
            domain=self.domain,
            localedir=localedir,
            languages=[self.__locale],
            codeset='utf-8',
            fallback=use_null_fallback)
    def _init_translation_catalog(self):
        """Create a base catalog using global django translations."""
        # The base catalog lives in a 'locale' dir next to the module that
        # defines the settings object.
        settingsfile = sys.modules[settings.__module__].__file__
        localedir = os.path.join(os.path.dirname(settingsfile), 'locale')
        translation = self._new_gnu_trans(localedir)
        self.merge(translation)
    def _add_installed_apps_translations(self):
        """Merge translations from each installed app."""
        try:
            # Reversed so that apps listed first are merged last and thus
            # win on conflicting message ids (merge() uses dict.update()).
            app_configs = reversed(list(apps.get_app_configs()))
        except AppRegistryNotReady:
            raise AppRegistryNotReady(
                "The translation infrastructure cannot be initialized before the "
                "apps registry is ready. Check that you don't make non-lazy "
                "gettext calls at import time.")
        for app_config in app_configs:
            localedir = os.path.join(app_config.path, 'locale')
            if os.path.exists(localedir):
                translation = self._new_gnu_trans(localedir)
                self.merge(translation)
    def _add_local_translations(self):
        """Merge translations defined in LOCALE_PATHS."""
        # Reversed for the same reason as installed apps: earlier paths win.
        for localedir in reversed(settings.LOCALE_PATHS):
            translation = self._new_gnu_trans(localedir)
            self.merge(translation)
    def _add_fallback(self, localedirs=None):
        """Set the GNUTranslations() fallback with the default language."""
        # Don't set a fallback for the default language or any English variant
        # (as it's empty, so it'll ALWAYS fall back to the default language)
        if self.__language == settings.LANGUAGE_CODE or self.__language.startswith('en'):
            return
        if self.domain == 'django':
            # Get from cache
            default_translation = translation(settings.LANGUAGE_CODE)
        else:
            default_translation = DjangoTranslation(
                settings.LANGUAGE_CODE, domain=self.domain, localedirs=localedirs
            )
        self.add_fallback(default_translation)
    def merge(self, other):
        """Merge another translation into this catalog."""
        if not getattr(other, '_catalog', None):
            return # NullTranslations() has no _catalog
        if self._catalog is None:
            # Take plural and _info from first catalog found (generally Django's).
            self.plural = other.plural
            self._info = other._info.copy()
            self._catalog = other._catalog.copy()
        else:
            self._catalog.update(other._catalog)
    def language(self):
        """Return the translation language."""
        return self.__language
    def to_language(self):
        """Return the translation language name."""
        return self.__to_language
def translation(language):
    """
    Return a translation object in the default 'django' domain.
    """
    # Translations are constructed once per language and cached for the
    # lifetime of the process.
    try:
        return _translations[language]
    except KeyError:
        trans = DjangoTranslation(language)
        _translations[language] = trans
        return trans
def activate(language):
    """
    Fetch the translation object for a given language and install it as the
    current translation object for the current thread.
    """
    if language:
        _active.value = translation(language)
def deactivate():
    """
    Uninstall the active translation object so that further _() calls resolve
    to the default translation object.
    """
    try:
        del _active.value
    except AttributeError:
        # Nothing was activated on this thread.
        pass
def deactivate_all():
    """
    Make the active translation object a NullTranslations() instance. This is
    useful when we want delayed translations to appear as the original string
    for some reason.
    """
    null_translation = gettext_module.NullTranslations()
    # NullTranslations has no to_language(); patch one on so get_language()
    # callers see None instead of an AttributeError path.
    null_translation.to_language = lambda *args: None
    _active.value = null_translation
def get_language():
    """Return the currently selected language."""
    active = getattr(_active, "value", None)
    if active is None:
        # No translation activated: report the default language.
        return settings.LANGUAGE_CODE
    try:
        return active.to_language()
    except AttributeError:
        # Translation object without to_language(): fall back to default.
        return settings.LANGUAGE_CODE
def get_language_bidi():
    """
    Return selected language's BiDi layout.
    * False = left-to-right layout
    * True = right-to-left layout
    """
    lang = get_language()
    if lang is None:
        return False
    # Reuse the language already fetched; the original called get_language()
    # a second time here, which was redundant (and could race with another
    # thread re-activating a different language between the two calls).
    base_lang = lang.split('-')[0]
    return base_lang in settings.LANGUAGES_BIDI
def catalog():
    """
    Return the current active catalog for further processing.
    This can be used if you need to modify the catalog or want to access the
    whole message catalog instead of just translating one string.
    """
    global _default
    active = getattr(_active, "value", None)
    if active is not None:
        return active
    # Nothing activated on this thread: lazily build and reuse the default.
    if _default is None:
        _default = translation(settings.LANGUAGE_CODE)
    return _default
def gettext(message):
    """
    Translate the 'message' string. It uses the current thread to find the
    translation object to use. If no current translation is activated, the
    message will be run through the default translation object.
    """
    global _default
    normalized = message.replace('\r\n', '\n').replace('\r', '\n')
    if not normalized:
        # Return an empty value of the corresponding type if an empty message
        # is given, instead of metadata, which is the default gettext behavior.
        result = type(message)('')
    else:
        if _default is None:
            _default = translation(settings.LANGUAGE_CODE)
        translator = getattr(_active, "value", _default)
        result = translator.gettext(normalized)
    # Preserve safe-string markedness of the input.
    if isinstance(message, SafeData):
        return mark_safe(result)
    return result
def pgettext(context, message):
    """Translate *message* disambiguated by *context* (gettext msgctxt)."""
    combined = "%s%s%s" % (context, CONTEXT_SEPARATOR, message)
    translated = gettext(combined)
    if CONTEXT_SEPARATOR in translated:
        # Translation not found: return the original message untouched.
        return message
    if isinstance(message, SafeData):
        return mark_safe(translated)
    return translated
def gettext_noop(message):
    """
    Mark a string for translation without translating it now. Useful for
    strings stored in global variables that must stay in the base language
    (e.g. because they are used externally) and get translated later.
    """
    return message
def do_ntranslate(singular, plural, number, translation_function):
    """Dispatch a plural-aware lookup to the active (or default) catalog."""
    global _default
    translator = getattr(_active, "value", None)
    if translator is None:
        if _default is None:
            _default = translation(settings.LANGUAGE_CODE)
        translator = _default
    return getattr(translator, translation_function)(singular, plural, number)
def ngettext(singular, plural, number):
    """
    Return the translation of either the singular or the plural form,
    selected by 'number'.
    """
    return do_ntranslate(singular, plural, number, 'ngettext')
def npgettext(context, singular, plural, number):
    """Plural-aware variant of pgettext()."""
    ctx_singular = "%s%s%s" % (context, CONTEXT_SEPARATOR, singular)
    ctx_plural = "%s%s%s" % (context, CONTEXT_SEPARATOR, plural)
    result = ngettext(ctx_singular, ctx_plural, number)
    if CONTEXT_SEPARATOR in result:
        # Translation not found: retry without the context marker.
        result = ngettext(singular, plural, number)
    return result
def all_locale_paths():
    """
    Return a list of paths to user-provided language files.
    """
    # The global path sits next to the module that defines the settings
    # object; user paths come from settings.LOCALE_PATHS.
    settings_dir = os.path.dirname(sys.modules[settings.__module__].__file__)
    return [os.path.join(settings_dir, 'locale')] + list(settings.LOCALE_PATHS)
@functools.lru_cache(maxsize=1000)
def check_for_language(lang_code):
    """
    Check whether there is a global language file for the given language
    code. This is used to decide whether a user-provided language is
    available.
    lru_cache should have a maxsize to prevent from memory exhaustion attacks,
    as the provided language codes are taken from the HTTP request. See also
    <https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
    """
    # First, a quick check to make sure lang_code is well-formed (#21458)
    if lang_code is None or not language_code_re.search(lang_code):
        return False
    locale = to_locale(lang_code)
    return any(
        gettext_module.find('django', path, [locale]) is not None
        for path in all_locale_paths()
    )
@functools.lru_cache()
def get_languages():
    """
    Cache of settings.LANGUAGES in an OrderedDict for easy lookups by key.
    """
    languages = OrderedDict(settings.LANGUAGES)
    return languages
@functools.lru_cache(maxsize=1000)
def get_supported_language_variant(lang_code, strict=False):
    """
    Return the language-code that's listed in supported languages, possibly
    selecting a more generic variant. Raise LookupError if nothing is found.
    If `strict` is False (the default), look for an alternative
    country-specific variant when the currently checked is not found.
    lru_cache should have a maxsize to prevent from memory exhaustion attacks,
    as the provided language codes are taken from the HTTP request. See also
    <https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
    """
    if not lang_code:
        raise LookupError(lang_code)
    # Candidate order: exact code, any LANG_INFO fallbacks, then the
    # language-only prefix (e.g. 'fr-ca' -> 'fr').
    candidates = [lang_code]
    candidates.extend(LANG_INFO.get(lang_code, {}).get('fallback', []))
    generic = lang_code.split('-')[0]
    candidates.append(generic)
    supported = get_languages()
    for code in candidates:
        if code in supported and check_for_language(code):
            return code
    if not strict:
        # e.g. if fr-fr is not supported, accept fr-ca instead.
        for supported_code in supported:
            if supported_code.startswith(generic + '-'):
                return supported_code
    raise LookupError(lang_code)
def get_language_from_path(path, strict=False):
    """
    Return the language-code if there is a valid language-code
    found in the `path`.
    If `strict` is False (the default), the function will look for an alternative
    country-specific variant when the currently checked is not found.
    """
    match = language_code_prefix_re.match(path)
    if match is None:
        return None
    try:
        return get_supported_language_variant(match.group(1), strict=strict)
    except LookupError:
        return None
def get_language_from_request(request, check_path=False):
    """
    Analyze the request to find what language the user wants the system to
    show. Only languages listed in settings.LANGUAGES are taken into account.
    If the user requests a sublanguage where we have a main language, we send
    out the main language.
    If check_path is True, the URL path prefix will be checked for a language
    code, otherwise this is skipped for backwards compatibility.
    """
    # Precedence: URL path (optional) -> session -> cookie -> Accept-Language
    # header -> settings.LANGUAGE_CODE.
    if check_path:
        path_lang = get_language_from_path(request.path_info)
        if path_lang is not None:
            return path_lang
    supported = get_languages()
    if hasattr(request, 'session'):
        session_lang = request.session.get(LANGUAGE_SESSION_KEY)
        if (session_lang is not None and session_lang in supported
                and check_for_language(session_lang)):
            return session_lang
    cookie_lang = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
    try:
        return get_supported_language_variant(cookie_lang)
    except LookupError:
        pass
    accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
    for accept_lang, _ in parse_accept_lang_header(accept):
        if accept_lang == '*':
            break
        if not language_code_re.search(accept_lang):
            continue
        try:
            return get_supported_language_variant(accept_lang)
        except LookupError:
            continue
    try:
        return get_supported_language_variant(settings.LANGUAGE_CODE)
    except LookupError:
        return settings.LANGUAGE_CODE
@functools.lru_cache(maxsize=1000)
def parse_accept_lang_header(lang_string):
    """
    Parse the lang_string, which is the body of an HTTP Accept-Language
    header, and return a tuple of (lang, q-value), ordered by 'q' values.
    Return an empty tuple if there are any format errors in lang_string.
    """
    pieces = accept_language_re.split(lang_string.lower())
    if pieces[-1]:
        # Trailing text the regex could not consume: malformed header.
        return ()
    result = []
    # The split yields (junk, lang, q) triples plus a final remainder.
    for i in range(0, len(pieces) - 1, 3):
        first, lang, priority = pieces[i:i + 3]
        if first:
            # Unparseable text between entries: malformed header.
            return ()
        result.append((lang, float(priority) if priority else 1.0))
    return tuple(sorted(result, key=lambda item: item[1], reverse=True))
| |
import base64
import json
from twisted.internet.defer import inlineCallbacks, DeferredQueue, returnValue
from twisted.web.http_headers import Headers
from twisted.web import http
from twisted.web.server import NOT_DONE_YET
from vumi.config import ConfigContext
from vumi.message import TransportUserMessage, TransportEvent
from vumi.tests.helpers import VumiTestCase
from vumi.tests.utils import MockHttpServer, LogCatcher
from vumi.transports.vumi_bridge.client import StreamingClient
from vumi.utils import http_request_full
from go.apps.http_api.resource import (
StreamResourceMixin, StreamingConversationResource)
from go.apps.tests.helpers import AppWorkerHelper
from go.apps.http_api.vumi_app import StreamingHTTPWorker
class TestStreamingHTTPWorker(VumiTestCase):
    @inlineCallbacks
    def setUp(self):
        """Start a StreamingHTTPWorker with one conversation configured for
        http_api access, plus a mock push server for outbound HTTP tests."""
        self.app_helper = self.add_helper(AppWorkerHelper(StreamingHTTPWorker))
        # Worker config; web_port 0 requests an ephemeral port (resolved via
        # getHost() below).
        self.config = {
            'health_path': '/health/',
            'web_path': '/foo',
            'web_port': 0,
            'metrics_prefix': 'metrics_prefix.',
            'conversation_cache_ttl': 0,
        }
        self.app = yield self.app_helper.get_app_worker(self.config)
        self.addr = self.app.webserver.getHost()
        self.url = 'http://%s:%s%s' % (
            self.addr.host, self.addr.port, self.config['web_path'])
        # API tokens the conversation accepts for HTTP basic auth.
        conv_config = {
            'http_api': {
                'api_tokens': [
                    'token-1',
                    'token-2',
                    'token-3',
                ],
                'metric_store': 'metric_store',
            }
        }
        conversation = yield self.app_helper.create_conversation(
            config=conv_config)
        yield self.app_helper.start_conversation(conversation)
        self.conversation = yield self.app_helper.get_conversation(
            conversation.key)
        # Basic auth: username is the user account key, password is a token.
        self.auth_headers = {
            'Authorization': ['Basic ' + base64.b64encode('%s:%s' % (
                conversation.user_account.key, 'token-1'))],
        }
        self.client = StreamingClient()
        # Mock server to test HTTP posting of inbound messages & events
        self.mock_push_server = MockHttpServer(self.handle_request)
        yield self.mock_push_server.start()
        self.add_cleanup(self.mock_push_server.stop)
        self.push_calls = DeferredQueue()
        self._setup_wait_for_request()
        self.add_cleanup(self._wait_for_requests)
    def _setup_wait_for_request(self):
        # Hackery to wait for the request to finish
        #
        # Wrap StreamingConversationResource's track/release methods so that
        # each tracked request bumps a counter and each released request is
        # pushed onto a queue; _wait_for_requests() drains that queue during
        # cleanup so tests don't end with requests still in flight.
        self._req_state = {
            'queue': DeferredQueue(),
            'expected': 0,
        }
        orig_track = StreamingConversationResource.track_request
        orig_release = StreamingConversationResource.release_request
        def track_wrapper(*args, **kw):
            self._req_state['expected'] += 1
            return orig_track(*args, **kw)
        def release_wrapper(*args, **kw):
            return orig_release(*args, **kw).addCallback(
                self._req_state['queue'].put)
        self.patch(
            StreamingConversationResource, 'track_request', track_wrapper)
        self.patch(
            StreamingConversationResource, 'release_request', release_wrapper)
@inlineCallbacks
def _wait_for_requests(self):
while self._req_state['expected'] > 0:
yield self._req_state['queue'].get()
self._req_state['expected'] -= 1
def handle_request(self, request):
self.push_calls.put(request)
return NOT_DONE_YET
    @inlineCallbacks
    def pull_message(self, count=1):
        """Stream `count` inbound messages from the conversation's
        messages.json endpoint and return (receiver, received_messages)."""
        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        messages = DeferredQueue()
        errors = DeferredQueue()
        receiver = self.client.stream(
            TransportUserMessage, messages.put, errors.put, url,
            Headers(self.auth_headers))
        received_messages = []
        for msg_id in range(count):
            # Dispatch one inbound message and wait for it to arrive on the
            # stream before dispatching the next.
            yield self.app_helper.make_dispatch_inbound(
                'in %s' % (msg_id,), message_id=str(msg_id),
                conv=self.conversation)
            recv_msg = yield messages.get()
            received_messages.append(recv_msg)
        receiver.disconnect()
        returnValue((receiver, received_messages))
def assert_bad_request(self, response, reason):
self.assertEqual(response.code, http.BAD_REQUEST)
self.assertEqual(
response.headers.getRawHeaders('content-type'),
['application/json; charset=utf-8'])
data = json.loads(response.delivered_body)
self.assertEqual(data, {
"success": False,
"reason": reason,
})
@inlineCallbacks
def test_proxy_buffering_headers_off(self):
# This is the default, but we patch it anyway to make sure we're
# testing the right thing should the default change.
self.patch(StreamResourceMixin, 'proxy_buffering', False)
receiver, received_messages = yield self.pull_message()
headers = receiver._response.headers
self.assertEqual(headers.getRawHeaders('x-accel-buffering'), ['no'])
@inlineCallbacks
def test_proxy_buffering_headers_on(self):
self.patch(StreamResourceMixin, 'proxy_buffering', True)
receiver, received_messages = yield self.pull_message()
headers = receiver._response.headers
self.assertEqual(headers.getRawHeaders('x-accel-buffering'), ['yes'])
@inlineCallbacks
def test_content_type(self):
receiver, received_messages = yield self.pull_message()
headers = receiver._response.headers
self.assertEqual(
headers.getRawHeaders('content-type'),
['application/json; charset=utf-8'])
    @inlineCallbacks
    def test_messages_stream(self):
        """Inbound messages dispatched to the conversation appear on the
        streaming messages.json endpoint."""
        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        messages = DeferredQueue()
        errors = DeferredQueue()
        receiver = self.client.stream(
            TransportUserMessage, messages.put, errors.put, url,
            Headers(self.auth_headers))
        msg1 = yield self.app_helper.make_dispatch_inbound(
            'in 1', message_id='1', conv=self.conversation)
        msg2 = yield self.app_helper.make_dispatch_inbound(
            'in 2', message_id='2', conv=self.conversation)
        rm1 = yield messages.get()
        rm2 = yield messages.get()
        receiver.disconnect()
        # Sometimes messages arrive out of order if we're hitting real redis.
        rm1, rm2 = sorted([rm1, rm2], key=lambda m: m['message_id'])
        self.assertEqual(msg1['message_id'], rm1['message_id'])
        self.assertEqual(msg2['message_id'], rm2['message_id'])
        # No stream errors expected.
        self.assertEqual(errors.size, None)
@inlineCallbacks
def test_events_stream(self):
url = '%s/%s/events.json' % (self.url, self.conversation.key)
events = DeferredQueue()
errors = DeferredQueue()
receiver = yield self.client.stream(TransportEvent, events.put,
events.put, url,
Headers(self.auth_headers))
msg1 = yield self.app_helper.make_stored_outbound(
self.conversation, 'out 1', message_id='1')
ack1 = yield self.app_helper.make_dispatch_ack(
msg1, conv=self.conversation)
msg2 = yield self.app_helper.make_stored_outbound(
self.conversation, 'out 2', message_id='2')
ack2 = yield self.app_helper.make_dispatch_ack(
msg2, conv=self.conversation)
ra1 = yield events.get()
ra2 = yield events.get()
receiver.disconnect()
# Sometimes messages arrive out of order if we're hitting real redis.
if ra1['event_id'] != ack1['event_id']:
ra1, ra2 = ra2, ra1
self.assertEqual(ack1['event_id'], ra1['event_id'])
self.assertEqual(ack2['event_id'], ra2['event_id'])
self.assertEqual(errors.size, None)
    @inlineCallbacks
    def test_missing_auth(self):
        """
        Requests without credentials should get a 401 with a basic-auth
        challenge for the conversation realm.
        """
        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        queue = DeferredQueue()
        receiver = self.client.stream(
            TransportUserMessage, queue.put, queue.put, url)
        response = yield receiver.get_response()
        self.assertEqual(response.code, http.UNAUTHORIZED)
        self.assertEqual(response.headers.getRawHeaders('www-authenticate'), [
            'basic realm="Conversation Realm"'])
    @inlineCallbacks
    def test_invalid_auth(self):
        """
        Requests with bad credentials should get a 401 with a basic-auth
        challenge for the conversation realm.
        """
        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        queue = DeferredQueue()
        headers = Headers({
            'Authorization': ['Basic %s' % (base64.b64encode('foo:bar'),)],
        })
        receiver = self.client.stream(
            TransportUserMessage, queue.put, queue.put, url, headers)
        response = yield receiver.get_response()
        self.assertEqual(response.code, http.UNAUTHORIZED)
        self.assertEqual(response.headers.getRawHeaders('www-authenticate'), [
            'basic realm="Conversation Realm"'])
@inlineCallbacks
def test_send_to(self):
msg = {
'to_addr': '+2345',
'content': 'foo',
'message_id': 'evil_id',
}
# TaggingMiddleware.add_tag_to_msg(msg, self.tag)
url = '%s/%s/messages.json' % (self.url, self.conversation.key)
response = yield http_request_full(url, json.dumps(msg),
self.auth_headers, method='PUT')
self.assertEqual(
response.headers.getRawHeaders('content-type'),
['application/json; charset=utf-8'])
self.assertEqual(response.code, http.OK)
put_msg = json.loads(response.delivered_body)
[sent_msg] = self.app_helper.get_dispatched_outbound()
self.assertEqual(sent_msg['to_addr'], sent_msg['to_addr'])
self.assertEqual(sent_msg['helper_metadata'], {
'go': {
'conversation_key': self.conversation.key,
'conversation_type': 'http_api',
'user_account': self.conversation.user_account.key,
},
})
# We do not respect the message_id that's been given.
self.assertNotEqual(sent_msg['message_id'], msg['message_id'])
self.assertEqual(sent_msg['message_id'], put_msg['message_id'])
self.assertEqual(sent_msg['to_addr'], msg['to_addr'])
self.assertEqual(sent_msg['from_addr'], None)
    @inlineCallbacks
    def test_send_to_within_content_length_limit(self):
        """
        Sending content shorter than the configured content_length_limit
        should succeed and dispatch an outbound message.
        """
        self.conversation.config['http_api'].update({
            'content_length_limit': 182,
        })
        yield self.conversation.save()
        msg = {
            'content': 'foo',
            'to_addr': '+1234',
        }
        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        response = yield http_request_full(url, json.dumps(msg),
                                           self.auth_headers, method='PUT')
        self.assertEqual(
            response.headers.getRawHeaders('content-type'),
            ['application/json; charset=utf-8'])
        put_msg = json.loads(response.delivered_body)
        self.assertEqual(response.code, http.OK)
        [sent_msg] = self.app_helper.get_dispatched_outbound()
        self.assertEqual(sent_msg['to_addr'], put_msg['to_addr'])
        self.assertEqual(sent_msg['helper_metadata'], {
            'go': {
                'conversation_key': self.conversation.key,
                'conversation_type': 'http_api',
                'user_account': self.conversation.user_account.key,
            },
        })
        self.assertEqual(sent_msg['message_id'], put_msg['message_id'])
        self.assertEqual(sent_msg['session_event'], None)
        self.assertEqual(sent_msg['to_addr'], '+1234')
        self.assertEqual(sent_msg['from_addr'], None)
    @inlineCallbacks
    def test_send_to_content_too_long(self):
        """
        Sending content longer than the configured content_length_limit
        should be rejected with a 400 naming both lengths.
        """
        self.conversation.config['http_api'].update({
            'content_length_limit': 10,
        })
        yield self.conversation.save()
        msg = {
            'content': "This message is longer than 10 characters.",
            'to_addr': '+1234',
        }
        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        response = yield http_request_full(
            url, json.dumps(msg), self.auth_headers, method='PUT')
        self.assert_bad_request(
            response, "Payload content too long: 42 > 10")
    @inlineCallbacks
    def test_send_to_with_evil_content(self):
        """
        A non-string 'content' value should be rejected with a 400.
        """
        msg = {
            'content': 0xBAD,
            'to_addr': '+1234',
        }
        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        response = yield http_request_full(url, json.dumps(msg),
                                           self.auth_headers, method='PUT')
        self.assert_bad_request(
            response, "Invalid or missing value for payload key 'content'")
    @inlineCallbacks
    def test_send_to_with_evil_to_addr(self):
        """
        A non-string 'to_addr' value should be rejected with a 400.
        """
        msg = {
            'content': 'good',
            'to_addr': 1234,
        }
        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        response = yield http_request_full(url, json.dumps(msg),
                                           self.auth_headers, method='PUT')
        self.assert_bad_request(
            response, "Invalid or missing value for payload key 'to_addr'")
    @inlineCallbacks
    def test_in_reply_to(self):
        """
        PUTting a reply (in_reply_to a stored inbound message) should
        dispatch an outbound message addressed back to the sender.
        """
        inbound_msg = yield self.app_helper.make_stored_inbound(
            self.conversation, 'in 1', message_id='1')
        msg = {
            'content': 'foo',
            'in_reply_to': inbound_msg['message_id'],
        }
        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        response = yield http_request_full(url, json.dumps(msg),
                                           self.auth_headers, method='PUT')
        self.assertEqual(
            response.headers.getRawHeaders('content-type'),
            ['application/json; charset=utf-8'])
        put_msg = json.loads(response.delivered_body)
        self.assertEqual(response.code, http.OK)
        [sent_msg] = self.app_helper.get_dispatched_outbound()
        self.assertEqual(sent_msg['to_addr'], put_msg['to_addr'])
        self.assertEqual(sent_msg['helper_metadata'], {
            'go': {
                'conversation_key': self.conversation.key,
                'conversation_type': 'http_api',
                'user_account': self.conversation.user_account.key,
            },
        })
        self.assertEqual(sent_msg['message_id'], put_msg['message_id'])
        self.assertEqual(sent_msg['session_event'], None)
        # The reply goes back to the original sender, from the original
        # recipient address.
        self.assertEqual(sent_msg['to_addr'], inbound_msg['from_addr'])
        self.assertEqual(sent_msg['from_addr'], '9292')
    @inlineCallbacks
    def test_in_reply_to_within_content_length_limit(self):
        """
        Replying with content shorter than the configured
        content_length_limit should succeed.
        """
        self.conversation.config['http_api'].update({
            'content_length_limit': 182,
        })
        yield self.conversation.save()
        inbound_msg = yield self.app_helper.make_stored_inbound(
            self.conversation, 'in 1', message_id='1')
        msg = {
            'content': 'foo',
            'in_reply_to': inbound_msg['message_id'],
        }
        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        response = yield http_request_full(url, json.dumps(msg),
                                           self.auth_headers, method='PUT')
        self.assertEqual(
            response.headers.getRawHeaders('content-type'),
            ['application/json; charset=utf-8'])
        put_msg = json.loads(response.delivered_body)
        self.assertEqual(response.code, http.OK)
        [sent_msg] = self.app_helper.get_dispatched_outbound()
        self.assertEqual(sent_msg['to_addr'], put_msg['to_addr'])
        self.assertEqual(sent_msg['helper_metadata'], {
            'go': {
                'conversation_key': self.conversation.key,
                'conversation_type': 'http_api',
                'user_account': self.conversation.user_account.key,
            },
        })
        self.assertEqual(sent_msg['message_id'], put_msg['message_id'])
        self.assertEqual(sent_msg['session_event'], None)
        self.assertEqual(sent_msg['to_addr'], inbound_msg['from_addr'])
        self.assertEqual(sent_msg['from_addr'], '9292')
    @inlineCallbacks
    def test_in_reply_to_content_too_long(self):
        """
        Replying with content longer than the configured
        content_length_limit should be rejected with a 400.
        """
        self.conversation.config['http_api'].update({
            'content_length_limit': 10,
        })
        yield self.conversation.save()
        inbound_msg = yield self.app_helper.make_stored_inbound(
            self.conversation, 'in 1', message_id='1')
        msg = {
            'content': "This message is longer than 10 characters.",
            'in_reply_to': inbound_msg['message_id'],
        }
        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        response = yield http_request_full(
            url, json.dumps(msg), self.auth_headers, method='PUT')
        self.assert_bad_request(
            response, "Payload content too long: 42 > 10")
    @inlineCallbacks
    def test_in_reply_to_with_evil_content(self):
        """
        Replying with a non-string 'content' value should be rejected
        with a 400.
        """
        inbound_msg = yield self.app_helper.make_stored_inbound(
            self.conversation, 'in 1', message_id='1')
        msg = {
            'content': 0xBAD,
            'in_reply_to': inbound_msg['message_id'],
        }
        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        response = yield http_request_full(url, json.dumps(msg),
                                           self.auth_headers, method='PUT')
        self.assert_bad_request(
            response, "Invalid or missing value for payload key 'content'")
    @inlineCallbacks
    def test_invalid_in_reply_to(self):
        """
        Replying to a nonexistent message id should be rejected with a 400.
        """
        msg = {
            'content': 'foo',
            'in_reply_to': '1',  # this doesn't exist
        }
        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        response = yield http_request_full(url, json.dumps(msg),
                                           self.auth_headers, method='PUT')
        self.assert_bad_request(response, 'Invalid in_reply_to value')
    @inlineCallbacks
    def test_invalid_in_reply_to_with_missing_conversation_key(self):
        """
        Replying to a stored message that has no conversation key should be
        rejected with a 400 and logged with the offending message id.
        """
        # create a message with no conversation
        inbound_msg = self.app_helper.make_inbound('in 1', message_id='msg-1')
        vumi_api = self.app_helper.vumi_helper.get_vumi_api()
        yield vumi_api.mdb.add_inbound_message(inbound_msg)
        msg = {
            'content': 'foo',
            'in_reply_to': inbound_msg['message_id'],
        }
        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        with LogCatcher(message='Invalid reply to message <Message .*>'
                                ' which has no conversation key') as lc:
            response = yield http_request_full(url, json.dumps(msg),
                                               self.auth_headers, method='PUT')
            [error_log] = lc.messages()
        self.assert_bad_request(response, "Invalid in_reply_to value")
        self.assertTrue(inbound_msg['message_id'] in error_log)
    @inlineCallbacks
    def test_in_reply_to_with_evil_session_event(self):
        """
        Replying with an invalid 'session_event' value should be rejected
        with a 400 and nothing should be dispatched.
        """
        inbound_msg = yield self.app_helper.make_stored_inbound(
            self.conversation, 'in 1', message_id='1')
        msg = {
            'content': 'foo',
            'in_reply_to': inbound_msg['message_id'],
            'session_event': 0xBAD5E55104,
        }
        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        response = yield http_request_full(url, json.dumps(msg),
                                           self.auth_headers, method='PUT')
        self.assert_bad_request(
            response,
            "Invalid or missing value for payload key 'session_event'")
        self.assertEqual(self.app_helper.get_dispatched_outbound(), [])
    @inlineCallbacks
    def test_in_reply_to_with_evil_message_id(self):
        """
        A client-supplied 'message_id' on a reply should be ignored and
        replaced with a server-generated one.
        """
        inbound_msg = yield self.app_helper.make_stored_inbound(
            self.conversation, 'in 1', message_id='1')
        msg = {
            'content': 'foo',
            'in_reply_to': inbound_msg['message_id'],
            'message_id': 'evil_id'
        }
        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        response = yield http_request_full(url, json.dumps(msg),
                                           self.auth_headers, method='PUT')
        self.assertEqual(response.code, http.OK)
        self.assertEqual(
            response.headers.getRawHeaders('content-type'),
            ['application/json; charset=utf-8'])
        put_msg = json.loads(response.delivered_body)
        [sent_msg] = self.app_helper.get_dispatched_outbound()
        # We do not respect the message_id that's been given.
        self.assertNotEqual(sent_msg['message_id'], msg['message_id'])
        self.assertEqual(sent_msg['message_id'], put_msg['message_id'])
        self.assertEqual(sent_msg['to_addr'], inbound_msg['from_addr'])
        self.assertEqual(sent_msg['from_addr'], '9292')
    @inlineCallbacks
    def test_metric_publishing(self):
        """
        PUTting metric triples to metrics.json should publish each metric
        under the account's metric-store prefix.
        """
        metric_data = [
            ("vumi.test.v1", 1234, 'SUM'),
            ("vumi.test.v2", 3456, 'AVG'),
        ]
        url = '%s/%s/metrics.json' % (self.url, self.conversation.key)
        response = yield http_request_full(
            url, json.dumps(metric_data), self.auth_headers, method='PUT')
        self.assertEqual(response.code, http.OK)
        self.assertEqual(
            response.headers.getRawHeaders('content-type'),
            ['application/json; charset=utf-8'])
        prefix = "go.campaigns.test-0-user.stores.metric_store"
        self.assertEqual(
            self.app_helper.get_published_metrics(self.app),
            [("%s.vumi.test.v1" % prefix, 1234),
             ("%s.vumi.test.v2" % prefix, 3456)])
    @inlineCallbacks
    def test_concurrency_limits(self):
        """
        Once the configured number of concurrent stream connections is
        reached, further requests should be refused with a 403.
        """
        config = yield self.app.get_config(None)
        concurrency = config.concurrency_limit
        queue = DeferredQueue()
        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        # Open exactly `concurrency` streams to saturate the limit.
        max_receivers = [self.client.stream(
            TransportUserMessage, queue.put, queue.put, url,
            Headers(self.auth_headers)) for _ in range(concurrency)]
        for i in range(concurrency):
            msg = yield self.app_helper.make_dispatch_inbound(
                'in %s' % (i,), message_id=str(i), conv=self.conversation)
            received = yield queue.get()
            self.assertEqual(msg['message_id'], received['message_id'])
        maxed_out_resp = yield http_request_full(
            url, method='GET', headers=self.auth_headers)
        self.assertEqual(maxed_out_resp.code, 403)
        self.assertTrue(
            'Too many concurrent connections' in maxed_out_resp.delivered_body)
        [r.disconnect() for r in max_receivers]
    @inlineCallbacks
    def test_disabling_concurrency_limit(self):
        """
        A negative concurrency_limit should disable the limit entirely.
        """
        conv_resource = StreamingConversationResource(
            self.app, self.conversation.key)
        # negative concurrency limit disables it
        ctxt = ConfigContext(user_account=self.conversation.user_account.key,
                             concurrency_limit=-1)
        config = yield self.app.get_config(msg=None, ctxt=ctxt)
        self.assertTrue(
            (yield conv_resource.is_allowed(
                config, self.conversation.user_account.key)))
    @inlineCallbacks
    def test_backlog_on_connect(self):
        """
        Messages dispatched before a stream connects should be delivered,
        in order, as soon as the stream is opened.
        """
        for i in range(10):
            yield self.app_helper.make_dispatch_inbound(
                'in %s' % (i,), message_id=str(i), conv=self.conversation)
        queue = DeferredQueue()
        url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        receiver = self.client.stream(
            TransportUserMessage, queue.put, queue.put, url,
            Headers(self.auth_headers))
        for i in range(10):
            received = yield queue.get()
            self.assertEqual(received['message_id'], str(i))
        receiver.disconnect()
    @inlineCallbacks
    def test_health_response(self):
        """
        The health endpoint should report the number of connected stream
        clients, and the client manager should forget disconnected clients.
        """
        health_url = 'http://%s:%s%s' % (
            self.addr.host, self.addr.port, self.config['health_path'])
        # No clients connected yet.
        response = yield http_request_full(health_url, method='GET')
        self.assertEqual(response.delivered_body, '0')
        yield self.app_helper.make_dispatch_inbound(
            'in 1', message_id='1', conv=self.conversation)
        queue = DeferredQueue()
        stream_url = '%s/%s/messages.json' % (self.url, self.conversation.key)
        stream_receiver = self.client.stream(
            TransportUserMessage, queue.put, queue.put, stream_url,
            Headers(self.auth_headers))
        yield queue.get()
        # One client connected.
        response = yield http_request_full(health_url, method='GET')
        self.assertEqual(response.delivered_body, '1')
        stream_receiver.disconnect()
        # Back to zero after disconnect.
        response = yield http_request_full(health_url, method='GET')
        self.assertEqual(response.delivered_body, '0')
        self.assertEqual(self.app.client_manager.clients, {
            'sphex.stream.message.%s' % (self.conversation.key,): []
        })
    @inlineCallbacks
    def test_post_inbound_message(self):
        """
        With push_message_url configured, inbound messages should be HTTP
        POSTed to that URL instead of streamed.
        """
        # Set the URL so stuff is HTTP Posted instead of streamed.
        self.conversation.config['http_api'].update({
            'push_message_url': self.mock_push_server.url,
        })
        yield self.conversation.save()
        msg_d = self.app_helper.make_dispatch_inbound(
            'in 1', message_id='1', conv=self.conversation)
        req = yield self.push_calls.get()
        posted_json_data = req.content.read()
        req.finish()
        msg = yield msg_d
        posted_msg = TransportUserMessage.from_json(posted_json_data)
        self.assertEqual(posted_msg['message_id'], msg['message_id'])
    @inlineCallbacks
    def test_post_inbound_message_201_response(self):
        """
        A 2xx response from the push URL should not be logged as
        unexpected.
        """
        # Set the URL so stuff is HTTP Posted instead of streamed.
        self.conversation.config['http_api'].update({
            'push_message_url': self.mock_push_server.url,
        })
        yield self.conversation.save()
        with LogCatcher(message='Got unexpected response code') as lc:
            msg_d = self.app_helper.make_dispatch_inbound(
                'in 1', message_id='1', conv=self.conversation)
            req = yield self.push_calls.get()
            req.setResponseCode(201)
            req.finish()
            yield msg_d
        self.assertEqual(lc.messages(), [])
    @inlineCallbacks
    def test_post_inbound_message_500_response(self):
        """
        A 5xx response from the push URL should be logged as an unexpected
        response code, naming the URL and the code.
        """
        # Set the URL so stuff is HTTP Posted instead of streamed.
        self.conversation.config['http_api'].update({
            'push_message_url': self.mock_push_server.url,
        })
        yield self.conversation.save()
        with LogCatcher(message='Got unexpected response code') as lc:
            msg_d = self.app_helper.make_dispatch_inbound(
                'in 1', message_id='1', conv=self.conversation)
            req = yield self.push_calls.get()
            req.setResponseCode(500)
            req.finish()
            yield msg_d
        [warning_log] = lc.messages()
        self.assertTrue(self.mock_push_server.url in warning_log)
        self.assertTrue('500' in warning_log)
    @inlineCallbacks
    def test_post_inbound_event(self):
        """
        With push_event_url configured, events should be HTTP POSTed to
        that URL instead of streamed.
        """
        # Set the URL so stuff is HTTP Posted instead of streamed.
        self.conversation.config['http_api'].update({
            'push_event_url': self.mock_push_server.url,
        })
        yield self.conversation.save()
        msg = yield self.app_helper.make_stored_outbound(
            self.conversation, 'out 1', message_id='1')
        event_d = self.app_helper.make_dispatch_ack(
            msg, conv=self.conversation)
        req = yield self.push_calls.get()
        posted_json_data = req.content.read()
        req.finish()
        ack = yield event_d
        self.assertEqual(TransportEvent.from_json(posted_json_data), ack)
@inlineCallbacks
def test_bad_urls(self):
def assert_not_found(url, headers={}):
d = http_request_full(self.url, method='GET', headers=headers)
d.addCallback(lambda r: self.assertEqual(r.code, http.NOT_FOUND))
return d
yield assert_not_found(self.url)
yield assert_not_found(self.url + '/')
yield assert_not_found('%s/%s' % (self.url, self.conversation.key),
headers=self.auth_headers)
yield assert_not_found('%s/%s/' % (self.url, self.conversation.key),
headers=self.auth_headers)
yield assert_not_found('%s/%s/foo' % (self.url, self.conversation.key),
headers=self.auth_headers)
    @inlineCallbacks
    def test_send_message_command(self):
        """
        A 'send_message' API command should dispatch an outbound message
        with the supplied options and the account's go metadata.
        """
        yield self.app_helper.dispatch_command(
            'send_message',
            user_account_key=self.conversation.user_account.key,
            conversation_key=self.conversation.key,
            command_data={
                u'batch_id': u'batch-id',
                u'content': u'foo',
                u'to_addr': u'to_addr',
                u'msg_options': {
                    u'helper_metadata': {
                        u'tag': {
                            u'tag': [u'longcode', u'default10080']
                        }
                    },
                    u'from_addr': u'default10080',
                }
            })
        [msg] = self.app_helper.get_dispatched_outbound()
        self.assertEqual(msg.payload['to_addr'], "to_addr")
        self.assertEqual(msg.payload['from_addr'], "default10080")
        self.assertEqual(msg.payload['content'], "foo")
        self.assertEqual(msg.payload['message_type'], "user_message")
        self.assertEqual(
            msg.payload['helper_metadata']['go']['user_account'],
            self.conversation.user_account.key)
        self.assertEqual(
            msg.payload['helper_metadata']['tag']['tag'],
            ['longcode', 'default10080'])
    @inlineCallbacks
    def test_process_command_send_message_in_reply_to(self):
        """
        A 'send_message' command with in_reply_to should produce a reply
        addressed back to the stored inbound message's sender.
        """
        msg = yield self.app_helper.make_stored_inbound(
            self.conversation, "foo")
        yield self.app_helper.dispatch_command(
            'send_message',
            user_account_key=self.conversation.user_account.key,
            conversation_key=self.conversation.key,
            command_data={
                u'batch_id': u'batch-id',
                u'content': u'foo',
                u'to_addr': u'to_addr',
                u'msg_options': {
                    u'helper_metadata': {
                        u'tag': {
                            u'tag': [u'longcode', u'default10080']
                        }
                    },
                    u'transport_name': u'smpp_transport',
                    u'in_reply_to': msg['message_id'],
                    u'transport_type': u'sms',
                    u'from_addr': u'default10080',
                }
            })
        [sent_msg] = self.app_helper.get_dispatched_outbound()
        # The reply overrides to_addr with the inbound sender's address.
        self.assertEqual(sent_msg['to_addr'], msg['from_addr'])
        self.assertEqual(sent_msg['content'], 'foo')
        self.assertEqual(sent_msg['in_reply_to'], msg['message_id'])
| |
#
# lasoverlapPro.py
#
# (c) 2013, martin isenburg - http://rapidlasso.com
# rapidlasso GmbH - fast tools to catch reality
#
# uses lasoverlap.exe to check the flightline overlap
# and alignment for a folder of LiDAR files
#
# LiDAR input: LAS/LAZ/BIN/TXT/SHP/BIL/ASC/DTM
# raster output: BIL/ASC/IMG/TIF/DTM/PNG/JPG
#
# for licensing see http://lastools.org/LICENSE.txt
#
import sys, os, arcgisscripting, subprocess
def check_output(command, console):
    """
    Run `command` and return a (returncode, output) tuple.

    If `console` is truthy the child process inherits this process's
    console and `output` is None; otherwise the command is run through
    the shell with stdout and stderr captured together as text.
    """
    if console:
        process = subprocess.Popen(command)
    else:
        process = subprocess.Popen(
            command, shell=True, stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT, universal_newlines=True)
    output, error = process.communicate()
    returncode = process.poll()
    return returncode, output
### create the geoprocessor object
gp = arcgisscripting.create(9.3)

### report that something is happening
gp.AddMessage("Starting lasoverlap production ...")

### get number of arguments
argc = len(sys.argv)

### report arguments (for debug)
#gp.AddMessage("Arguments:")
#for i in range(0, argc):
#    gp.AddMessage("[" + str(i) + "]" + sys.argv[i])

### get the path to LAStools (three levels up from this script)
lastools_path = os.path.dirname(os.path.dirname(os.path.dirname(sys.argv[0])))

### make sure the path does not contain spaces
if lastools_path.count(" ") > 0:
    gp.AddMessage("Error. Path to .\\lastools installation contains spaces.")
    gp.AddMessage("This does not work: " + lastools_path)
    gp.AddMessage("This would work: C:\\software\\lastools")
    sys.exit(1)

### complete the path to where the LAStools executables are
lastools_path = lastools_path + "\\bin"

### check if path exists
if not os.path.exists(lastools_path):
    gp.AddMessage("Cannot find .\\lastools\\bin at " + lastools_path)
    sys.exit(1)
else:
    gp.AddMessage("Found " + lastools_path + " ...")

### create the full path to the lasoverlap executable
lasoverlap_path = lastools_path + "\\lasoverlap.exe"

### check if executable exists
### BUG FIX: this previously re-tested lastools_path, so a missing
### lasoverlap.exe was never detected here.
if not os.path.exists(lasoverlap_path):
    gp.AddMessage("Cannot find lasoverlap.exe at " + lasoverlap_path)
    sys.exit(1)
else:
    gp.AddMessage("Found " + lasoverlap_path + " ...")
### create the command string for lasoverlap.exe
### NOTE(review): the cursor `c` below must advance in exact lockstep with
### the ArcGIS toolbox parameter order (presumably defined in the matching
### .tbx file - verify before reordering anything here). Each branch skips
### the argument when it equals the toolbox's default value.
command = ['"'+lasoverlap_path+'"']
### maybe use '-verbose' option (last toolbox argument)
if sys.argv[argc-1] == "true":
    command.append("-v")
### counting up the arguments
c = 1
### add input LiDAR: argv[c] is the folder, argv[c+1] the wildcard list
wildcards = sys.argv[c+1].split()
for wildcard in wildcards:
    command.append("-i")
    command.append('"' + sys.argv[c] + "\\" + wildcard + '"')
c = c + 2
### maybe the input files are flightlines
if sys.argv[c] == "true":
    command.append("-files_are_flightlines")
c = c + 1
### maybe we should merge all files into one
if sys.argv[c] == "true":
    command.append("-merged")
c = c + 1
### maybe use a user-defined step size
### (decimal commas from localized input are normalized to points)
if sys.argv[c] != "2":
    command.append("-step")
    command.append(sys.argv[c].replace(",","."))
c = c + 1
### what should we raster
if sys.argv[c] != "elevation":
    command.append("-" + sys.argv[c])
c = c + 1
### what operation should we use
if sys.argv[c] != "lowest":
    command.append("-" + sys.argv[c])
c = c + 1
### should we fill a few pixels
if sys.argv[c] != "0":
    command.append("-fill")
    command.append(sys.argv[c])
c = c + 1
### what should we output
if sys.argv[c] == "actual values":
    command.append("-values")
c = c + 1
### maybe no overlap raster
if sys.argv[c] != "true":
    command.append("-no_over")
c = c + 1
### maybe use a user-defined max overlap
if sys.argv[c] != "5":
    command.append("-max_over")
    command.append(sys.argv[c])
c = c + 1
### maybe no difference raster
if sys.argv[c] != "true":
    command.append("-no_diff")
c = c + 1
### maybe use a user-defined max diff
if sys.argv[c].replace(",",".") != "0.5":
    command.append("-max_diff")
    command.append(sys.argv[c].replace(",","."))
c = c + 1
### what should we triangulate (maps the dropdown text to point filters)
if sys.argv[c] == "ground points only":
    command.append("-keep_class")
    command.append("2")
elif sys.argv[c] == "ground and keypoints":
    command.append("-keep_class")
    command.append("2")
    command.append("8")
elif sys.argv[c] == "ground and buildings":
    command.append("-keep_class")
    command.append("2")
    command.append("6")
elif sys.argv[c] == "last return only":
    command.append("-last_only")
elif sys.argv[c] == "first return only":
    command.append("-first_only")
c = c + 1
### should we use the bounding box
if sys.argv[c] == "true":
    command.append("-use_bb")
c = c + 1
### should we use the tile bounding box
if sys.argv[c] == "true":
    command.append("-use_tile_bb")
c = c + 1
### maybe an output format was selected ('#' means the toolbox field was left empty)
if sys.argv[c] != "#":
    command.append("-o" + sys.argv[c])
c = c + 1
### maybe an output file name was selected
if sys.argv[c] != "#":
    command.append("-o")
    command.append('"'+sys.argv[c]+'"')
c = c + 1
### maybe an output directory was selected
if sys.argv[c] != "#":
    command.append("-odir")
    command.append('"'+sys.argv[c]+'"')
c = c + 1
### maybe an output appendix was selected
if sys.argv[c] != "#":
    command.append("-odix")
    command.append('"'+sys.argv[c]+'"')
c = c + 1
### maybe we should run on multiple cores
if sys.argv[c] != "1":
    command.append("-cores")
    command.append(sys.argv[c])
c = c + 1
### maybe there are additional input options
if sys.argv[c] != "#":
    additional_options = sys.argv[c].split()
    for option in additional_options:
        command.append(option)
### report command string
gp.AddMessage("LAStools command line:")
command_length = len(command)
command_string = str(command[0])
command[0] = command[0].strip('"')
for i in range(1, command_length):
    # Build the display string from the still-quoted entries, then strip
    # the quotes in place: the quoted form is only for human-readable
    # output, while Popen receives the unquoted argument list.
    command_string = command_string + " " + str(command[i])
    command[i] = command[i].strip('"')
gp.AddMessage(command_string)
### run command
returncode,output = check_output(command, False)
### report output of lasoverlap
gp.AddMessage(str(output))
### check return code
if returncode != 0:
    gp.AddMessage("Error. lasoverlap failed.")
    sys.exit(1)
### report happy end
gp.AddMessage("Success. lasoverlap done.")
| |
"""
Copyright (C) 2010 - 2013 TopCoder Inc., All Rights Reserved.
This module contains the definitions of syntax tree nodes. Each node class
defines the __repr__ method to support repr() actions on these node objects
for debugging purposes, which converts a syntax tree to a linear form.
In order to support future extensions to the syntax tree, we employ the visitor
pattern in this module, so that each node class defines the accept method, which
takes a visitor object as the parameter. Because Python cannot dispatch method
calls with respect to parameter types (as in Java), we call different visit_xxx
methods instead of one single visit method.
@version 1.0 (Healthcare Fraud Prevention - Query Parsing and Query Generation)
@author: TCSASSEMBLER
"""
class Query:
    """
    Syntax-tree node for the production:

        Query -> Clause | Group | And_sequence | Or_sequence
    """

    def __init__(self, query):
        # The single child node this Query wraps.
        self.query = query

    def __repr__(self):
        return "Query({0!r})".format(self.query)

    def accept(self, visitor):
        # Visitor-pattern dispatch (see module docstring).
        return visitor.visit_query(self)
class AndSequence:
    """
    Syntax-tree node for the production:

        And_sequence -> Group "AND" And_sequence_tail
    """

    def __init__(self, group, tail):
        # First conjunct and the remainder of the AND chain.
        self.group = group
        self.tail = tail

    def __repr__(self):
        return "AndSequence({0!r},{1!r})".format(self.group, self.tail)

    def accept(self, visitor):
        # Visitor-pattern dispatch.
        return visitor.visit_and_sequence(self)
class AndSequenceTail:
    """
    Syntax-tree node for the production:

        And_sequence_tail -> Group | Group "AND" And_sequence_tail
    """

    def __init__(self, group, tail=None):
        # `tail` is None for the last element of the AND chain.
        self.group = group
        self.tail = tail

    def __repr__(self):
        if self.tail is None:
            return "AndSequenceTail({0!r})".format(self.group)
        return "AndSequenceTail({0!r},{1!r})".format(self.group, self.tail)

    def accept(self, visitor):
        # Visitor-pattern dispatch.
        return visitor.visit_and_sequence_tail(self)
class OrSequence:
    """
    Syntax-tree node for the production:

        Or_sequence -> Group "OR" Or_sequence_tail
    """

    def __init__(self, group, tail):
        # First disjunct and the remainder of the OR chain.
        self.group = group
        self.tail = tail

    def __repr__(self):
        return "OrSequence({0!r},{1!r})".format(self.group, self.tail)

    def accept(self, visitor):
        # Visitor-pattern dispatch.
        return visitor.visit_or_sequence(self)
class OrSequenceTail:
    """
    Syntax-tree node for the production:

        Or_sequence_tail -> Group | Group "OR" Or_sequence_tail
    """

    def __init__(self, group, tail=None):
        # `tail` is None for the last element of the OR chain.
        self.group = group
        self.tail = tail

    def __repr__(self):
        if self.tail is None:
            return "OrSequenceTail({0!r})".format(self.group)
        return "OrSequenceTail({0!r},{1!r})".format(self.group, self.tail)

    def accept(self, visitor):
        # Visitor-pattern dispatch.
        return visitor.visit_or_sequence_tail(self)
class Group:
    """
    Syntax-tree node for the production:

        Group -> Affirmative_group | Negated_group
    """

    def __init__(self, group):
        # Wrapped child: an Affirmative_group or a Negated_group node.
        self.group = group

    def __repr__(self):
        return "Group({0!r})".format(self.group)

    def accept(self, visitor):
        # Visitor-pattern dispatch.
        return visitor.visit_group(self)
class AffirmativeGroup:
    """
    Syntax-tree node for the production:

        Affirmative_group -> "(" Query ")"
    """

    def __init__(self, query):
        # The parenthesized sub-query.
        self.query = query

    def __repr__(self):
        return "AffirmativeGroup({0!r})".format(self.query)

    def accept(self, visitor):
        # Visitor-pattern dispatch.
        return visitor.visit_affirmative_group(self)
class NegatedGroup:
    """
    Syntax-tree node for the production:

        Negated_group -> "NOT" "(" Query ")"
    """

    def __init__(self, query):
        # The parenthesized sub-query being negated.
        self.query = query

    def __repr__(self):
        return "NegatedGroup({0!r})".format(self.query)

    def accept(self, visitor):
        # Visitor-pattern dispatch.
        return visitor.visit_negated_group(self)
class Clause:
    """
    Syntax-tree node for the production:

        Clause -> Numerical_clause | Logical_clause | Textual_clause
    """

    def __init__(self, clause):
        # The concrete clause node this wrapper holds.
        self.clause = clause

    def __repr__(self):
        return "Clause({0!r})".format(self.clause)

    def accept(self, visitor):
        # Visitor-pattern dispatch.
        return visitor.visit_clause(self)
class NumericalClause:
    """
    Syntax-tree node for the production:

        Numerical_clause -> Numerical_attribute Numerical_operator
                            Numerical_value
    """

    def __init__(self, attribute, operator, value):
        # The three children: attribute, comparison operator, value.
        self.attribute = attribute
        self.operator = operator
        self.value = value

    def __repr__(self):
        return "NumericalClause({0!r},{1!r},{2!r})".format(
            self.attribute, self.operator, self.value)

    def accept(self, visitor):
        # Visitor-pattern dispatch.
        return visitor.visit_numerical_clause(self)
class NumericalAttribute:
    """
    Syntax-tree node for the production:

        Numerical_attribute -> "County code" | "State code" | ...
    """

    def __init__(self, attribute, code):
        # Human-readable attribute name and its associated code.
        self.attribute = attribute
        self.code = code

    def __repr__(self):
        # Only the attribute name participates in the repr; the code is
        # deliberately omitted (matches the other *Attribute nodes).
        return "NumericalAttribute({0!r})".format(self.attribute)

    def accept(self, visitor):
        # Visitor-pattern dispatch.
        return visitor.visit_numerical_attribute(self)
class NumericalOperator:
    """
    Syntax-tree node for the production:

        Numerical_operator -> "equal to" | "less than"
            | "less than or equal to" | "greater than"
            | "greater than or equal to"
    """

    def __init__(self, operator, code):
        # Human-readable operator text and its associated code.
        self.operator = operator
        self.code = code

    def __repr__(self):
        # The code is deliberately omitted from the repr.
        return "NumericalOperator({0!r})".format(self.operator)

    def accept(self, visitor):
        # Visitor-pattern dispatch.
        return visitor.visit_numerical_operator(self)
class NumericalValue:
    """
    Syntax-tree node for the production:

        Numerical_value -> Integer_value | Negative_integer_value
                         | Real_value | Negative_real_value
    """

    def __init__(self, value):
        # The concrete value node this wrapper holds.
        self.value = value

    def __repr__(self):
        return "NumericalValue({0!r})".format(self.value)

    def accept(self, visitor):
        # Visitor-pattern dispatch.
        return visitor.visit_numerical_value(self)
class NegativeIntegerValue:
    """
    Syntax-tree node for the production:

        Negative_integer_value -> "-" Integer_value
    """

    def __init__(self, integer_value):
        # The wrapped (positive) integer-value node.
        self.integer_value = integer_value

    def __repr__(self):
        return "NegativeIntegerValue({0!r})".format(self.integer_value)

    def accept(self, visitor):
        # Visitor-pattern dispatch.
        return visitor.visit_negative_integer_value(self)
class NegativeRealValue:
    """
    Syntax-tree node for the production:

        Negative_real_value -> "-" Real_value
    """

    def __init__(self, real_value):
        # The wrapped (positive) real-value node.
        self.real_value = real_value

    def __repr__(self):
        return "NegativeRealValue({0!r})".format(self.real_value)

    def accept(self, visitor):
        # Visitor-pattern dispatch.
        return visitor.visit_negative_real_value(self)
class RealValue:
    """
    Syntax-tree node for the production:

        Real_value -> Integer_value "." Integer_value
    """

    def __init__(self, integer_part, fractional_part):
        # The digits before and after the decimal point.
        self.integer_part = integer_part
        self.fractional_part = fractional_part

    def __repr__(self):
        return "RealValue({0!r},{1!r})".format(
            self.integer_part, self.fractional_part)

    def to_negative(self):
        """
        Wrap this value in its negative counterpart. Used by
        Parser.parse_numerical_value for better code structure and
        readability.
        """
        return NegativeRealValue(self)

    def accept(self, visitor):
        # Visitor-pattern dispatch.
        return visitor.visit_real_value(self)
class IntegerValue:
    """
    Syntax-tree node for the productions:

        Integer_value -> Digit+
        Digit -> "0" | "1" | "2" | "3" | "4" | "5" | "6" | "7" | "8" | "9"
    """

    def __init__(self, digits):
        # The digit sequence making up this integer literal.
        self.digits = digits

    def __repr__(self):
        return "IntegerValue({0!r})".format(self.digits)

    def to_negative(self):
        """
        Wrap this value in its negative counterpart. Used by
        Parser.parse_numerical_value for better code structure and
        readability.
        """
        return NegativeIntegerValue(self)

    def accept(self, visitor):
        # Visitor-pattern dispatch.
        return visitor.visit_integer_value(self)
class LogicalClause:
    """
    AST node for a boolean comparison clause.

    Grammar:
        Logical_clause -> Logical_attribute "is" Logical_value

    Although currently the only valid operator is "is", an operator is
    still stored so the grammar can be extended in the future.
    """

    def __init__(self, attribute, operator, value):
        self.attribute = attribute
        self.operator = operator
        self.value = value

    def __repr__(self):
        return "LogicalClause({!r},{!r},{!r})".format(
            self.attribute, self.operator, self.value)

    def accept(self, visitor):
        # Visitor-pattern dispatch hook.
        return visitor.visit_logical_clause(self)
class LogicalAttribute:
    """
    AST node for a boolean-valued attribute name.

    Grammar:
        Logical_attribute -> "End stage renal disease indicator" | ...
    """

    def __init__(self, attribute, code):
        # Attribute display text and its internal code.
        self.attribute = attribute
        self.code = code

    def __repr__(self):
        return "LogicalAttribute({!r})".format(self.attribute)

    def accept(self, visitor):
        # Visitor-pattern dispatch hook.
        return visitor.visit_logical_attribute(self)
class LogicalValue:
    """
    AST node for a boolean literal.

    Grammar:
        Logical_value -> "true" | "false"
    """

    def __init__(self, value):
        self.value = value

    def __repr__(self):
        return "LogicalValue({!r})".format(self.value)

    def accept(self, visitor):
        # Visitor-pattern dispatch hook.
        return visitor.visit_logical_value(self)
class TextualClause:
    """
    AST node for a textual comparison clause.

    Grammar:
        Textual_clause -> Textual_attribute Textual_operator Textual_value
    """

    def __init__(self, attribute, operator, value):
        self.attribute = attribute
        self.operator = operator
        self.value = value

    def __repr__(self):
        return "TextualClause({!r},{!r},{!r})".format(
            self.attribute, self.operator, self.value)

    def accept(self, visitor):
        # Visitor-pattern dispatch hook.
        return visitor.visit_textual_clause(self)
class TextualAttribute:
    """
    AST node for a text-valued attribute name.

    Grammar:
        Textual_attribute -> "Beneficiary code" | "Date of birth" | ...
    """

    def __init__(self, attribute, code):
        # Attribute display text and its internal code.
        self.attribute = attribute
        self.code = code

    def __repr__(self):
        return "TextualAttribute({!r})".format(self.attribute)

    def accept(self, visitor):
        # Visitor-pattern dispatch hook.
        return visitor.visit_textual_attribute(self)
class TextualOperator:
    """
    AST node for a textual comparison operator.

    Grammar:
        Textual_operator -> "is" | "matches" | "between"
    """

    def __init__(self, operator):
        self.operator = operator

    def __repr__(self):
        return "TextualOperator({!r})".format(self.operator)

    def accept(self, visitor):
        # Visitor-pattern dispatch hook.
        return visitor.visit_textual_operator(self)
class TextualValue:
    """
    AST node for a text literal.

    Grammar:
        Textual_value -> Character+
        Character -> "a" | "b" | ...
    """

    def __init__(self, characters):
        self.characters = characters

    def __repr__(self):
        return "TextualValue({!r})".format(self.characters)

    def accept(self, visitor):
        # Visitor-pattern dispatch hook.
        return visitor.visit_textual_value(self)
class BetweenClause:
    """
    AST node for a range clause.

    Covers the updated grammar case:
        attribute "between" value "to" value
    """

    def __init__(self, attribute, value1, value2):
        # Attribute node plus the lower/upper bound value nodes.
        self.attribute = attribute
        self.value1 = value1
        self.value2 = value2

    def __repr__(self):
        return "BetweenClause({!r},{!r},{!r})".format(
            self.attribute, self.value1, self.value2)

    def accept(self, visitor):
        # Visitor-pattern dispatch hook.
        return visitor.visit_between_clause(self)
| |
import numpy
from orangecontrib.xoppy.util.xoppy_undulators import xoppy_calc_undulator_power_density, xoppy_calc_undulator_spectrum
from orangecontrib.xoppy.util.xoppy_xraylib_util import xpower_calc
from orangecontrib.xoppy.util.fit_gaussian2d import fit_gaussian2d, info_params, twoD_Gaussian
from srxraylib.plot.gol import plot, plot_image
import scipy.constants as codata
def calculate_line(photon_energy,undulator_period,N,K,thickness_diamond_mm,distance,slit_h,slit_v,coating,incident_angle_mrad,
                   do_plot=False):
    """Compute the power budget of one beamline configuration.

    Simulates the undulator emission spectrum through a slit, attenuates it
    through a diamond window and two grazing-incidence mirrors, computes the
    power density at a 5x5 mm aperture and fits it with a 2D Gaussian.

    :param photon_energy: nominal photon energy in keV (used in plot titles).
    :param undulator_period: undulator period in m.
    :param N: number of undulator periods.
    :param K: undulator deflection parameter.
    :param thickness_diamond_mm: diamond window thickness in mm.
    :param distance: source-to-slit distance in m.
    :param slit_h: horizontal slit aperture in m.
    :param slit_v: vertical slit aperture in m.
    :param coating: mirror coating material symbol (e.g. "Si", "Rh", "Pt").
    :param incident_angle_mrad: mirror grazing angle in mrad.
    :param do_plot: if True, display intermediate plots.
    :return: dict with keys "power_in_spectrum", "diamond_absorbed",
        "m1_absorbed", "m2_absorbed", "fit_parameters",
        "fit_percent_difference".
    """
    print("######################### INPUTS ###################################")
    print("photon_energy=",photon_energy)
    print("undulator_period=",undulator_period)
    print("N=",N)
    print("K=",K)
    print("thickness_diamond_mm=",thickness_diamond_mm)
    print("distance=",distance)
    print("slit_h=",slit_h)
    print("slit_v=",slit_v)  # fix: slit_v was missing from the inputs dump
    print("coating=",coating)
    print("incident_angle_mrad=",incident_angle_mrad)
    print("#######################################################################")

    out_dictionary = {}

    #
    # Spectrum simulation
    #
    # Electron-beam sizes/divergences taken from the S28D lattice
    # (epsilon_x=1.3166e-10, epsilon_y=5e-12, beta_x=6.89997, beta_y=2.6447).
    SIGMAX = 30.1836 * 1e-6
    SIGMAY = 3.63641 * 1e-6
    SIGMAXP = 4.36821 * 1e-6
    SIGMAYP = 1.37498 * 1e-6
    METHOD = 2  # US=0 URGENT=1 SRW=2

    print("\n\n Computing spectrum \n\n")
    e, f, spectral_power, cumulated_power = \
        xoppy_calc_undulator_spectrum(ELECTRONENERGY=6.0,ELECTRONENERGYSPREAD=0.001,ELECTRONCURRENT=0.2,
            ELECTRONBEAMSIZEH=SIGMAX,ELECTRONBEAMSIZEV=SIGMAY,
            ELECTRONBEAMDIVERGENCEH=SIGMAXP,ELECTRONBEAMDIVERGENCEV=SIGMAYP,
            PERIODID=undulator_period,NPERIODS=N,KV=K,DISTANCE=distance,GAPH=slit_h,GAPV=slit_v,
            PHOTONENERGYMIN=1000.0,PHOTONENERGYMAX=100000.0,PHOTONENERGYPOINTS=500,
            METHOD=METHOD,  # fix: pass the METHOD constant instead of a stray literal 2
            USEEMITTANCES=1)

    # Integrate the flux over the (uniform) energy grid to get total power [W].
    power_in_spectrum = f.sum()*1e3*codata.e*(e[1]-e[0])
    out_dictionary["power_in_spectrum"] = power_in_spectrum

    if do_plot:
        plot(e,spectral_power,title="E = %d keV"%photon_energy)

    #
    # Optical system: diamond window (filter, flag 0) followed by two
    # identical mirrors (flag 1). See xpower_calc for the meaning of
    # substance/flags/dens/thick/angle/roughness.
    #
    optical_system_dictionary = xpower_calc(energies=e,source=spectral_power,
                        substance=["C",coating,coating],flags=[0,1,1],dens=[3.53,2.33,2.33],
                        thick=[thickness_diamond_mm,1,1],
                        angle=[0,incident_angle_mrad,incident_angle_mrad],roughness=[0,0,0],
                        output_file=None)

    for key in optical_system_dictionary.keys():
        print(key)

    print(optical_system_dictionary["info"])

    for i,ilabel in enumerate(optical_system_dictionary["labels"]):
        print(i,ilabel)
    # Relevant rows of the "data" array: 1 = source, 6 = intensity after
    # o.e. 1 (window), 12 = after o.e. 2 (mirror 1), 18 = after o.e. 3
    # (mirror 2).  (Fixes the old comment typo "col112".)
    print(optical_system_dictionary["data"].shape)

    # Absorbed power at each element = difference of integrated intensities.
    I0 = numpy.trapz( optical_system_dictionary["data"][1,:], x=e, axis=-1)
    I1 = numpy.trapz( optical_system_dictionary["data"][6,:], x=e, axis=-1)
    I2 = numpy.trapz( optical_system_dictionary["data"][12,:], x=e, axis=-1)
    I3 = numpy.trapz( optical_system_dictionary["data"][18,:], x=e, axis=-1)

    print("Source power: ",I0)
    print("        after diamond: ",I1)
    print("        after M1: ",I2)
    print("        after M2: ",I3)

    out_dictionary["diamond_absorbed"] = I0-I1
    out_dictionary["m1_absorbed"] = I1-I2
    out_dictionary["m2_absorbed"] = I2-I3

    #
    # Power density over a fixed 5x5 mm aperture (independent of slit_h/v).
    #
    h, v, p, code = xoppy_calc_undulator_power_density(ELECTRONENERGY=6.0,ELECTRONENERGYSPREAD=0.001,ELECTRONCURRENT=0.2,
            ELECTRONBEAMSIZEH=SIGMAX,ELECTRONBEAMSIZEV=SIGMAY,
            ELECTRONBEAMDIVERGENCEH=SIGMAXP,ELECTRONBEAMDIVERGENCEV=SIGMAYP,
            PERIODID=undulator_period,NPERIODS=N,KV=K,DISTANCE=distance,GAPH=5e-3,GAPV=5e-3,
            HSLITPOINTS=101,VSLITPOINTS=101,
            METHOD=METHOD,  # fix: pass the METHOD constant instead of a stray literal 2
            USEEMITTANCES=1)

    if do_plot:
        plot_image(p,h,v,title="power density E = %d keV"%photon_energy)

    #
    # Fit power density with a 2D Gaussian
    #
    print("============= Fitting power density to a 2D Gaussian. ==============\n")
    print("Please use these results with care: check if the original data looks like a Gaussian.")
    fit_parameters = fit_gaussian2d(p,h,v)
    print(info_params(fit_parameters))

    H,V = numpy.meshgrid(h,v)
    data_fitted = twoD_Gaussian( (H,V), *fit_parameters)

    # Renamed from "power_in_spectrum": this integrates the power *density*
    # map and must not clobber the spectrum integral computed above.
    power_in_density = p.sum()*(h[1]-h[0])*(v[1]-v[0])
    print(" Total power in the calculated data [W]: ",power_in_density)
    power_in_density_fit = data_fitted.sum()*(h[1]-h[0])*(v[1]-v[0])
    print(" Total power in the fitted data [W]: ",power_in_density_fit)
    print("====================================================\n")

    if do_plot:
        data_fitted.shape = (h.size,v.size)
        plot_image(data_fitted,h,v,title="FITTED power density E = %d keV"%photon_energy)

    out_dictionary["fit_parameters"] = fit_parameters
    out_dictionary["fit_percent_difference"] = 100 * (power_in_density_fit - power_in_density) / power_in_density

    return out_dictionary
if __name__ == "__main__":
    # One entry per beamline case; all lists are indexed in lockstep.
    # Undulator choice per energy: U32 for 5, 25, 30, 40 keV; U27 otherwise.
    Energy_keV = [5, 7, 10, 12, 15, 20, 25, 30, 40]
    lambda0_cm = [3.2, 2.7, 2.7, 2.7, 2.7, 2.7, 3.2, 3.2, 3.2]
    N = [72, 85, 85, 85, 85, 85, 72, 72, 72]
    K = [1.53, 1.292, 0.756, 1.846, 1.535, 1.12, 1.044, 1.274, 1.102]
    Diamond_window_thickness_mm = [0.3] * 9
    Distance_from_source_m = [27] * 9
    V_mm = [0.6] * 9
    H_mm = [1.2, 1.2, 1, 0.9, 1.2, 1.2, 1.2, 0.9, 0.9]
    Coating = ["Si", "Si", "Si", "Si", "Rh", "Rh", "Pt", "Pt", "Pt"]
    Incident_angle_mrad = [4, 3, 2.5, 2, 4, 3, 3, 2, 2]

    #
    # calculation loop (units converted to m here: cm -> m, mm -> m)
    #
    out_dictionaries = []
    for i,photon_energy in enumerate(Energy_keV):
        out_dictionary = calculate_line(photon_energy,1e-2*lambda0_cm[i],N[i],K[i],Diamond_window_thickness_mm[i],
                       Distance_from_source_m[i],1e-3*H_mm[i],1e-3*V_mm[i],Coating[i],Incident_angle_mrad[i],
                       do_plot=False)
        out_dictionaries.append(out_dictionary)

    #
    # prepare text output: one power-budget table and one Gaussian-fit table
    #
    text_output = ""
    titles = ["energy_kev","power_in_spectrum","diamond_absorbed","m1_absorbed","m2_absorbed"]
    text_output += (" %20s %20s %20s %20s %20s \n")%(tuple(titles))
    for i in range(len(out_dictionaries)):
        text_output += ("%20d %20.3f %20.3f %20.3f %20.3f \n")%( Energy_keV[i],
                                            out_dictionaries[i]["power_in_spectrum"],
                                            out_dictionaries[i]["diamond_absorbed"],
                                            out_dictionaries[i]["m1_absorbed"],
                                            out_dictionaries[i]["m2_absorbed"])

    text_fit = ""
    titles_fit = ["energy_kev","Height A: ","center x0:","center y0","sigmax","sigmay","angle","offset","fit difference"]
    text_fit += ("%20s %20s %20s %20s %20s %20s %20s %20s %20s\n")%(tuple(titles_fit))
    for i in range(len(out_dictionaries)):
        text_fit += ("%20d %20.3f %20.3f %20.3f %20.3f %20.3f %20.3f %20.3f %20.3f \n")%(
                                            Energy_keV[i],
                                            out_dictionaries[i]["fit_parameters"][0],
                                            out_dictionaries[i]["fit_parameters"][1],
                                            out_dictionaries[i]["fit_parameters"][2],
                                            out_dictionaries[i]["fit_parameters"][3],
                                            out_dictionaries[i]["fit_parameters"][4],
                                            out_dictionaries[i]["fit_parameters"][5],
                                            out_dictionaries[i]["fit_parameters"][6],
                                            out_dictionaries[i]["fit_percent_difference"])

    print(text_output)
    print(text_fit)

    #
    # dump to file (fix: context manager guarantees the handle is closed
    # even if a write fails; the original left the file open on error)
    #
    with open("script1.txt", 'w') as f:
        f.write(text_output)
        f.write("\n\n\n")
        f.write(text_fit)
    print("File written to disk: script1.txt")
| |
# coding=utf-8
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for kitti.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from task_adaptation.data import data_testing_lib
from task_adaptation.data import kitti
import tensorflow.compat.v1 as tf
class KittiDataCountTest(data_testing_lib.BaseVTABDataTest):
  """See base class for usage and test descriptions."""

  def setUp(self):
    # Expected example counts per split; identical for all KITTI tasks.
    split_sizes = {
        "train": 6347,
        "val": 423,
        "trainval": 6770,
        "test": 711,
        "train800val200": 1000,
        "train800": 800,
        "val200": 200,
    }
    super(KittiDataCountTest, self).setUp(
        data_wrapper=kitti.KittiData(task="count_all"),
        num_classes=16,
        expected_num_samples=split_sizes,
        required_tensors_shapes={"image": (None, None, 3), "label": ()},
        tfds_label_key_map={})
class KittiDataCountLeftTest(data_testing_lib.BaseVTABDataTest):
  """See base class for usage and test descriptions."""

  def setUp(self):
    # Expected example counts per split; identical for all KITTI tasks.
    split_sizes = {
        "train": 6347,
        "val": 423,
        "trainval": 6770,
        "test": 711,
        "train800val200": 1000,
        "train800": 800,
        "val200": 200,
    }
    super(KittiDataCountLeftTest, self).setUp(
        data_wrapper=kitti.KittiData(task="count_left"),
        num_classes=16,
        expected_num_samples=split_sizes,
        required_tensors_shapes={"image": (None, None, 3), "label": ()},
        tfds_label_key_map={})
class KittiDataCountFarTest(data_testing_lib.BaseVTABDataTest):
  """See base class for usage and test descriptions."""

  def setUp(self):
    # Expected example counts per split; identical for all KITTI tasks.
    split_sizes = {
        "train": 6347,
        "val": 423,
        "trainval": 6770,
        "test": 711,
        "train800val200": 1000,
        "train800": 800,
        "val200": 200,
    }
    super(KittiDataCountFarTest, self).setUp(
        data_wrapper=kitti.KittiData(task="count_far"),
        num_classes=16,
        expected_num_samples=split_sizes,
        required_tensors_shapes={"image": (None, None, 3), "label": ()},
        tfds_label_key_map={})
class KittiDataCountNearTest(data_testing_lib.BaseVTABDataTest):
  """See base class for usage and test descriptions."""

  def setUp(self):
    # Expected example counts per split; identical for all KITTI tasks.
    split_sizes = {
        "train": 6347,
        "val": 423,
        "trainval": 6770,
        "test": 711,
        "train800val200": 1000,
        "train800": 800,
        "val200": 200,
    }
    super(KittiDataCountNearTest, self).setUp(
        data_wrapper=kitti.KittiData(task="count_near"),
        num_classes=16,
        expected_num_samples=split_sizes,
        required_tensors_shapes={"image": (None, None, 3), "label": ()},
        tfds_label_key_map={})
class KittiDataClosestDistanceTest(data_testing_lib.BaseVTABDataTest):
  """See base class for usage and test descriptions."""

  def setUp(self):
    # Expected example counts per split; identical for all KITTI tasks.
    split_sizes = {
        "train": 6347,
        "val": 423,
        "trainval": 6770,
        "test": 711,
        "train800val200": 1000,
        "train800": 800,
        "val200": 200,
    }
    super(KittiDataClosestDistanceTest, self).setUp(
        data_wrapper=kitti.KittiData(task="closest_object_distance"),
        num_classes=5,
        expected_num_samples=split_sizes,
        required_tensors_shapes={"image": (None, None, 3), "label": ()},
        tfds_label_key_map={})
class KittiDataClosestXLocTest(data_testing_lib.BaseVTABDataTest):
  """See base class for usage and test descriptions."""

  def setUp(self):
    # Expected example counts per split; identical for all KITTI tasks.
    split_sizes = {
        "train": 6347,
        "val": 423,
        "trainval": 6770,
        "test": 711,
        "train800val200": 1000,
        "train800": 800,
        "val200": 200,
    }
    super(KittiDataClosestXLocTest, self).setUp(
        data_wrapper=kitti.KittiData(task="closest_object_x_location"),
        num_classes=5,
        expected_num_samples=split_sizes,
        required_tensors_shapes={"image": (None, None, 3), "label": ()},
        tfds_label_key_map={})
class KittiDataCountVehiclesTest(data_testing_lib.BaseVTABDataTest):
  """See base class for usage and test descriptions."""

  def setUp(self):
    # Expected example counts per split; identical for all KITTI tasks.
    split_sizes = {
        "train": 6347,
        "val": 423,
        "trainval": 6770,
        "test": 711,
        "train800val200": 1000,
        "train800": 800,
        "val200": 200,
    }
    super(KittiDataCountVehiclesTest, self).setUp(
        data_wrapper=kitti.KittiData(task="count_vehicles"),
        num_classes=4,
        expected_num_samples=split_sizes,
        required_tensors_shapes={"image": (None, None, 3), "label": ()},
        tfds_label_key_map={})
class KittiDataClosestVehicleTest(data_testing_lib.BaseVTABDataTest):
  """See base class for usage and test descriptions."""

  def setUp(self):
    # Expected example counts per split; identical for all KITTI tasks.
    split_sizes = {
        "train": 6347,
        "val": 423,
        "trainval": 6770,
        "test": 711,
        "train800val200": 1000,
        "train800": 800,
        "val200": 200,
    }
    super(KittiDataClosestVehicleTest, self).setUp(
        data_wrapper=kitti.KittiData(task="closest_vehicle_distance"),
        num_classes=4,
        expected_num_samples=split_sizes,
        required_tensors_shapes={"image": (None, None, 3), "label": ()},
        tfds_label_key_map={})
class TestPreprocessing(tf.test.TestCase):
  """Unit tests for the kitti label-preprocessing functions."""

  def test_count_vehicles(self):
    # Fix: use a context-managed session so resources are released when the
    # test ends (the original never closed its tf.Session).
    with tf.Session() as sess:
      x = {"image": tf.constant([0])}
      # Expectations below: types 0/1/2 count as vehicles, type 3 does not,
      # and the label saturates at 3.
      x["objects"] = {"type": tf.constant([0])}
      self.assertEqual(1, sess.run(kitti._count_vehicles_pp(x)["label"]))
      x["objects"] = {"type": tf.constant([3])}
      self.assertEqual(0, sess.run(kitti._count_vehicles_pp(x)["label"]))
      x["objects"] = {"type": tf.constant([0, 1])}
      self.assertEqual(2, sess.run(kitti._count_vehicles_pp(x)["label"]))
      x["objects"] = {"type": tf.constant([0, 1, 2])}
      self.assertEqual(3, sess.run(kitti._count_vehicles_pp(x)["label"]))
      x["objects"] = {"type": tf.constant([0, 1, 2, 2, 2, 2, 2])}
      self.assertEqual(3, sess.run(kitti._count_vehicles_pp(x)["label"]))

  def test_closest_vehicle(self):
    # Fix: context-managed session (see test_count_vehicles).
    with tf.Session() as sess:
      x = {"image": tf.constant([0])}
      # Expectations below: label buckets grow with the z distance of the
      # closest vehicle; non-vehicle types (e.g. 4) map to the last bucket.
      x["objects"] = {
          "type": tf.constant([0]),
          "location": tf.constant([[0.0, 0.0, 1.0]]),
      }
      self.assertEqual(0,
                       sess.run(kitti._closest_vehicle_distance_pp(x)["label"]))
      x["objects"] = {
          "type": tf.constant([0]),
          "location": tf.constant([[0.0, 0.0, 10.0]]),
      }
      self.assertEqual(1,
                       sess.run(kitti._closest_vehicle_distance_pp(x)["label"]))
      x["objects"] = {
          "type": tf.constant([0]),
          "location": tf.constant([[0.0, 0.0, 30.0]]),
      }
      self.assertEqual(2,
                       sess.run(kitti._closest_vehicle_distance_pp(x)["label"]))
      x["objects"] = {
          "type": tf.constant([4]),
          "location": tf.constant([[0.0, 0.0, 30.0]]),
      }
      self.assertEqual(3,
                       sess.run(kitti._closest_vehicle_distance_pp(x)["label"]))
      x["objects"] = {
          "type": tf.constant([0, 1]),
          "location": tf.constant([[0.0, 0.0, 30.0], [0.0, 0.0, 1.0]]),
      }
      self.assertEqual(0,
                       sess.run(kitti._closest_vehicle_distance_pp(x)["label"]))
# Standard TensorFlow test entry point: discovers and runs the tests above.
if __name__ == "__main__":
  tf.test.main()
| |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import json
import os.path
import pathlib
from typing import Any
from typing import List
from typing import NamedTuple
import json5
import corenet.configuration as c
import corenet.data.dataset as dataset_lib
class AllDataSets(NamedTuple):
  """All dataset splits, one field per (scene-type, split) pair.

  Field names follow the "<name>_<split>" pattern, which
  generate_default_datasets() parses to build each dataset path.
  """
  # NOTE(review): fields are typed Any but generate_default_datasets()
  # fills them with c.Dataset instances — confirm before tightening.
  single_train: Any
  single_val: Any
  single_test: Any
  pairs_train: Any
  pairs_val: Any
  pairs_test: Any
  triplets_train: Any
  triplets_val: Any
  triplets_test: Any
def lo_realism(all_ds: AllDataSets) -> AllDataSets:
  """Returns a copy of all datasets with high_realism switched off."""
  low = (dataclasses.replace(ds, high_realism=False) for ds in all_ds)
  return AllDataSets(*low)
def shuffle_per_epoch(ds: c.Dataset) -> c.Dataset:
  """Returns a copy of the dataset that reshuffles on every epoch."""
  new_shuffle = c.ShuffleType.PER_EPOCH
  return dataclasses.replace(ds, shuffle=new_shuffle)
def create_data_loader():
  """Default data loader, used for training and evaluation of all models."""
  loader = c.DataLoaderConfig(num_data_workers=6, batch_size=4)
  return loader
def create_evals(all_ds: AllDataSets, num_obj: int, vox: c.VoxelizationConfig):
  """Creates recurrent evaluation configurations for a model.

  Args:
    all_ds: All datasets
    num_obj: The number of objects in the scene
    vox: Voxelization configuration

  Returns:
    Array of recurrent evaluation configurations.
  """
  # Map the object count to the matching AllDataSets field prefix.
  ds_name = {1: "single", 2: "pairs", 3: "triplets"}[num_obj]
  ds_test = getattr(all_ds, f"{ds_name}_test")  # type: c.Dataset
  ds_val = getattr(all_ds, f"{ds_name}_val")  # type: c.Dataset
  # Stable shuffling of the base test set is required for the derived
  # stable-eval fractions below to see the same examples every run.
  assert ds_test.shuffle == c.ShuffleType.ONCE
  # 1% of test data
  ds_test_short = dataclasses.replace(ds_test, data_fraction=1e-2)
  # 10% of test data
  ds_test_medium = dataclasses.replace(ds_test, data_fraction=1e-1)
  # 1% of val data, which is also used for training
  ds_short_train = dataclasses.replace(ds_val, data_fraction=1e-2,
                                       shuffle=c.ShuffleType.ONCE)
  return [
      # Frequently run evaluation on a small fraction of the train data. Data
      # is shuffled in a stable way (independent of the eval run). This
      # guarantees that the exact same examples appear in all eval runs, which
      # allows tracking their progress both quantitatively and qualitatively.
      c.RecurrentEvalConfig(
          start_step=40000, interval=40000, persistent_checkpoint=False,
          config=c.EvalConfig(
              name="short_stable_train_eval",
              num_qualitative_results=40,
              num_qualitative_results_in_tensor_board=4,
              data=c.DataPipeline(
                  datasets=[ds_short_train], data_loader=create_data_loader(),
                  voxelization_config=vox, shuffle=c.ShuffleType.ONCE))),
      # Frequently run evaluation on a small fraction of all test data. Data
      # is shuffled in a stable way (independent of the eval run). This
      # guarantees that the exact same examples appear in all eval runs, which
      # allows tracking their progress both quantitatively and qualitatively.
      c.RecurrentEvalConfig(
          start_step=40000, interval=40000, persistent_checkpoint=False,
          config=c.EvalConfig(
              name="short_stable_eval",
              num_qualitative_results=40,
              num_qualitative_results_in_tensor_board=4,
              data=c.DataPipeline(
                  datasets=[ds_test_short], data_loader=create_data_loader(),
                  voxelization_config=vox, shuffle=c.ShuffleType.ONCE))),
      # Less frequently run evaluation on a larger fraction of the test data.
      # Data is shuffled differently in the different eval runs, which means
      # that each eval run will see a different set of examples. This allows
      # to judge current model performance in an unbiased way.
      c.RecurrentEvalConfig(
          start_step=140000, interval=140000, persistent_checkpoint=False,
          config=c.EvalConfig(
              name="medium_eval",
              num_qualitative_results=100,
              num_qualitative_results_in_tensor_board=4,
              data=c.DataPipeline(
                  datasets=[shuffle_per_epoch(ds_test_medium)],
                  data_loader=create_data_loader(), voxelization_config=vox,
                  shuffle=c.ShuffleType.PER_EPOCH))),
      # Full evaluation run, which always sees all test data. Data is shuffled
      # in a stable way, which guarantees that the exact same qualitative
      # examples are saved in all eval runs. This allows tracking their progress
      # both qualitatively.
      c.RecurrentEvalConfig(
          start_step=500000, interval=500000, persistent_checkpoint=True,
          config=c.EvalConfig(
              name="full_eval",
              num_qualitative_results=500,
              num_qualitative_results_in_tensor_board=0,
              data=c.DataPipeline(
                  datasets=[ds_test], data_loader=create_data_loader(),
                  voxelization_config=vox, shuffle=c.ShuffleType.ONCE)))
  ]
# Relative path of the JSON schema for each pipeline type; embedded into
# generated config files as their "$schema" entry (see dumps()).
schema_paths = {
    c.TrainPipeline: "../schemas/train_config.json",
    c.TfModelEvalPipeline: "../schemas/tf_model_eval_config.json"
}
def dumps(p: c.JsonSchemaMixin):
  """Serializes a pipeline config to JSON5, tagged with its schema path."""
  as_dict = p.to_dict()
  as_dict["$schema"] = schema_paths[type(p)]
  body = json5.dumps(as_dict, indent=2)
  header = f"//Generated automatically, by {os.path.basename(__file__)}\n"
  return header + body
def generate_default_datasets() -> AllDataSets:
  """Returns all datasets with default settings.

  Settings: use all data (fraction=1.0); hi-realism; stable shuffling (i.e.
  independent of epoch/step and run).
  """
  datasets = []
  for field_name in AllDataSets._fields:
    name, split = field_name.split("_")
    # The single-object datasets use the Choy-classes JSON variant.
    if name == "single":
      json_file = "dataset.choy_classes.json"
    else:
      json_file = "dataset.json"
    datasets.append(c.Dataset(
        dataset_path=f"{{data_dir}}/{name}.{split}/{json_file}",
        meshes_dir="{meshes_dir}", high_realism=True,
        shuffle=c.ShuffleType.ONCE, data_fraction=1.0))
  return AllDataSets(*datasets)
def generate_common_string_templates() -> List[c.StringTemplate]:
  """Returns string templates common for all models."""
  templates = [
      # The root data directory
      c.StringTemplate(key="data_dir", value="data"),
      # Directory containing the ShapeNet meshes
      c.StringTemplate(key="meshes_dir", value="{data_dir}/shapenet_meshes"),
  ]
  return templates
def generate_configs():
  """Generates the train/eval config files for models h5, h7, y1, m7, m9.

  Writes one JSON5 file per model into configs/models/.
  """
  common_string_templates = generate_common_string_templates()
  common_string_templates += [
      # Initial ResNet50 checkpoint, trained on ImageNet
      c.StringTemplate(key="resnet_cpt",
                       value="{data_dir}/keras_resnet50_imagenet.cpt"),
      # Root output directory
      c.StringTemplate(key="output_dir", value="output/models")
  ]

  ds = generate_default_datasets()

  # 128^3 voxelization, fixed grid offset, FG/BG reconstruction
  # Use for training models h5, h7 and for evaluation of h5, h7, y1
  vox_fgbg_128_fixed = c.VoxelizationConfig(
      task_type=c.TaskType.FG_BG, resolution=c.Resolution(128, 128, 128),
      sub_grid_sampling=False, conservative_rasterization=False,
      voxelization_image_resolution_multiplier=8)

  # 32^3 voxelization, random grid offset, FG/BG reconstruction, improved
  # approximation through sub-grid sampling.
  # Use for training model y1
  vox_fgbg_32_rnd = c.VoxelizationConfig(
      task_type=c.TaskType.FG_BG, resolution=c.Resolution(32, 32, 32),
      sub_grid_sampling=True, conservative_rasterization=False,
      voxelization_image_resolution_multiplier=31)

  # 128^3 voxelization, fixed grid offset, semantic class reconstruction
  # Use for training and evaluation of models m7 and m9
  vox_sem_128_fixed = dataclasses.replace(vox_fgbg_128_fixed,
                                          task_type=c.TaskType.SEMANTIC)

  # Training parameters common to all models
  common_train_params = dict(
      resnet50_imagenet_checkpoint="{resnet_cpt}",
      checkpoint_interval=10000,
      persistent_checkpoint_interval=500000,
      last_upscale_factor=2,
      latent_channels=64,
      skip_fraction=0.75,
      max_steps=16000000,
      tensorboard_log_interval=1000,
      initial_learning_rate=0.0004,
      adam_epsilon=0.0001,
  )

  h5 = c.TrainPipeline(
      string_templates=common_string_templates,
      train=c.TrainConfig(
          data=c.DataPipeline(
              datasets=[shuffle_per_epoch(lo_realism(ds).single_train),
                        shuffle_per_epoch(lo_realism(ds).single_val)],
              data_loader=create_data_loader(), shuffle=c.ShuffleType.PER_EPOCH,
              voxelization_config=vox_fgbg_128_fixed),
          random_grid_offset=False, **common_train_params),
      eval=create_evals(lo_realism(ds), 1, vox_fgbg_128_fixed),
      output_path="{output_dir}/h5"
  )
  h7 = c.TrainPipeline(
      string_templates=common_string_templates,
      train=c.TrainConfig(
          data=c.DataPipeline(
              datasets=[shuffle_per_epoch(ds.single_train),
                        shuffle_per_epoch(ds.single_val)],
              data_loader=create_data_loader(), shuffle=c.ShuffleType.PER_EPOCH,
              voxelization_config=vox_fgbg_128_fixed),
          random_grid_offset=False, **common_train_params),
      eval=create_evals(ds, 1, vox_fgbg_128_fixed),
      output_path="{output_dir}/h7"
  )
  y1 = c.TrainPipeline(
      string_templates=common_string_templates,
      train=c.TrainConfig(
          data=c.DataPipeline(
              datasets=[shuffle_per_epoch(ds.single_train),
                        shuffle_per_epoch(ds.single_val)],
              data_loader=create_data_loader(), shuffle=c.ShuffleType.PER_EPOCH,
              voxelization_config=vox_fgbg_32_rnd),
          random_grid_offset=True, **common_train_params),
      eval=create_evals(ds, 1, vox_fgbg_128_fixed),
      output_path="{output_dir}/y1"
  )
  m7 = c.TrainPipeline(
      string_templates=common_string_templates,
      train=c.TrainConfig(
          data=c.DataPipeline(
              datasets=[shuffle_per_epoch(ds.pairs_train),
                        shuffle_per_epoch(ds.pairs_val)],
              data_loader=create_data_loader(), shuffle=c.ShuffleType.PER_EPOCH,
              voxelization_config=vox_sem_128_fixed),
          random_grid_offset=False, **common_train_params),
      eval=create_evals(ds, 2, vox_sem_128_fixed),
      output_path="{output_dir}/m7"
  )
  m9 = c.TrainPipeline(
      string_templates=common_string_templates,
      train=c.TrainConfig(
          data=c.DataPipeline(
              datasets=[shuffle_per_epoch(ds.triplets_train),
                        shuffle_per_epoch(ds.triplets_val)],
              data_loader=create_data_loader(), shuffle=c.ShuffleType.PER_EPOCH,
              voxelization_config=vox_sem_128_fixed),
          random_grid_offset=False, **common_train_params),
      eval=create_evals(ds, 3, vox_sem_128_fixed),
      output_path="{output_dir}/m9"
  )
  config_dir = pathlib.Path(__file__).parent.parent / "configs" / "models"
  # Fix: ensure the output directory exists before writing (matches
  # generate_schemas); previously a fresh checkout could fail here.
  config_dir.mkdir(parents=True, exist_ok=True)
  (config_dir / "h5.json5").write_text(dumps(h5))
  (config_dir / "h7.json5").write_text(dumps(h7))
  (config_dir / "m7.json5").write_text(dumps(m7))
  (config_dir / "m9.json5").write_text(dumps(m9))
  # (config_dir / "y1.json5").write_text(dumps(y1))  # y1 is still untested
def generate_paper_tf_eval_configs():
  """Generates eval configs for the pre-trained TF models from the paper.

  Writes one JSON5 file per model into configs/paper_tf_models/.
  """
  ds = generate_default_datasets()
  common_string_templates = generate_common_string_templates()
  common_string_templates += [
      # Directory containing the pre-trained models from the paper
      # (keyword args for consistency with the other StringTemplate calls)
      c.StringTemplate(key="paper_tf_models_dir",
                       value="{data_dir}/paper_tf_models"),
      # Root output directory
      c.StringTemplate(key="output_dir", value="output/paper_tf_models")
  ]

  vox_fgbg = c.VoxelizationConfig(
      task_type=c.TaskType.FG_BG, resolution=c.Resolution(128, 128, 128),
      sub_grid_sampling=False, conservative_rasterization=False,
      voxelization_image_resolution_multiplier=4,
      voxelization_projection_depth_multiplier=1)
  vox_h7 = dataclasses.replace(vox_fgbg,
                               voxelization_projection_depth_multiplier=2)
  vox_sem = dataclasses.replace(vox_fgbg, task_type=c.TaskType.SEMANTIC)

  default_data_loader = c.DataLoaderConfig(num_data_workers=6, batch_size=8)

  # Evaluation parameters shared by all models below
  common_eval_params = dict(
      name="full_eval", num_qualitative_results=40,
      num_qualitative_results_in_tensor_board=0,
  )
  h5 = c.TfModelEvalPipeline(
      eval_config=c.EvalConfig(
          data=c.DataPipeline(
              datasets=[lo_realism(ds).single_test], shuffle=c.ShuffleType.ONCE,
              data_loader=default_data_loader, voxelization_config=vox_fgbg),
          **common_eval_params),
      frozen_graph_path="{paper_tf_models_dir}/h5.pb",
      string_templates=common_string_templates, output_path="{output_dir}/h5")
  h7 = c.TfModelEvalPipeline(
      eval_config=c.EvalConfig(
          data=c.DataPipeline(
              datasets=[ds.single_test], shuffle=c.ShuffleType.ONCE,
              data_loader=default_data_loader, voxelization_config=vox_h7),
          **common_eval_params),
      frozen_graph_path="{paper_tf_models_dir}/h7.pb",
      string_templates=common_string_templates, output_path="{output_dir}/h7")
  m7 = c.TfModelEvalPipeline(
      eval_config=c.EvalConfig(
          data=c.DataPipeline(
              datasets=[ds.pairs_test], shuffle=c.ShuffleType.ONCE,
              data_loader=default_data_loader, voxelization_config=vox_sem),
          **common_eval_params),
      frozen_graph_path="{paper_tf_models_dir}/m7.pb",
      string_templates=common_string_templates, output_path="{output_dir}/m7")
  m9 = c.TfModelEvalPipeline(
      eval_config=c.EvalConfig(
          data=c.DataPipeline(
              datasets=[ds.triplets_test], shuffle=c.ShuffleType.ONCE,
              data_loader=default_data_loader, voxelization_config=vox_sem),
          **common_eval_params),
      frozen_graph_path="{paper_tf_models_dir}/m9.pb",
      string_templates=common_string_templates, output_path="{output_dir}/m9")
  y1 = c.TfModelEvalPipeline(
      eval_config=c.EvalConfig(
          data=c.DataPipeline(
              datasets=[lo_realism(ds).single_test], shuffle=c.ShuffleType.ONCE,
              data_loader=default_data_loader, voxelization_config=vox_fgbg),
          **common_eval_params),
      frozen_graph_path="{paper_tf_models_dir}/y1.pb",
      string_templates=common_string_templates, output_path="{output_dir}/y1")

  config_dir = (pathlib.Path(__file__).parent.parent /
                "configs" / "paper_tf_models")
  # Fix: exist_ok=True — without it a second run of this script raised
  # FileExistsError here.
  config_dir.mkdir(parents=True, exist_ok=True)
  (config_dir / "h7.json5").write_text(dumps(h7))
  (config_dir / "h5.json5").write_text(dumps(h5))
  (config_dir / "m7.json5").write_text(dumps(m7))
  (config_dir / "m9.json5").write_text(dumps(m9))
  (config_dir / "y1.json5").write_text(dumps(y1))
def generate_schemas():
  """Writes the JSON schema for every supported config file type."""
  schema_dir = pathlib.Path(__file__).parent.parent / "configs" / "schemas"
  schema_dir.mkdir(parents=True, exist_ok=True)
  schemas = {
      "tf_model_eval_config.json": c.TfModelEvalPipeline.json_schema(),
      "dataset_config.json": dataset_lib.DatasetConfig.json_schema(),
      "train_config.json": c.TrainPipeline.json_schema(),
  }
  for file_name, schema in schemas.items():
    (schema_dir / file_name).write_text(
        json.dumps(schema, sort_keys=True, indent=2))
def main():
  """Regenerates all schema files and all model/eval config files."""
  generate_schemas()
  generate_configs()
  generate_paper_tf_eval_configs()


if __name__ == '__main__':
  main()
| |
"""
Provide authentication using simple LDAP binds
:depends: - ldap Python module
"""
import itertools
import logging
import salt.utils.data
import salt.utils.stringutils
from jinja2 import Environment
from salt.exceptions import CommandExecutionError, SaltInvocationError
log = logging.getLogger(__name__)
try:
# pylint: disable=no-name-in-module
import ldap
import ldap.modlist
import ldap.filter
HAS_LDAP = True
# pylint: enable=no-name-in-module
except ImportError:
HAS_LDAP = False
# Defaults, override in master config
__defopts__ = {
    "auth.ldap.basedn": "",
    "auth.ldap.uri": "",
    "auth.ldap.server": "localhost",
    "auth.ldap.port": "389",
    "auth.ldap.starttls": False,
    "auth.ldap.tls": False,
    "auth.ldap.no_verify": False,
    "auth.ldap.anonymous": False,
    # Search scope passed to search_s() via int() in _bind()
    # (2 corresponds to a subtree search in python-ldap).
    "auth.ldap.scope": 2,
    "auth.ldap.groupou": "Groups",
    "auth.ldap.accountattributename": "memberUid",
    "auth.ldap.groupattribute": "memberOf",
    "auth.ldap.persontype": "person",
    "auth.ldap.groupclass": "posixGroup",
    # Directory-flavor switches; both False means plain OpenLDAP behavior.
    "auth.ldap.activedirectory": False,
    "auth.ldap.freeipa": False,
    # Domain suffixes stripped from FQDN-style minion names found in the
    # directory (see __expand_ldap_entries).
    "auth.ldap.minion_stripdomains": [],
}
def _config(key, mandatory=True, opts=None):
    """
    Look up ``auth.ldap.<key>`` in the master config (or in ``opts`` when
    supplied), falling back to the module defaults.

    Raises SaltInvocationError when the key is absent everywhere and
    ``mandatory`` is True; returns False when absent and not mandatory.
    """
    full_key = "auth.ldap.{}".format(key)
    source = opts if opts else __opts__
    try:
        return source[full_key]
    except KeyError:
        pass
    try:
        return __defopts__[full_key]
    except KeyError:
        if mandatory:
            raise SaltInvocationError(
                "missing auth.ldap.{} in master config".format(key)
            )
        return False
def _render_template(param, username):
    """
    Render ``param`` as a Jinja template with ``username`` made available
    as the ``{{ username }}`` variable.
    """
    return Environment().from_string(param).render({"username": username})
class _LDAPConnection:
    """
    Setup an LDAP connection.
    """

    def __init__(
        self,
        uri,
        server,
        port,
        starttls,
        tls,
        no_verify,
        binddn,
        bindpw,
        anonymous,
        accountattributename,
        activedirectory=False,
    ):
        """
        Bind to an LDAP directory using passed credentials.

        :param uri: full LDAP URI; when empty, one is built from
            ``server``/``port`` and the ``tls`` flag
        :param starttls: upgrade the plain connection with STARTTLS
            (mutually exclusive with ``tls``)
        :param tls: use the ldaps:// scheme
        :param no_verify: disable TLS certificate verification
            (set as a process-global python-ldap option)
        :param binddn: DN to bind as; skipped when ``anonymous`` is True
        :param bindpw: password for ``binddn``; must be non-empty unless
            ``anonymous`` is True
        :param anonymous: skip the simple bind entirely
        :param accountattributename: accepted for signature compatibility;
            not referenced in this initializer
        :param activedirectory: accepted for signature compatibility;
            not referenced in this initializer
        :raises CommandExecutionError: when python-ldap is missing, both
            starttls and tls are enabled, the password is empty for a
            non-anonymous bind, or the bind itself fails
        """
        self.uri = uri
        self.server = server
        self.port = port
        self.starttls = starttls
        self.tls = tls
        self.binddn = binddn
        self.bindpw = bindpw
        if not HAS_LDAP:
            raise CommandExecutionError(
                "LDAP connection could not be made, the python-ldap module is "
                "not installed. Install python-ldap to use LDAP external auth."
            )
        if self.starttls and self.tls:
            raise CommandExecutionError(
                "Cannot bind with both starttls and tls enabled."
                "Please enable only one of the protocols"
            )
        # ldaps:// implies TLS from the start; plain ldap:// may still be
        # upgraded below via STARTTLS.
        schema = "ldaps" if tls else "ldap"
        if self.uri == "":
            self.uri = "{}://{}:{}".format(schema, self.server, self.port)
        try:
            if no_verify:
                # Process-global option: disables certificate checking for
                # every subsequent connection in this process.
                ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
            self.ldap = ldap.initialize("{}".format(self.uri))
            self.ldap.protocol_version = 3  # ldap.VERSION3
            self.ldap.set_option(ldap.OPT_REFERRALS, 0)  # Needed for AD
            if not anonymous:
                if not self.bindpw:
                    raise CommandExecutionError(
                        "LDAP bind password is not set: password cannot be empty if auth.ldap.anonymous is False"
                    )
                # STARTTLS must be negotiated before credentials are sent.
                if self.starttls:
                    self.ldap.start_tls_s()
                self.ldap.simple_bind_s(self.binddn, self.bindpw)
        except Exception as ldap_error:  # pylint: disable=broad-except
            raise CommandExecutionError(
                "Failed to bind to LDAP server {} as {}: {}".format(
                    self.uri, self.binddn, ldap_error
                )
            )
def _bind_for_search(anonymous=False, opts=None):
    """
    Bind with binddn and bindpw only for searching LDAP
    :param anonymous: Try binding anonymously
    :param opts: Pass in when __opts__ is not available
    :return: LDAPConnection object
    """
    # config params (auth.ldap.*)
    mandatory_params = [
        "uri",
        "server",
        "port",
        "starttls",
        "tls",
        "no_verify",
        "anonymous",
        "accountattributename",
        "activedirectory",
    ]
    optional_params = [
        "binddn",
        "bindpw",
        "filter",
        "groupclass",
        "auth_by_group_membership_only",
    ]
    paramvalues = {name: _config(name, opts=opts) for name in mandatory_params}
    paramvalues.update(
        {name: _config(name, mandatory=False, opts=opts) for name in optional_params}
    )
    # The caller decides whether this search bind is anonymous, overriding
    # the configured value.
    paramvalues["anonymous"] = anonymous
    # Only add binddn/bindpw to the connargs when they're set, as they're not
    # mandatory for initializing the LDAP object, but if they're provided
    # initially, a bind attempt will be done during the initialization to
    # validate them
    connargs = {}
    if paramvalues["binddn"]:
        connargs["binddn"] = paramvalues["binddn"]
    if paramvalues["bindpw"]:
        mandatory_params.append("bindpw")
    for name in mandatory_params:
        connargs[name] = paramvalues[name]
    if not paramvalues["anonymous"] and paramvalues["binddn"] and paramvalues["bindpw"]:
        # search for the user's DN to be used for the actual authentication
        return _LDAPConnection(**connargs).ldap
    # Implicitly returns None when credentials are missing or the bind is
    # anonymous; callers test the result for truthiness.
def _bind(username, password, anonymous=False, opts=None):
    """
    Authenticate via an LDAP bind.

    When a search binddn/bindpw pair is configured, first bind with those
    credentials and search for the user's DN, then re-bind as that DN with
    ``password``. Otherwise bind directly as the rendered binddn (or
    anonymously).

    :param username: name substituted into the binddn/filter templates
    :param password: password used for the final bind
    :param anonymous: attempt an anonymous bind
    :param opts: config overrides for when __opts__ is not available
    :return: the bound LDAP connection object, or False on failure
    """
    # Get config params; create connection dictionary
    basedn = _config("basedn", opts=opts)
    scope = _config("scope", opts=opts)
    connargs = {}
    # config params (auth.ldap.*)
    params = {
        "mandatory": [
            "uri",
            "server",
            "port",
            "starttls",
            "tls",
            "no_verify",
            "anonymous",
            "accountattributename",
            "activedirectory",
        ],
        "additional": [
            "binddn",
            "bindpw",
            "filter",
            "groupclass",
            "auth_by_group_membership_only",
        ],
    }
    paramvalues = {}
    for param in params["mandatory"]:
        paramvalues[param] = _config(param, opts=opts)
    for param in params["additional"]:
        paramvalues[param] = _config(param, mandatory=False, opts=opts)
    paramvalues["anonymous"] = anonymous
    if paramvalues["binddn"]:
        # the binddn can also be composited, e.g.
        # - {{ username }}@domain.com
        # - cn={{ username }},ou=users,dc=company,dc=tld
        # so make sure to render it first before using it
        paramvalues["binddn"] = _render_template(paramvalues["binddn"], username)
        paramvalues["binddn"] = ldap.filter.escape_filter_chars(paramvalues["binddn"])
    if paramvalues["filter"]:
        escaped_username = ldap.filter.escape_filter_chars(username)
        paramvalues["filter"] = _render_template(
            paramvalues["filter"], escaped_username
        )
    # Only add binddn/bindpw to the connargs when they're set, as they're not
    # mandatory for initializing the LDAP object, but if they're provided
    # initially, a bind attempt will be done during the initialization to
    # validate them
    if paramvalues["binddn"]:
        connargs["binddn"] = paramvalues["binddn"]
    if paramvalues["bindpw"]:
        params["mandatory"].append("bindpw")
    for name in params["mandatory"]:
        connargs[name] = paramvalues[name]
    if not paramvalues["anonymous"]:
        if paramvalues["binddn"] and paramvalues["bindpw"]:
            # search for the user's DN to be used for the actual authentication
            _ldap = _LDAPConnection(**connargs).ldap
            log.debug(
                "Running LDAP user dn search with filter:%s, dn:%s, scope:%s",
                paramvalues["filter"],
                basedn,
                scope,
            )
            result = _ldap.search_s(basedn, int(scope), paramvalues["filter"])
            if not result:
                log.warning("Unable to find user %s", username)
                return False
            elif len(result) > 1:
                # Active Directory returns something odd. Though we do not
                # chase referrals (ldap.set_option(ldap.OPT_REFERRALS, 0) above)
                # it still appears to return several entries for other potential
                # sources for a match. All these sources have None for the
                # CN (ldap array return items are tuples: (cn, ldap entry))
                # But the actual CNs are at the front of the list.
                # So with some list comprehension magic, extract the first tuple
                # entry from all the results, create a list from those,
                # and count the ones that are not None. If that total is more than one
                # we need to error out because the ldap filter isn't narrow enough.
                cns = [tup[0] for tup in result]
                total_not_none = sum(1 for c in cns if c is not None)
                if total_not_none > 1:
                    log.error(
                        "LDAP lookup found multiple results for user %s", username
                    )
                    return False
                elif total_not_none == 0:
                    log.error(
                        "LDAP lookup--unable to find CN matching user %s", username
                    )
                    return False
            # Re-bind below as the DN found by the search. (A dead
            # `if binddn and not bindpw` branch was removed here: bindpw is
            # always truthy inside this block, so it could never execute.)
            connargs["binddn"] = result[0][0]
        elif paramvalues["binddn"] and not paramvalues["bindpw"]:
            # No search password configured: bind directly as the rendered binddn.
            connargs["binddn"] = paramvalues["binddn"]
    # Update connection dictionary with the user's password
    connargs["bindpw"] = password
    # Attempt bind with user dn and password
    if paramvalues["anonymous"]:
        log.debug("Attempting anonymous LDAP bind")
    else:
        # .get() here: connargs may legitimately lack "binddn" (e.g. no
        # binddn configured); logging must not raise KeyError.
        log.debug("Attempting LDAP bind with user dn: %s", connargs.get("binddn"))
    try:
        ldap_conn = _LDAPConnection(**connargs).ldap
    except Exception:  # pylint: disable=broad-except
        connargs.pop("bindpw", None)  # Don't log the password
        log.error("Failed to authenticate user dn via LDAP: %s", connargs)
        log.debug("Error authenticating user dn via LDAP:", exc_info=True)
        return False
    # .get() for the same reason as above: an anonymous bind never sets
    # "binddn" in connargs, and a successful bind must not crash on logging.
    log.debug("Successfully authenticated user dn via LDAP: %s", connargs.get("binddn"))
    return ldap_conn
def auth(username, password):
    """
    Simple LDAP auth
    """
    if not HAS_LDAP:
        log.error("LDAP authentication requires python-ldap module")
        return False
    # Anonymous final bind is only used when auth is driven purely by
    # group membership and anonymous binds are allowed.
    anonymous_bind = _config(
        "auth_by_group_membership_only", mandatory=False
    ) and _config("anonymous", mandatory=False)
    bind = None
    has_search_creds = _config("binddn", mandatory=False) and _config(
        "bindpw", mandatory=False
    )
    if has_search_creds:
        # If bind credentials are configured, verify that we receive a valid bind
        search_bind = _bind_for_search(anonymous=_config("anonymous", mandatory=False))
        # If username & password are not None, attempt to verify they are valid
        if search_bind and username and password:
            bind = _bind(username, password, anonymous=anonymous_bind)
    else:
        bind = _bind(username, password, anonymous=anonymous_bind)
    if bind:
        log.debug("LDAP authentication successful")
        return bind
    log.error("LDAP _bind authentication FAILED")
    return False
def groups(username, **kwargs):
    """
    Authenticate against an LDAP group

    Behavior is highly dependent on if Active Directory is in use.
    AD handles group membership very differently than OpenLDAP.
    See the :ref:`External Authentication <acl-eauth>` documentation for a thorough
    discussion of available parameters for customizing the search.

    OpenLDAP allows you to search for all groups in the directory
    and returns members of those groups. Then we check against
    the username entered.

    :param username: user whose group membership is looked up
    :param kwargs: may carry ``password`` (used when no search bind is
        configured) and ``show_jid`` (only present on the first payload
        of a job; used to decide whether to re-verify credentials)
    :return: list of group names (empty list on failure)
    """
    group_list = []
    # If bind credentials are configured, use them instead of user's
    if _config("binddn", mandatory=False) and _config("bindpw", mandatory=False):
        bind = _bind_for_search(anonymous=_config("anonymous", mandatory=False))
    else:
        bind = _bind(
            username,
            kwargs.get("password", ""),
            anonymous=_config("auth_by_group_membership_only", mandatory=False)
            and _config("anonymous", mandatory=False),
        )
    if bind:
        log.debug("ldap bind to determine group membership succeeded!")
        if _config("activedirectory"):
            # --- Active Directory: resolve the user's DN first, then search
            # for groups that list that DN as a member.
            try:
                get_user_dn_search = "(&({}={})(objectClass={}))".format(
                    _config("accountattributename"), username, _config("persontype")
                )
                user_dn_results = bind.search_s(
                    _config("basedn"),
                    ldap.SCOPE_SUBTREE,
                    get_user_dn_search,
                    ["distinguishedName"],
                )
            except Exception as e:  # pylint: disable=broad-except
                log.error("Exception thrown while looking up user DN in AD: %s", e)
                return group_list
            if not user_dn_results:
                log.error("Could not get distinguished name for user %s", username)
                return group_list
            # LDAP results are always tuples. First entry in the tuple is the DN
            dn = ldap.filter.escape_filter_chars(user_dn_results[0][0])
            ldap_search_string = "(&(member={})(objectClass={}))".format(
                dn, _config("groupclass")
            )
            log.debug("Running LDAP group membership search: %s", ldap_search_string)
            try:
                search_results = bind.search_s(
                    _config("basedn"),
                    ldap.SCOPE_SUBTREE,
                    ldap_search_string,
                    [
                        salt.utils.stringutils.to_str(_config("accountattributename")),
                        "cn",
                    ],
                )
            except Exception as e:  # pylint: disable=broad-except
                log.error(
                    "Exception thrown while retrieving group membership in AD: %s", e
                )
                return group_list
            for _, entry in search_results:
                if "cn" in entry:
                    group_list.append(salt.utils.stringutils.to_unicode(entry["cn"][0]))
            log.debug("User %s is a member of groups: %s", username, group_list)
        elif _config("freeipa"):
            # --- FreeIPA: one templated search returns group entries whose
            # member attributes are compared against the username.
            escaped_username = ldap.filter.escape_filter_chars(username)
            search_base = _config("group_basedn")
            search_string = _render_template(_config("group_filter"), escaped_username)
            search_results = bind.search_s(
                search_base,
                ldap.SCOPE_SUBTREE,
                search_string,
                [
                    salt.utils.stringutils.to_str(_config("accountattributename")),
                    salt.utils.stringutils.to_str(_config("groupattribute")),
                    "cn",
                ],
            )
            for entry, result in search_results:
                for user in itertools.chain(
                    result.get(_config("accountattributename"), []),
                    result.get(_config("groupattribute"), []),
                ):
                    # Compare against the first RDN value of the member DN.
                    if (
                        username
                        == salt.utils.stringutils.to_unicode(user)
                        .split(",")[0]
                        .split("=")[-1]
                    ):
                        group_list.append(entry.split(",")[0].split("=")[-1])
            log.debug("User %s is a member of groups: %s", username, group_list)
            if not auth(username, kwargs["password"]):
                log.error("LDAP username and password do not match")
                return []
        else:
            # --- Plain OpenLDAP: search the group OU (when configured) for
            # group entries whose member attribute contains the username.
            if _config("groupou"):
                search_base = "ou={},{}".format(_config("groupou"), _config("basedn"))
            else:
                search_base = "{}".format(_config("basedn"))
            search_string = "(&({}={})(objectClass={}))".format(
                _config("accountattributename"), username, _config("groupclass")
            )
            search_results = bind.search_s(
                search_base,
                ldap.SCOPE_SUBTREE,
                search_string,
                [
                    salt.utils.stringutils.to_str(_config("accountattributename")),
                    "cn",
                    salt.utils.stringutils.to_str(_config("groupattribute")),
                ],
            )
            for _, entry in search_results:
                if username in salt.utils.data.decode(
                    entry[_config("accountattributename")]
                ):
                    group_list.append(salt.utils.stringutils.to_unicode(entry["cn"][0]))
            # Second pass: entries keyed by the user's DN contribute the
            # groups named in the group attribute (e.g. memberOf).
            for user, entry in search_results:
                if (
                    username
                    == salt.utils.stringutils.to_unicode(user)
                    .split(",")[0]
                    .split("=")[-1]
                ):
                    for group in salt.utils.data.decode(
                        entry[_config("groupattribute")]
                    ):
                        group_list.append(
                            salt.utils.stringutils.to_unicode(group)
                            .split(",")[0]
                            .split("=")[-1]
                        )
            log.debug("User %s is a member of groups: %s", username, group_list)
            # Only test user auth on first call for job.
            # 'show_jid' only exists on first payload so we can use that for the conditional.
            if "show_jid" in kwargs and not _bind(
                username,
                kwargs.get("password"),
                anonymous=_config("auth_by_group_membership_only", mandatory=False)
                and _config("anonymous", mandatory=False),
            ):
                log.error("LDAP username and password do not match")
                return []
    else:
        log.error("ldap bind to determine group membership FAILED!")
    return group_list
def __expand_ldap_entries(entries, opts=None):
    """
    :param entries: ldap subtree in external_auth config option
    :param opts: Opts to use when __opts__ not defined
    :return: Dictionary with all allowed operations

    Takes the ldap subtree in the external_auth config option and expands it
    with actual minion names

    webadmins%:  <all users in the AD 'webadmins' group>
      - server1
          - .*
      - ldap(OU=webservers,dc=int,dc=bigcompany,dc=com)
          - test.ping
          - service.restart
      - ldap(OU=Domain Controllers,dc=int,dc=bigcompany,dc=com)
          - allowed_fn_list_attribute^

    This function only gets called if auth.ldap.activedirectory = True
    """
    bind = _bind_for_search(opts=opts)
    acl_tree = []
    for user_or_group_dict in entries:
        if not isinstance(user_or_group_dict, dict):
            acl_tree.append(user_or_group_dict)
            continue
        for minion_or_ou, matchers in user_or_group_dict.items():
            permissions = matchers
            retrieved_minion_ids = []
            if minion_or_ou.startswith("ldap("):
                # Slice off the literal "ldap(" prefix and the trailing ")".
                # NOTE: the previous str.lstrip("ldap(") was wrong -- lstrip
                # removes any leading run of the characters {l,d,a,p,(}, so a
                # search base such as "dc=..." lost its leading characters.
                search_base = minion_or_ou[len("ldap("):].rstrip(")")
                search_string = "(objectClass=computer)"
                try:
                    search_results = bind.search_s(
                        search_base, ldap.SCOPE_SUBTREE, search_string, ["cn"]
                    )
                    for ldap_match in search_results:
                        try:
                            minion_id = ldap_match[1]["cn"][0].lower()
                            # Some LDAP/AD trees only have the FQDN of machines
                            # in their computer lists. auth.minion_stripdomains
                            # lets a user strip off configured domain names
                            # and arrive at the basic minion_id
                            # (guard on opts: it defaults to None).
                            if opts and opts.get(
                                "auth.ldap.minion_stripdomains", None
                            ):
                                for domain in opts["auth.ldap.minion_stripdomains"]:
                                    if minion_id.endswith(domain):
                                        minion_id = minion_id[: -len(domain)]
                                        break
                            retrieved_minion_ids.append(minion_id)
                        except TypeError:
                            # TypeError here just means that one of the returned
                            # entries didn't match the format we expected
                            # from LDAP.
                            pass
                    for minion_id in retrieved_minion_ids:
                        acl_tree.append({minion_id: permissions})
                    log.trace("Expanded acl_tree is: %s", acl_tree)
                except ldap.NO_SUCH_OBJECT:
                    pass
            else:
                acl_tree.append({minion_or_ou: matchers})
    log.trace("__expand_ldap_entries: %s", acl_tree)
    return acl_tree
def process_acl(auth_list, opts=None):
    """
    Query LDAP, retrieve list of minion_ids from an OU or other search.
    For each minion_id returned from the LDAP search, copy the perms
    matchers into the auth dictionary
    :param auth_list:
    :param opts: __opts__ for when __opts__ is not injected
    :return: Modified auth list.
    """
    # Expansion is only needed when at least one dict entry is keyed by an
    # "ldap(...)" search expression; plain strings are left untouched.
    has_ldap_entries = any(
        potential_ou.startswith("ldap(")
        for item in auth_list
        if not isinstance(item, str)
        for potential_ou in item.keys()
    )
    if has_ldap_entries:
        auth_list = __expand_ldap_entries(auth_list, opts)
    return auth_list
| |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
from sahara import conductor as c
from sahara import context
from sahara import exceptions as e
from sahara.i18n import _
from sahara.i18n import _LE
from sahara.i18n import _LI
from sahara.i18n import _LW
from sahara.service.edp import job_utils
from sahara.service.edp.oozie import engine as oozie_engine
from sahara.service.edp.spark import engine as spark_engine
from sahara.service.edp.storm import engine as storm_engine
from sahara.utils import edp
from sahara.utils import proxy as p
# Module-level logger for this job manager.
LOG = log.getLogger(__name__)
# Global oslo.config options handle.
CONF = cfg.CONF
# Conductor API used for all DB reads/writes in this module.
conductor = c.API
# EDP engine classes consulted in order for job-type support
# (see get_job_config_hints()).
ENGINES = [oozie_engine.OozieJobEngine,
           spark_engine.SparkJobEngine,
           storm_engine.StormJobEngine]
def _get_job_type(job_execution):
    """Return the type string of the job referenced by *job_execution*."""
    job = conductor.job_get(context.ctx(), job_execution.job_id)
    return job.type
def _get_job_engine(cluster, job_execution):
    """Return the EDP engine of *cluster*'s plugin for this execution's job type."""
    job_type = _get_job_type(job_execution)
    plugin = job_utils.get_plugin(cluster)
    return plugin.get_edp_engine(cluster, job_type)
def _write_job_status(job_execution, job_info):
    """Persist *job_info* on the execution; finalize it when terminated.

    For terminated statuses the end time is stamped and any proxy user
    created for the job is cleaned up.
    """
    update = {"info": job_info}
    if job_info['status'] in edp.JOB_STATUSES_TERMINATED:
        update['end_time'] = datetime.datetime.now()
        # Proxy-user cleanup may hand back sanitized job configs to store.
        job_configs = p.delete_proxy_user_for_job_execution(job_execution)
        if job_configs:
            update['job_configs'] = job_configs
    return conductor.job_execution_update(
        context.ctx(), job_execution, update)
def _update_job_status(engine, job_execution):
    """Poll *engine* for status and persist it when one is reported."""
    job_info = engine.get_job_status(job_execution)
    if job_info is None:
        return job_execution
    return _write_job_status(job_execution, job_info)
def _update_job_execution_extra(cluster, job_execution):
    """Record neutron connection info in 'extra' when networking requires it."""
    needs_neutron_info = (
        (CONF.use_namespaces and not CONF.use_floating_ips)
        or CONF.proxy_command)
    if needs_neutron_info:
        # Any instance of the cluster can supply the neutron details.
        instance = cluster.node_groups[0].instances[0]
        extra = job_execution.extra.copy()
        extra['neutron'] = instance.remote().get_neutron_info()
        job_execution = conductor.job_execution_update(
            context.ctx(), job_execution.id, {'extra': extra})
    return job_execution
def _run_job(job_execution_id):
    """Fetch the execution, pick an engine, launch the job and record ids.

    Silently no-ops when the cluster is not Active; raises EDPError when no
    engine on the cluster's plugin supports the job type.
    """
    ctx = context.ctx()
    job_execution = conductor.job_execution_get(ctx, job_execution_id)
    cluster = conductor.cluster_get(ctx, job_execution.cluster_id)
    if cluster.status != 'Active':
        return
    eng = _get_job_engine(cluster, job_execution)
    if eng is None:
        raise e.EDPError(_("Cluster does not support job type %s")
                         % _get_job_type(job_execution))
    # Attach neutron info to 'extra' when the network config requires it.
    job_execution = _update_job_execution_extra(cluster, job_execution)
    # Job id is a string
    # Status is a string
    # Extra is a dictionary to add to extra in the job_execution
    jid, status, extra = eng.run_job(job_execution)
    # Set the job id and the start time
    # Optionally, update the status and the 'extra' field
    update_dict = {'oozie_job_id': jid,
                   'start_time': datetime.datetime.now()}
    if status:
        update_dict['info'] = {'status': status}
    if extra:
        curr_extra = job_execution.extra.copy()
        curr_extra.update(extra)
        update_dict['extra'] = curr_extra
    job_execution = conductor.job_execution_update(
        ctx, job_execution, update_dict)
def run_job(job_execution_id):
    """Launch a job execution; on any failure cancel it and mark it FAILED."""
    try:
        _run_job(job_execution_id)
    except Exception as ex:
        LOG.warning(
            _LW("Can't run job execution (reason: {reason})").format(
                reason=ex))
        # Best effort: stop whatever was started, then record the failure.
        cancel_job(job_execution_id)
        failure_update = {
            'info': {'status': edp.JOB_STATUS_FAILED},
            'start_time': datetime.datetime.now(),
            'end_time': datetime.datetime.now(),
        }
        conductor.job_execution_update(
            context.ctx(), job_execution_id, failure_update)
def cancel_job(job_execution_id):
    """Cancel a running job execution, polling until it terminates.

    Marks the execution TOBEKILLED, then repeatedly asks its engine to
    cancel (sleeping 3s between attempts) until it reaches a terminated
    state or CONF.job_canceling_timeout elapses. Raises CancelingFailed on
    timeout; returns the (possibly refreshed) job execution otherwise.
    """
    ctx = context.ctx()
    job_execution = conductor.job_execution_get(ctx, job_execution_id)
    # Already finished: nothing to cancel.
    if job_execution.info['status'] in edp.JOB_STATUSES_TERMINATED:
        return job_execution
    cluster = conductor.cluster_get(ctx, job_execution.cluster_id)
    if cluster is None:
        return job_execution
    engine = _get_job_engine(cluster, job_execution)
    if engine is not None:
        job_execution = conductor.job_execution_update(
            ctx, job_execution_id,
            {'info': {'status': edp.JOB_STATUS_TOBEKILLED}})
        timeout = CONF.job_canceling_timeout
        s_time = timeutils.utcnow()
        while timeutils.delta_seconds(s_time, timeutils.utcnow()) < timeout:
            if job_execution.info['status'] not in edp.JOB_STATUSES_TERMINATED:
                try:
                    job_info = engine.cancel_job(job_execution)
                except Exception as ex:
                    # Engine cancel is best-effort; log and retry after sleep.
                    job_info = None
                    LOG.warning(
                        _LW("Error during cancel of job execution: "
                            "{error}").format(error=ex))
                if job_info is not None:
                    job_execution = _write_job_status(job_execution, job_info)
                    LOG.info(_LI("Job execution was canceled successfully"))
                    return job_execution
                context.sleep(3)
                job_execution = conductor.job_execution_get(
                    ctx, job_execution_id)
                if not job_execution:
                    LOG.info(_LI("Job execution was deleted. "
                                 "Canceling current operation."))
                    return job_execution
            else:
                LOG.info(_LI("Job execution status: {status}").format(
                    status=job_execution.info['status']))
                return job_execution
        else:
            # while-else: reached only when the timeout expired without the
            # execution terminating.
            raise e.CancelingFailed(_('Job execution %s was not canceled')
                                    % job_execution.id)
def get_job_status(job_execution_id):
    """Refresh and return the execution, polling its engine when possible."""
    ctx = context.ctx()
    job_execution = conductor.job_execution_get(ctx, job_execution_id)
    cluster = conductor.cluster_get(ctx, job_execution.cluster_id)
    # Status can only be polled against an Active cluster.
    if cluster is None or cluster.status != 'Active':
        return job_execution
    engine = _get_job_engine(cluster, job_execution)
    if engine is not None:
        job_execution = _update_job_status(engine, job_execution)
    return job_execution
def update_job_statuses(cluster_id=None):
    """Refresh the status of every unfinished job execution.

    :param cluster_id: when given, restrict the refresh to that cluster.
    """
    ctx = context.ctx()
    kwargs = {'end_time': None}
    if cluster_id:
        kwargs.update({'cluster_id': cluster_id})
    for je in conductor.job_execution_get_all(ctx, **kwargs):
        try:
            get_job_status(je.id)
        except Exception as ex:
            # 'ex', not 'e': this module imports sahara.exceptions as 'e',
            # and the previous name shadowed that alias inside this function
            # (siblings run_job/cancel_job already use 'ex').
            LOG.error(_LE("Error during update job execution {job}: {error}")
                      .format(job=je.id, error=ex))
def get_job_config_hints(job_type):
    """Return config hints from the first engine supporting *job_type*, or None."""
    matches = (
        engine.get_possible_job_config(job_type)
        for engine in ENGINES
        if job_type in engine.get_supported_job_types()
    )
    return next(matches, None)
| |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Helper script to repack paks for a list of locales.
Gyp doesn't have any built-in looping capability, so this just provides a way to
loop over a list of locales when repacking pak files, thus avoiding a
proliferation of mostly duplicate, cut-n-paste gyp actions.
"""
import optparse
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', '..',
'tools', 'grit'))
from grit.format import data_pack
# The gyp "branding" variable.
BRANDING = None
# Some build paths defined by gyp.
GRIT_DIR = None
SHARE_INT_DIR = None
INT_DIR = None
# The target platform. If it is not defined, sys.platform will be used.
OS = None
# Note that OS is normally set to 'linux' when building for chromeos.
CHROMEOS = False
# Feature flags parsed from the command line in DoMain() ('1' enables).
USE_ASH = False
ENABLE_AUTOFILL_DIALOG = False
ENABLE_EXTENSIONS = False
# Path to the resource-ID whitelist file used when repacking, or None.
WHITELIST = None
# Extra input files.
EXTRA_INPUT_FILES = []
class Usage(Exception):
  """Command-line usage error carrying a human-readable message."""

  def __init__(self, msg):
    # Exception.__init__ is not chained here; consumers read .msg directly.
    self.msg = msg
def calc_output(locale):
  """Determine the file that will be generated for the given locale."""
  #e.g. '<(INTERMEDIATE_DIR)/repack/da.pak',
  # Fake Bidi is generated at a fixed path so that tests can safely
  # reference it.
  if locale == 'fake-bidi':
    return '%s/%s.pak' % (INT_DIR, locale)
  if OS in ('mac', 'ios'):
    # Cocoa locates locales with '_' rather than '-' at runtime
    # (http://crbug.com/20441), and 'en-US' must appear simply as 'en'
    # (http://crbug.com/19165, http://crbug.com/25578).
    if locale == 'en-US':
      locale = 'en'
    return '%s/repack/%s.lproj/locale.pak' % (INT_DIR, locale.replace('-', '_'))
  return os.path.join(INT_DIR, 'repack', locale + '.pak')
def calc_inputs(locale):
  """Determine the files that need processing for the given locale.

  Returns the list of .pak paths fed to the repack step; contents depend
  on the module-level platform/feature globals set by DoMain().
  """
  inputs = []
  #e.g. '<(grit_out_dir)/generated_resources_da.pak'
  inputs.append(os.path.join(GRIT_DIR, 'generated_resources_%s.pak' % locale))
  #e.g. '<(grit_out_dir)/locale_settings_da.pak'
  inputs.append(os.path.join(GRIT_DIR, 'locale_settings_%s.pak' % locale))
  #e.g. '<(grit_out_dir)/platform_locale_settings_da.pak'
  inputs.append(os.path.join(GRIT_DIR,
                'platform_locale_settings_%s.pak' % locale))
  #e.g. '<(SHARED_INTERMEDIATE_DIR)/components/strings/
  # components_strings_da.pak',
  inputs.append(os.path.join(SHARE_INT_DIR, 'components', 'strings',
                'components_strings_%s.pak' % locale))
  if USE_ASH:
    #e.g. '<(SHARED_INTERMEDIATE_DIR)/ash/strings/ash_strings_da.pak',
    inputs.append(os.path.join(SHARE_INT_DIR, 'ash', 'strings',
                  'ash_strings_%s.pak' % locale))
  if CHROMEOS:
    inputs.append(os.path.join(SHARE_INT_DIR, 'ui', 'chromeos', 'strings',
                  'ui_chromeos_strings_%s.pak' % locale))
    inputs.append(os.path.join(SHARE_INT_DIR, 'remoting', 'resources',
                  '%s.pak' % locale))
  if OS != 'ios':
    #e.g.
    # '<(SHARED_INTERMEDIATE_DIR)/content/app/strings/content_strings_da.pak'
    inputs.append(os.path.join(SHARE_INT_DIR, 'content', 'app', 'strings',
                  'content_strings_%s.pak' % locale))
    #e.g. '<(SHARED_INTERMEDIATE_DIR)/device/bluetooth/strings/
    # device_bluetooth_strings_da.pak',
    inputs.append(os.path.join(SHARE_INT_DIR, 'device', 'bluetooth', 'strings',
                  'device_bluetooth_strings_%s.pak' % locale))
    #e.g. '<(SHARED_INTERMEDIATE_DIR)/ui/strings/ui_strings_da.pak',
    inputs.append(os.path.join(SHARE_INT_DIR, 'ui', 'strings',
                  'ui_strings_%s.pak' % locale))
    #e.g. '<(SHARED_INTERMEDIATE_DIR)/ui/strings/app_locale_settings_da.pak',
    inputs.append(os.path.join(SHARE_INT_DIR, 'ui', 'strings',
                  'app_locale_settings_%s.pak' % locale))
  else:
    #e.g. '<(SHARED_INTERMEDIATE_DIR)/ios/chrome/ios_strings_resources_da.pak'
    inputs.append(os.path.join(SHARE_INT_DIR, 'ios', 'chrome',
                  'ios_strings_resources_%s.pak' % locale))
  if ENABLE_AUTOFILL_DIALOG:
    #e.g. '<(SHARED_INTERMEDIATE_DIR)/third_party/libaddressinput/
    # address_input_strings_da.pak',
    inputs.append(os.path.join(SHARE_INT_DIR, 'third_party', 'libaddressinput',
                  'address_input_strings_%s.pak' % locale))
  if ENABLE_EXTENSIONS:
    # For example:
    # '<(SHARED_INTERMEDIATE_DIR)/extensions/strings/extensions_strings_da.pak
    # TODO(jamescook): When Android stops building extensions code move this
    # to the OS != 'ios' and OS != 'android' section below.
    inputs.append(os.path.join(SHARE_INT_DIR, 'extensions', 'strings',
                  'extensions_strings_%s.pak' % locale))
  #e.g. '<(grit_out_dir)/google_chrome_strings_da.pak'
  #     or
  #     '<(grit_out_dir)/chromium_strings_da.pak'
  inputs.append(os.path.join(
      GRIT_DIR, '%s_strings_%s.pak' % (BRANDING, locale)))
  # Add any extra input files.
  for extra_file in EXTRA_INPUT_FILES:
    inputs.append('%s_%s.pak' % (extra_file, locale))
  return inputs
def list_outputs(locales):
  """Returns the names of files that will be generated for the given locales.

  This is to provide gyp the list of output files, so build targets can
  properly track what needs to be built.
  """
  # Quote each element so filename spaces don't mess up gyp's attempt to
  # parse it into a list.
  return " ".join('"%s"' % calc_output(locale) for locale in locales)
def list_inputs(locales):
  """Returns the names of files that will be processed for the given locales.

  This is to provide gyp the list of input files, so build targets can
  properly track their prerequisites.
  """
  all_inputs = []
  for locale in locales:
    all_inputs.extend(calc_inputs(locale))
  # Quote each element so filename spaces don't mess up gyp's attempt to
  # parse it into a list.
  return " ".join('"%s"' % x for x in all_inputs)
def repack_locales(locales):
  """Repack the pak files for each of the given locales."""
  for locale in locales:
    data_pack.DataPack.RePack(calc_output(locale),
                              calc_inputs(locale),
                              whitelist_file=WHITELIST)
def DoMain(argv):
  """Parse |argv|, set the module globals, then list inputs/outputs or repack.

  Returns the quoted file list for -i/-o, otherwise the result of
  repack_locales() (None on success).
  """
  global BRANDING
  global GRIT_DIR
  global SHARE_INT_DIR
  global INT_DIR
  global OS
  global CHROMEOS
  global USE_ASH
  global WHITELIST
  global ENABLE_AUTOFILL_DIALOG
  global ENABLE_EXTENSIONS
  global EXTRA_INPUT_FILES

  parser = optparse.OptionParser("usage: %prog [options] locales")
  parser.add_option("-i", action="store_true", dest="inputs", default=False,
                    help="Print the expected input file list, then exit.")
  parser.add_option("-o", action="store_true", dest="outputs", default=False,
                    help="Print the expected output file list, then exit.")
  parser.add_option("-g", action="store", dest="grit_dir",
                    help="GRIT build files output directory.")
  parser.add_option("-x", action="store", dest="int_dir",
                    help="Intermediate build files output directory.")
  parser.add_option("-s", action="store", dest="share_int_dir",
                    help="Shared intermediate build files output directory.")
  parser.add_option("-b", action="store", dest="branding",
                    help="Branding type of this build.")
  parser.add_option("-e", action="append", dest="extra_input", default=[],
                    help="Full path to an extra input pak file without the\
                    locale suffix and \".pak\" extension.")
  parser.add_option("-p", action="store", dest="os",
                    help="The target OS. (e.g. mac, linux, win, etc.)")
  parser.add_option("--use-ash", action="store", dest="use_ash",
                    help="Whether to include ash strings")
  parser.add_option("--chromeos", action="store",
                    help="Whether building for Chrome OS")
  parser.add_option("--whitelist", action="store", help="Full path to the "
                    "whitelist used to filter output pak file resource IDs")
  parser.add_option("--enable-autofill-dialog", action="store",
                    dest="enable_autofill_dialog",
                    help="Whether to include strings for autofill dialog")
  parser.add_option("--enable-extensions", action="store",
                    dest="enable_extensions",
                    help="Whether to include strings for extensions")
  options, locales = parser.parse_args(argv)

  if not locales:
    # Typo fix: was "Please specificy at least one locale to process.\n".
    parser.error('Please specify at least one locale to process.\n')

  print_inputs = options.inputs
  print_outputs = options.outputs

  GRIT_DIR = options.grit_dir
  INT_DIR = options.int_dir
  SHARE_INT_DIR = options.share_int_dir
  BRANDING = options.branding
  EXTRA_INPUT_FILES = options.extra_input
  OS = options.os
  CHROMEOS = options.chromeos == '1'
  USE_ASH = options.use_ash == '1'
  WHITELIST = options.whitelist
  ENABLE_AUTOFILL_DIALOG = options.enable_autofill_dialog == '1'
  ENABLE_EXTENSIONS = options.enable_extensions == '1'

  # Default the target platform from the host when -p is not given.
  if not OS:
    if sys.platform == 'darwin':
      OS = 'mac'
    elif sys.platform.startswith('linux'):
      OS = 'linux'
    elif sys.platform in ('cygwin', 'win32'):
      OS = 'win'
    else:
      OS = sys.platform

  if not (GRIT_DIR and INT_DIR and SHARE_INT_DIR):
    parser.error('Please specify all of "-g" and "-x" and "-s".\n')
  if print_inputs and print_outputs:
    parser.error('Please specify only one of "-i" or "-o".\n')
  # Need to know the branding, unless we're just listing the outputs.
  if not print_outputs and not BRANDING:
    parser.error('Please specify "-b" to determine the input files.\n')

  if print_inputs:
    return list_inputs(locales)

  if print_outputs:
    return list_outputs(locales)

  return repack_locales(locales)
if __name__ == '__main__':
  results = DoMain(sys.argv[1:])
  if results:
    # Python 2 print statement: emits the quoted file list produced by
    # list_inputs()/list_outputs() for gyp to consume.
    print results
| |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import socket
import threading
from time import sleep
from os.path import expanduser, join, isfile
import paramiko
import paramiko.agent
from sshtunnel import SSHTunnelForwarder
from scp import SCPClient
from azure.cli.core.util import CLIError
from azure.cli.core.prompting import prompt_pass
def _load_key(key_filename):
    """Load an RSA private key from key_filename.

    Prompts for a passphrase and retries if the key file is encrypted.

    :param key_filename: Path to the private key file
    :return: A paramiko.RSAKey instance
    :raises CLIError: If no key could be loaded from the file
    """
    loaded = None
    try:
        loaded = paramiko.RSAKey.from_private_key_file(key_filename, None)
    except paramiko.PasswordRequiredException:
        # The key is passphrase-protected; ask the user and try again.
        passphrase = prompt_pass('Password for private key:')
        loaded = paramiko.RSAKey.from_private_key_file(key_filename, passphrase)
    if loaded is None:
        raise CLIError('failed to load key: {}'.format(key_filename))
    return loaded
def _load_keys(key_filename=None, allow_agent=True):
    """Collect candidate SSH keys, in priority order.

    Order of preference: an explicitly supplied key file, then any keys
    offered by the running SSH agent, then ~/.ssh/id_rsa as a last resort.

    :param key_filename: Optional path to a private key file
    :param allow_agent: Whether to also query the local SSH agent
    :return: A non-empty list of key objects
    :raises CLIError: If no key could be found anywhere
    """
    default_key_path = join(expanduser("~"), '.ssh', 'id_rsa')
    keys = []
    if key_filename is not None:
        keys.append(_load_key(key_filename))
    if allow_agent:
        keys.extend(paramiko.agent.Agent().get_keys())
    # Only fall back to the default key when nothing else was found.
    if not keys and isfile(default_key_path):
        keys.append(_load_key(default_key_path))
    if not keys:
        raise CLIError('No keys available in ssh agent or no key in {}. '
                       'Do you need to add keys to your ssh agent via '
                       'ssh-add or specify a --ssh-key-file?'.format(default_key_path))
    return keys
def secure_copy(user, host, src, dest, key_filename=None, allow_agent=True):
    """Copy a file from a remote host to a local path over SCP.

    :param user: User name on the remote host
    :param host: Remote host to copy from
    :param src: Remote source path
    :param dest: Local destination path
    :param key_filename: Optional path to a private key file
    :param allow_agent: Whether to also query the local SSH agent for keys
    """
    keys = _load_keys(key_filename, allow_agent)
    # Only the first discovered key is tried, matching previous behavior.
    pkey = keys[0]
    ssh = paramiko.SSHClient()
    ssh.load_system_host_keys()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(host, username=user, pkey=pkey)
    try:
        scp = SCPClient(ssh.get_transport())
        try:
            scp.get(src, dest)
        finally:
            scp.close()
    finally:
        # The original leaked the SSH connection (and skipped scp.close()
        # on error); always release both.
        ssh.close()
class ACSClient(object):
    """Manages an SSH connection and tunnels to an ACS cluster node.

    Attributes (all None until connect() succeeds):
        client: The underlying paramiko.SSHClient, or an injected client.
        transport: The paramiko transport backing the connection.
        tunnel_server: An active tunnel forwarder, if one was created.
        host, username, port: Connection parameters captured by connect().
    """

    def __init__(self, client=None):
        """
        :param client: Optional pre-configured SSH client to reuse; when
            None, connect() creates a paramiko.SSHClient on demand.
        """
        self.client = client
        self.transport = None
        self.tunnel_server = None
        self.host = None
        self.username = None
        self.port = None

    def __del__(self):
        # Best-effort cleanup; every attribute is None if connect() never
        # ran, so each close is guarded.
        if self.transport is not None:
            self.transport.close()
        if self.client is not None:
            self.client.close()
        if self.tunnel_server is not None:
            self.tunnel_server.close_tunnel()

    def connect(self, host, username, port=2200,
                key_filename=None):
        """
        Creates a connection to the remote server.
        :param host: Remote host
        :type host: String
        :param username: User name to connect to the remote host
        :type username: String
        :param port: Remote host port
        :type port: Number
        :param key_filename: Optional path to a private key file
        :type key_filename: String
        :return: True if a transport was established, False otherwise
        """
        if not host:
            raise ValueError('Host is missing')
        if not username:
            raise ValueError('Username is missing')
        if not port:
            raise ValueError('Missing port')

        self.host = host
        self.username = username
        self.port = port

        if self.client is None:
            self.client = paramiko.SSHClient()
        self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        pkey = None
        if key_filename is not None:
            pkey = _load_key(key_filename)
        self.client.connect(
            hostname=host,
            port=port,
            username=username,
            pkey=pkey)
        self.transport = self.client.get_transport()
        return self.transport is not None

    def run(self, command, background=False):
        """
        Runs a command on the remote host
        :param command: Command to run on the remote host
        :type command: String
        :param background: True to run it in a separate thread,
        False should be run in the foreground
        :type background: Boolean
        :return: (stdout, stderr) in the foreground; None when backgrounded
        """
        if background:
            t = threading.Thread(target=ACSClient._run_cmd, args=(self, command))
            # Daemonize so a pending remote command never blocks interpreter
            # shutdown.
            t.daemon = True
            t.start()
            return
        return self._run_cmd(command)

    def _run_cmd(self, command):
        """
        Runs a command on the remote host
        :param command: Command to run on the remote host
        :type command: String
        :return: (stdout, stderr) file-like objects from exec_command
        """
        if not command:
            raise ValueError('Command is missing')
        _, stdout, stderr = self.client.exec_command(command)
        return stdout, stderr

    def file_exists(self, file_path):
        """
        Checks if file on the remote exists
        :param file_path: Full path to the file on remote machine
        :type file_path: String
        :return: True if the path exists, False if stat raised IOError
        """
        if not file_path:
            raise ValueError('Missing file path')
        if self.transport is None:
            raise TypeError('Transport cannot be none')
        sftp = self.transport.open_sftp_client()
        result = None
        try:
            # SFTP stat is the cheapest existence probe available here.
            sftp.stat(file_path)
            result = True
        except IOError:
            result = False
        finally:
            sftp.close()
        return result

    def create_tunnel(self, remote_host, remote_port, local_port=0):
        """
        Creates a tunnel to the remote host and blocks, forwarding traffic
        until interrupted with Ctrl+C.
        :param remote_host: Remote host to tunnel to
        :type remote_host: String
        :param remote_port: Remote port to tunnel to
        :type remote_port: Number
        :param local_port: Local port. If set to 0, random local port is selected
        :type local_port: Number
        """
        # BUG FIX: the original used "local_port is 0" (identity comparison
        # with an int literal), which only works via CPython's small-int
        # caching and warns on 3.8+; use equality instead.
        if local_port == 0:
            local_port = self.get_available_local_port()

        with SSHTunnelForwarder((self.host, self.port),
                                ssh_username=self.username,
                                remote_bind_address=(remote_host, remote_port),
                                local_bind_address=('0.0.0.0', local_port)):
            try:
                # Keep the tunnel open until the user interrupts.
                while True:
                    sleep(1)
            except KeyboardInterrupt:
                pass

    @staticmethod
    def get_available_local_port():
        """
        Gets a random, available local port
        :return: A port number the OS reported as free at bind time
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # pylint: disable=no-member
        # Binding to port 0 asks the OS to pick any free ephemeral port.
        s.bind(('', 0))
        s.listen(1)
        port = s.getsockname()[1]
        s.close()
        return port
| |
#!/usr/bin/env python
# Lint as: python3
"""Prometheus-based statistics collection."""
import collections
from typing import Dict, Text
import prometheus_client
import six
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import stats as rdf_stats
from grr_response_core.lib.util import compatibility
from grr_response_core.lib.util import precondition
from grr_response_core.stats import stats_collector
from grr_response_core.stats import stats_utils
class _Metric(object):
  """A Metric that wraps a prometheus_client metric.

  Attributes:
    metadata: An rdf_stats.MetricMetadata instance describing this _Metric.
    fields: A list of (field name, field type) tuples, defining the dimensions
      of this metric.
    metric: The underlying metric, an instance of prometheus_client.Counter,
      Gauge, or Histogram.
  """

  def __init__(self, metadata: rdf_stats.MetricMetadata,
               registry: prometheus_client.registry.CollectorRegistry):
    """Instantiates a new _Metric.

    Args:
      metadata: An rdf_stats.MetricMetadata instance describing this _Metric.
      registry: A prometheus_client.Registry instance.

    Raises:
      ValueError: metadata contains an unknown metric_type.
    """
    self.metadata = metadata
    self.fields = stats_utils.FieldDefinitionTuplesFromProtos(
        metadata.fields_defs)
    label_names = [field_name for field_name, _ in self.fields]

    metric_type = metadata.metric_type
    if metric_type == rdf_stats.MetricMetadata.MetricType.COUNTER:
      self.metric = prometheus_client.Counter(
          metadata.varname,
          metadata.docstring,
          labelnames=label_names,
          registry=registry)
    elif metric_type == rdf_stats.MetricMetadata.MetricType.EVENT:
      # When metadata declares no bins, fall back to a generic latency-style
      # bucket layout.
      buckets = metadata.bins or [
          0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.75, 1, 1.5, 2, 2.5, 3, 4, 5, 6, 7, 8,
          9, 10, 15, 20, 50, 100
      ]
      self.metric = prometheus_client.Histogram(
          metadata.varname,
          metadata.docstring,
          labelnames=label_names,
          buckets=buckets,
          registry=registry)
    elif metric_type == rdf_stats.MetricMetadata.MetricType.GAUGE:
      self.metric = prometheus_client.Gauge(
          metadata.varname,
          metadata.docstring,
          labelnames=label_names,
          registry=registry)
    else:
      raise ValueError("Unknown metric type: {!r}".format(metric_type))

  def Validate(self, fields):
    """Raises ValueError unless fields matches the declared field count."""
    if len(fields or ()) != len(self.fields):
      raise ValueError(
          "Statistic {} was created with {!r} fields, but a value with fields"
          " {!r} was trying to be saved.".format(self.metadata.varname,
                                                 self.fields, fields))

  def ForFields(self, fields) -> prometheus_client.metrics.MetricWrapperBase:
    """Returns the child metric for the given field values.

    With no fields, the parent metric itself is returned.
    """
    self.Validate(fields)
    return self.metric.labels(*fields) if fields else self.metric

  def __repr__(self):
    return "<{} varname={!r} fields={!r} metric={!r}>".format(
        compatibility.GetName(type(self)), self.metadata.varname, self.fields,
        self.metric)
def _DistributionFromHistogram(metric, values_by_suffix):
  """Instantiate a rdf_stats.Distribution from a Prometheus Histogram.

  Prometheus Histogram uses cumulative "buckets" lower or equal to an upper
  bound. At instantiation, +Inf is implicitly appended to the upper bounds.
  The delimiters [0.0, 0.1, 0.2 (, +Inf)] produce the following buckets:
  Bucket "0.0" : -Inf <= values <= 0.0
  Bucket "0.1" : -Inf <= values <= 0.1
  Bucket "0.2" : -Inf <= values <= 0.2
  Bucket "+Inf": -Inf <= values <= +Inf

  Distribution uses exclusive bins greater or equal to a lower bound and
  strictly lower than the next lower bound. At instantiation, -Inf is
  implicitly prepended. The delimiters [(-Inf,) 0.0, 0.1, 0.2] produce the
  following bins:
  Bin "-Inf": -Inf <= values < 0.0
  Bin "0.0" : 0.0 <= values < 0.1
  Bin "0.1" : 0.1 <= values < 0.2
  Bin "0.2" : 0.2 <= values <= +Inf

  Thus, Histogram buckets can be transformed to Distribution bins, by reading
  in the same order and subtracting the value of the previous bin to remove
  the cumulative sum. There is a slight incompatibility for values equal to
  bin boundaries, because boundaries describe the upper bound for Prometheus
  and the lower bound for our internal implementation.

  Args:
    metric: prometheus_stats_collector.Metric
    values_by_suffix: dict of metric name suffixes and sample values lists

  Returns:
    rdf_stats.Distribution

  Raises:
    ValueError: The Histogram and metadata bin count do not match.
  """
  dist = rdf_stats.Distribution(bins=list(metric.metadata.bins))
  buckets = values_by_suffix["_bucket"]
  if metric.metadata.bins and len(dist.heights) != len(buckets):
    # BUG FIX: the original message used implicit string concatenation with
    # a missing trailing space, producing "underlyingHistogram".
    raise ValueError(
        "Trying to create Distribution with {} bins, but underlying "
        "Histogram has {} buckets".format(len(dist.heights), len(buckets)))
  dist.heights = buckets

  # Remove cumulative sum by subtracting the value of the previous bin.
  # Iterating in reverse lets each height be corrected before it is used
  # as the predecessor of the next one.
  for i in reversed(range(1, len(dist.heights))):
    dist.heights[i] -= dist.heights[i - 1]

  dist.count = values_by_suffix["_count"][0]
  dist.sum = values_by_suffix["_sum"][0]
  return dist
class PrometheusStatsCollector(stats_collector.StatsCollector):
  """Prometheus-based StatsCollector.

  This StatsCollector maps native Counters and Gauges to their Prometheus
  counterparts. Native Events are mapped to Prometheus Histograms.

  Attributes:
    lock: threading.Lock required by the utils.Synchronized decorator.
  """

  def __init__(self, registry=None):
    """Instantiates a new PrometheusStatsCollector.

    Args:
      registry: An instance of prometheus_client.CollectorRegistry. If None, a
        new CollectorRegistry is instantiated. Use prometheus_client.REGISTRY
        for the global default registry.
    """
    # Maps metric varname -> _Metric wrapper; populated by _InitializeMetric.
    self._metrics = {}  # type: Dict[Text, _Metric]

    if registry is None:
      self._registry = prometheus_client.CollectorRegistry(auto_describe=True)
    else:
      self._registry = registry

    super().__init__()

  def _InitializeMetric(self, metadata: rdf_stats.MetricMetadata):
    """Creates and registers the _Metric described by metadata."""
    self._metrics[metadata.varname] = _Metric(metadata, registry=self._registry)

  @utils.Synchronized
  def IncrementCounter(self, metric_name, delta=1, fields=None):
    """Adds delta to the named Prometheus Counter."""
    metric = self._metrics[metric_name]
    counter = metric.ForFields(fields)  # type: prometheus_client.Counter
    counter.inc(delta)

  @utils.Synchronized
  def RecordEvent(self, metric_name, value, fields=None):
    """Records a numeric observation in the named Prometheus Histogram."""
    # TODO(user): decouple validation from implementation.
    # Use validation wrapper approach in StatsCollector (similar to
    # how it's done in REL_DB).
    precondition.AssertType(value, six.integer_types + (float,))
    metric = self._metrics[metric_name]
    histogram = metric.ForFields(fields)  # type: prometheus_client.Histogram
    histogram.observe(value)

  @utils.Synchronized
  def SetGaugeValue(self, metric_name, value, fields=None):
    """Sets the named Prometheus Gauge to value."""
    metric = self._metrics[metric_name]
    gauge = metric.ForFields(fields)  # type: prometheus_client.Gauge
    gauge.set(value)

  @utils.Synchronized
  def SetGaugeCallback(self, metric_name, callback, fields=None):
    """Makes the named Gauge report the return value of callback on scrape."""
    metric = self._metrics[metric_name]
    gauge = metric.ForFields(fields)  # type: prometheus_client.Gauge
    gauge.set_function(callback)

  @utils.Synchronized
  def GetMetricFields(self, metric_name):
    """Returns the distinct field-value tuples seen for the named metric."""
    metric = self._metrics[metric_name]
    if not metric.fields:
      return []

    field_tuples = set()
    # Collect label values from every exported sample; a set deduplicates
    # across the multiple samples a Histogram/Counter emits per child.
    for prom_metric in metric.metric.collect():
      for sample in prom_metric.samples:
        labels = [sample.labels[field_name] for field_name, _ in metric.fields]
        field_tuples.add(tuple(labels))
    return list(field_tuples)

  @utils.Synchronized
  def GetMetricValue(self, metric_name, fields=None):
    """Reads back the current value of a metric by scraping its samples."""
    metric = self._metrics[metric_name]
    metric_type = metric.metadata.metric_type
    sub_metrics = metric.ForFields(fields).collect()
    samples = [sample for sm in sub_metrics for sample in sm.samples]
    values_by_suffix = collections.defaultdict(list)
    for sample in samples:
      # Prometheus sample names are "<varname><suffix>", with suffixes like
      # "_total", "_count", "_sum", "_bucket"; strip the varname to group
      # sample values by suffix.
      suffix = sample.name.replace(metric_name, "")
      values_by_suffix[suffix].append(sample.value)

    if metric_type == rdf_stats.MetricMetadata.MetricType.EVENT:
      return _DistributionFromHistogram(metric, values_by_suffix)
    elif metric_type == rdf_stats.MetricMetadata.MetricType.COUNTER:
      return values_by_suffix["_total"][0]
    else:
      # Gauge: the last exported sample carries the current value.
      return samples[-1].value
| |
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
from asn1crypto import pem, x509
from oscrypto import trust_list
from ._errors import pretty_message
from ._types import byte_cls, type_name
from .errors import PathBuildingError, DuplicateCertificateError
from .path import ValidationPath
class CertificateRegistry():
    """
    Contains certificate lists used to build validation paths
    """

    # A dict with keys being asn1crypto.x509.Certificate.Name.hashable byte
    # string. Each value is a list of asn1crypto.x509.Certificate objects.
    _subject_map = None

    # A dict with keys being asn1crypto.x509.Certificate.key_identifier byte
    # string. Each value is an asn1crypto.x509.Certificate object.
    _key_identifier_map = None

    # A dict with keys being asn1crypto.x509.Certificate.signature byte string.
    # Each value is a bool - if the certificate is a CA cert.
    _ca_lookup = None

    def __init__(self, trust_roots=None, extra_trust_roots=None, other_certs=None):
        """
        :param trust_roots:
            If the operating system's trust list should not be used, instead
            pass a list of byte strings containing DER or PEM-encoded X.509
            certificates, or asn1crypto.x509.Certificate objects. These
            certificates will be used as the trust roots for the path being
            built.

        :param extra_trust_roots:
            If the operating system's trust list should be used, but augmented
            with one or more extra certificates. This should be a list of byte
            strings containing DER or PEM-encoded X.509 certificates, or
            asn1crypto.x509.Certificate objects.

        :param other_certs:
            A list of byte strings containing DER or PEM-encoded X.509
            certificates, or a list of asn1crypto.x509.Certificate objects.
            These other certs are usually provided by the service/item being
            validated. In SSL, these would be intermediate chain certs.
        """

        if trust_roots is not None and not isinstance(trust_roots, list):
            raise TypeError(pretty_message(
                '''
                trust_roots must be a list of byte strings or
                asn1crypto.x509.Certificate objects, not %s
                ''',
                type_name(trust_roots)
            ))

        if extra_trust_roots is not None and not isinstance(extra_trust_roots, list):
            raise TypeError(pretty_message(
                '''
                extra_trust_roots must be a list of byte strings or
                asn1crypto.x509.Certificate objects, not %s
                ''',
                type_name(extra_trust_roots)
            ))

        if other_certs is not None and not isinstance(other_certs, list):
            raise TypeError(pretty_message(
                '''
                other_certs must be a list of byte strings or
                asn1crypto.x509.Certificate objects, not %s
                ''',
                type_name(other_certs)
            ))

        if other_certs is None:
            other_certs = []
        else:
            other_certs = self._validate_unarmor(other_certs, 'other_certs')

        # Only consult the operating system trust store when no explicit
        # trust roots were supplied.
        if trust_roots is None:
            trust_roots = [e[0] for e in trust_list.get_list()]
        else:
            trust_roots = self._validate_unarmor(trust_roots, 'trust_roots')

        if extra_trust_roots is not None:
            trust_roots.extend(self._validate_unarmor(extra_trust_roots, 'extra_trust_roots'))

        self._subject_map = {}
        self._key_identifier_map = {}
        self._ca_lookup = {}

        # Index trust roots by subject and key identifier, and record them
        # (keyed by signature) so _walk_issuers/is_ca can recognize them as
        # path-terminating CA certs.
        for trust_root in trust_roots:
            hashable = trust_root.subject.hashable
            if hashable not in self._subject_map:
                self._subject_map[hashable] = []
            self._subject_map[hashable].append(trust_root)
            if trust_root.key_identifier:
                self._key_identifier_map[trust_root.key_identifier] = trust_root
            self._ca_lookup[trust_root.signature] = True

        # Intermediate/other certs are indexed for issuer lookup but are
        # never added to the CA lookup.
        for other_cert in other_certs:
            hashable = other_cert.subject.hashable
            if hashable not in self._subject_map:
                self._subject_map[hashable] = []
            self._subject_map[hashable].append(other_cert)
            if other_cert.key_identifier:
                self._key_identifier_map[other_cert.key_identifier] = other_cert

    def _validate_unarmor(self, certs, var_name):
        """
        Takes a list of byte strings or asn1crypto.x509.Certificates objects,
        validates and loads them while unarmoring any PEM-encoded contents

        :param certs:
            A list of byte strings or asn1crypto.x509.Certificate objects

        :param var_name:
            A unicode variable name to use in any TypeError exceptions

        :return:
            A list of asn1crypto.x509.Certificate objects
        """

        output = []
        for cert in certs:
            if isinstance(cert, x509.Certificate):
                output.append(cert)
            else:
                if not isinstance(cert, byte_cls):
                    raise TypeError(pretty_message(
                        '''
                        %s must contain only byte strings or
                        asn1crypto.x509.Certificate objects, not %s
                        ''',
                        var_name,
                        type_name(cert)
                    ))
                # Strip PEM armor (if present) before DER-parsing.
                if pem.detect(cert):
                    _, _, cert = pem.unarmor(cert)
                output.append(x509.Certificate.load(cert))
        return output

    def is_ca(self, cert):
        """
        Checks if a certificate is in the list of CA certs in this registry

        :param cert:
            An asn1crypto.x509.Certificate object

        :return:
            A boolean - if the certificate is in the CA list
        """

        # CA membership is keyed on the certificate's signature bytes.
        return self._ca_lookup.get(cert.signature, False)

    def add_other_cert(self, cert):
        """
        Allows adding an "other" cert that is obtained from doing revocation
        check via OCSP or CRL, or some other method

        :param cert:
            An asn1crypto.x509.Certificate object or a byte string of a DER or
            PEM-encoded certificate

        :return:
            A boolean indicating if the certificate was added - will return
            False if the certificate was already present
        """

        if not isinstance(cert, x509.Certificate):
            if not isinstance(cert, byte_cls):
                raise TypeError(pretty_message(
                    '''
                    cert must be a byte string or an instance of
                    asn1crypto.x509.Certificate, not %s
                    ''',
                    type_name(cert)
                ))
            if pem.detect(cert):
                _, _, cert = pem.unarmor(cert)
            cert = x509.Certificate.load(cert)

        hashable = cert.subject.hashable
        if hashable not in self._subject_map:
            self._subject_map[hashable] = []

        # Don't add the cert if we already have it
        else:
            serial_number = cert.serial_number
            for existing_cert in self._subject_map[hashable]:
                if existing_cert.serial_number == serial_number:
                    return False

        self._subject_map[hashable].append(cert)
        if cert.key_identifier:
            self._key_identifier_map[cert.key_identifier] = cert
        else:
            # No key identifier available; fall back to indexing by the
            # SHA-1 of the public key.
            self._key_identifier_map[cert.public_key.sha1] = cert

        return True

    def retrieve_by_key_identifier(self, key_identifier):
        """
        Retrieves a cert via its key identifier

        :param key_identifier:
            A byte string of the key identifier

        :return:
            None or an asn1crypto.x509.Certificate object
        """

        if not isinstance(key_identifier, byte_cls):
            raise TypeError(pretty_message(
                '''
                key_identifier must be a byte string, not %s
                ''',
                type_name(key_identifier)
            ))

        return self._key_identifier_map.get(key_identifier)

    def retrieve_by_name(self, name, first_certificate=None):
        """
        Retrieves a list certs via their subject name

        :param name:
            An asn1crypto.x509.Name object

        :param first_certificate:
            An asn1crypto.x509.Certificate object that if found, should be
            placed first in the result list

        :return:
            A list of asn1crypto.x509.Certificate objects
        """

        if not isinstance(name, x509.Name):
            raise TypeError(pretty_message(
                '''
                name must be an instance of asn1crypto.x509.Name, not %s
                ''',
                type_name(name)
            ))

        if first_certificate and not isinstance(first_certificate, x509.Certificate):
            raise TypeError(pretty_message(
                '''
                first_certificate must be an instance of
                asn1crypto.x509.Certificate, not %s
                ''',
                type_name(first_certificate)
            ))

        hashable = name.hashable
        if hashable not in self._subject_map:
            return []

        certs = self._subject_map[hashable]
        first = None
        output = []
        # Certificates are compared by SHA-256 fingerprint to find the one
        # that should be moved to the front of the result.
        for cert in certs:
            if first_certificate and first_certificate.sha256 == cert.sha256:
                first = cert
            else:
                output.append(cert)
        if first:
            output.insert(0, first)
        return output

    def build_paths(self, end_entity_cert):
        """
        Builds a list of ValidationPath objects from a certificate in the
        operating system trust store to the end-entity certificate

        :param end_entity_cert:
            A byte string of a DER or PEM-encoded X.509 certificate, or an
            instance of asn1crypto.x509.Certificate

        :return:
            A list of certvalidator.path.ValidationPath objects that represent
            the possible paths from the end-entity certificate to one of the CA
            certs.
        """

        if not isinstance(end_entity_cert, byte_cls) and not isinstance(end_entity_cert, x509.Certificate):
            raise TypeError(pretty_message(
                '''
                end_entity_cert must be a byte string or an instance of
                asn1crypto.x509.Certificate, not %s
                ''',
                type_name(end_entity_cert)
            ))

        if isinstance(end_entity_cert, byte_cls):
            if pem.detect(end_entity_cert):
                _, _, end_entity_cert = pem.unarmor(end_entity_cert)
            end_entity_cert = x509.Certificate.load(end_entity_cert)

        path = ValidationPath(end_entity_cert)
        paths = []
        failed_paths = []

        self._walk_issuers(path, paths, failed_paths)

        if len(paths) == 0:
            # Report the first failed attempt; _walk_issuers guarantees at
            # least one failed path whenever no complete path was found.
            cert_name = end_entity_cert.subject.human_friendly
            missing_issuer_name = failed_paths[0].first.issuer.human_friendly
            raise PathBuildingError(pretty_message(
                '''
                Unable to build a validation path for the certificate "%s" - no
                issuer matching "%s" was found
                ''',
                cert_name,
                missing_issuer_name
            ))

        return paths

    def _walk_issuers(self, path, paths, failed_paths):
        """
        Recursively looks through the list of known certificates for the issuer
        of the certificate specified, stopping once the certificate in question
        is one contained within the CA certs list

        :param path:
            A ValidationPath object representing the current traversal of
            possible paths

        :param paths:
            A list of completed ValidationPath objects. This is mutated as
            results are found.

        :param failed_paths:
            A list of certvalidator.path.ValidationPath objects that failed due
            to no matching issuer before reaching a certificate from the CA
            certs list
        """

        # A path is complete once its first cert is a known CA cert.
        if path.first.signature in self._ca_lookup:
            paths.append(path)
            return

        new_branches = 0
        for issuer in self._possible_issuers(path.first):
            try:
                self._walk_issuers(path.copy().prepend(issuer), paths, failed_paths)
                new_branches += 1
            except (DuplicateCertificateError):
                # NOTE(review): presumably raised by ValidationPath.prepend()
                # when the issuer already appears in the path, preventing
                # cycles - confirm against certvalidator.path.
                pass

        if not new_branches:
            failed_paths.append(path)

    def _possible_issuers(self, cert):
        """
        Returns a generator that will list all possible issuers for the cert

        :param cert:
            An asn1crypto.x509.Certificate object to find the issuer of
        """

        issuer_hashable = cert.issuer.hashable
        if issuer_hashable not in self._subject_map:
            return

        for issuer in self._subject_map[issuer_hashable]:
            # Info from the authority key identifier extension can be used to
            # eliminate possible options when multiple keys with the same
            # subject exist, such as during a transition, or with cross-signing.
            if cert.authority_key_identifier and issuer.key_identifier:
                if cert.authority_key_identifier != issuer.key_identifier:
                    continue
            elif cert.authority_issuer_serial:
                if cert.authority_issuer_serial != issuer.issuer_serial:
                    continue

            yield issuer
| |
# Zed Attack Proxy (ZAP) and its related class files.
#
# ZAP is an HTTP/HTTPS proxy for assessing web application security.
#
# Copyright 2016 the ZAP development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file was automatically generated.
"""
import six
class ascan(object):
def __init__(self, zap):
self.zap = zap
def status(self, scanid=None):
params = {}
if scanid is not None:
params['scanId'] = scanid
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/status/', params)))
def scan_progress(self, scanid=None):
params = {}
if scanid is not None:
params['scanId'] = scanid
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/scanProgress/', params)))
def messages_ids(self, scanid):
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/messagesIds/', {'scanId': scanid})))
def alerts_ids(self, scanid):
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/alertsIds/', {'scanId': scanid})))
@property
def scans(self):
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/scans/')))
@property
def scan_policy_names(self):
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/scanPolicyNames/')))
@property
def excluded_from_scan(self):
"""
Gets the regexes of URLs excluded from the active scans.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/excludedFromScan/')))
def scanners(self, scanpolicyname=None, policyid=None):
params = {}
if scanpolicyname is not None:
params['scanPolicyName'] = scanpolicyname
if policyid is not None:
params['policyId'] = policyid
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/scanners/', params)))
def policies(self, scanpolicyname=None, policyid=None):
params = {}
if scanpolicyname is not None:
params['scanPolicyName'] = scanpolicyname
if policyid is not None:
params['policyId'] = policyid
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/policies/', params)))
@property
def attack_mode_queue(self):
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/attackModeQueue/')))
@property
def excluded_params(self):
"""
Gets all the parameters that are excluded. For each parameter the following are shown: the name, the URL, and the parameter type.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/excludedParams/')))
@property
def option_excluded_param_list(self):
"""
Use view excludedParams instead.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionExcludedParamList/')))
@property
def excluded_param_types(self):
"""
Gets all the types of excluded parameters. For each type the following are shown: the ID and the name.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/excludedParamTypes/')))
@property
def option_attack_policy(self):
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionAttackPolicy/')))
@property
def option_default_policy(self):
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionDefaultPolicy/')))
@property
def option_delay_in_ms(self):
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionDelayInMs/')))
@property
def option_handle_anti_csrf_tokens(self):
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionHandleAntiCSRFTokens/')))
@property
def option_host_per_scan(self):
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionHostPerScan/')))
@property
def option_max_chart_time_in_mins(self):
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionMaxChartTimeInMins/')))
@property
def option_max_results_to_list(self):
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionMaxResultsToList/')))
@property
def option_max_rule_duration_in_mins(self):
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionMaxRuleDurationInMins/')))
@property
def option_max_scan_duration_in_mins(self):
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionMaxScanDurationInMins/')))
@property
def option_max_scans_in_ui(self):
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionMaxScansInUI/')))
@property
def option_target_params_enabled_rpc(self):
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionTargetParamsEnabledRPC/')))
@property
def option_target_params_injectable(self):
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionTargetParamsInjectable/')))
@property
def option_thread_per_host(self):
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionThreadPerHost/')))
@property
def option_allow_attack_on_start(self):
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionAllowAttackOnStart/')))
@property
def option_inject_plugin_id_in_header(self):
"""
Tells whether or not the active scanner should inject the HTTP request header X-ZAP-Scan-ID, with the ID of the scanner that's sending the requests.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionInjectPluginIdInHeader/')))
@property
def option_prompt_in_attack_mode(self):
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionPromptInAttackMode/')))
@property
def option_prompt_to_clear_finished_scans(self):
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionPromptToClearFinishedScans/')))
@property
def option_rescan_in_attack_mode(self):
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionRescanInAttackMode/')))
@property
def option_scan_headers_all_requests(self):
"""
Tells whether or not the HTTP Headers of all requests should be scanned. Not just requests that send parameters, through the query or request body.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionScanHeadersAllRequests/')))
@property
def option_show_advanced_dialog(self):
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/view/optionShowAdvancedDialog/')))
    def scan(self, url=None, recurse=None, inscopeonly=None, scanpolicyname=None, method=None, postdata=None, contextid=None, apikey=''):
        """
        Runs the active scanner against the given URL and/or Context. Optionally, the 'recurse' parameter can be used to scan URLs under the given URL, the parameter 'inScopeOnly' can be used to constrain the scan to URLs that are in scope (ignored if a Context is specified), the parameter 'scanPolicyName' allows to specify the scan policy (if none is given it uses the default scan policy), the parameters 'method' and 'postData' allow to select a given request in conjunction with the given URL.
        """
        # Only forward parameters the caller actually supplied; omitted
        # parameters let the ZAP API apply its own defaults.
        params = {'apikey': apikey}
        if url is not None:
            params['url'] = url
        if recurse is not None:
            params['recurse'] = recurse
        if inscopeonly is not None:
            params['inScopeOnly'] = inscopeonly
        if scanpolicyname is not None:
            params['scanPolicyName'] = scanpolicyname
        if method is not None:
            params['method'] = method
        if postdata is not None:
            params['postData'] = postdata
        if contextid is not None:
            params['contextId'] = contextid
        # The API returns a single-entry mapping; unwrap it to its value.
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/scan/', params)))
    def scan_as_user(self, url=None, contextid=None, userid=None, recurse=None, scanpolicyname=None, method=None, postdata=None, apikey=''):
        """
        Active Scans from the perspective of a User, obtained using the given Context ID and User ID. See 'scan' action for more details.
        """
        # Only forward parameters the caller actually supplied; omitted
        # parameters let the ZAP API apply its own defaults.
        params = {'apikey': apikey}
        if url is not None:
            params['url'] = url
        if contextid is not None:
            params['contextId'] = contextid
        if userid is not None:
            params['userId'] = userid
        if recurse is not None:
            params['recurse'] = recurse
        if scanpolicyname is not None:
            params['scanPolicyName'] = scanpolicyname
        if method is not None:
            params['method'] = method
        if postdata is not None:
            params['postData'] = postdata
        # The API returns a single-entry mapping; unwrap it to its value.
        return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/scanAsUser/', params)))
def pause(self, scanid, apikey=''):
    """Pauses the active scan with the given scan ID."""
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/pause/', {'scanId': scanid, 'apikey': apikey})))
def resume(self, scanid, apikey=''):
    """Resumes the (paused) active scan with the given scan ID."""
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/resume/', {'scanId': scanid, 'apikey': apikey})))
def stop(self, scanid, apikey=''):
    """Stops the active scan with the given scan ID."""
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/stop/', {'scanId': scanid, 'apikey': apikey})))
def remove_scan(self, scanid, apikey=''):
    """Removes the active scan with the given scan ID."""
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/removeScan/', {'scanId': scanid, 'apikey': apikey})))
def pause_all_scans(self, apikey=''):
    """Pauses all active scans."""
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/pauseAllScans/', {'apikey': apikey})))
def resume_all_scans(self, apikey=''):
    """Resumes all (paused) active scans."""
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/resumeAllScans/', {'apikey': apikey})))
def stop_all_scans(self, apikey=''):
    """Stops all active scans."""
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/stopAllScans/', {'apikey': apikey})))
def remove_all_scans(self, apikey=''):
    """Removes all active scans."""
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/removeAllScans/', {'apikey': apikey})))
def clear_excluded_from_scan(self, apikey=''):
    """
    Clears the regexes of URLs excluded from the active scans.
    """
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/clearExcludedFromScan/', {'apikey': apikey})))
def exclude_from_scan(self, regex, apikey=''):
    """
    Adds a regex of URLs that should be excluded from the active scans.
    """
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/excludeFromScan/', {'regex': regex, 'apikey': apikey})))
def enable_all_scanners(self, scanpolicyname=None, apikey=''):
    """Enables all scanners, optionally constrained to the named scan policy."""
    params = {'apikey': apikey}
    if scanpolicyname is not None:
        params['scanPolicyName'] = scanpolicyname
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/enableAllScanners/', params)))
def disable_all_scanners(self, scanpolicyname=None, apikey=''):
    """Disables all scanners, optionally constrained to the named scan policy."""
    params = {'apikey': apikey}
    if scanpolicyname is not None:
        params['scanPolicyName'] = scanpolicyname
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/disableAllScanners/', params)))
def enable_scanners(self, ids, scanpolicyname=None, apikey=''):
    """Enables the scanners with the given IDs, optionally within the named scan policy."""
    params = {'ids': ids, 'apikey': apikey}
    if scanpolicyname is not None:
        params['scanPolicyName'] = scanpolicyname
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/enableScanners/', params)))
def disable_scanners(self, ids, scanpolicyname=None, apikey=''):
    """Disables the scanners with the given IDs, optionally within the named scan policy."""
    params = {'ids': ids, 'apikey': apikey}
    if scanpolicyname is not None:
        params['scanPolicyName'] = scanpolicyname
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/disableScanners/', params)))
def set_enabled_policies(self, ids, scanpolicyname=None, apikey=''):
    """Sets the enabled policies to the given IDs, optionally within the named scan policy."""
    params = {'ids': ids, 'apikey': apikey}
    if scanpolicyname is not None:
        params['scanPolicyName'] = scanpolicyname
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setEnabledPolicies/', params)))
def set_policy_attack_strength(self, id, attackstrength, scanpolicyname=None, apikey=''):
    """Sets the attack strength of the policy with the given ID, optionally within the named scan policy."""
    params = {'id': id, 'attackStrength': attackstrength, 'apikey': apikey}
    if scanpolicyname is not None:
        params['scanPolicyName'] = scanpolicyname
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setPolicyAttackStrength/', params)))
def set_policy_alert_threshold(self, id, alertthreshold, scanpolicyname=None, apikey=''):
    """Sets the alert threshold of the policy with the given ID, optionally within the named scan policy."""
    params = {'id': id, 'alertThreshold': alertthreshold, 'apikey': apikey}
    if scanpolicyname is not None:
        params['scanPolicyName'] = scanpolicyname
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setPolicyAlertThreshold/', params)))
def set_scanner_attack_strength(self, id, attackstrength, scanpolicyname=None, apikey=''):
    """Sets the attack strength of the scanner with the given ID, optionally within the named scan policy."""
    params = {'id': id, 'attackStrength': attackstrength, 'apikey': apikey}
    if scanpolicyname is not None:
        params['scanPolicyName'] = scanpolicyname
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setScannerAttackStrength/', params)))
def set_scanner_alert_threshold(self, id, alertthreshold, scanpolicyname=None, apikey=''):
    """Sets the alert threshold of the scanner with the given ID, optionally within the named scan policy."""
    params = {'id': id, 'alertThreshold': alertthreshold, 'apikey': apikey}
    if scanpolicyname is not None:
        params['scanPolicyName'] = scanpolicyname
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setScannerAlertThreshold/', params)))
def add_scan_policy(self, scanpolicyname, alertthreshold=None, attackstrength=None, apikey=''):
    """Adds a new scan policy with the given name, optionally with default alert threshold and attack strength."""
    params = {'scanPolicyName': scanpolicyname, 'apikey': apikey}
    if alertthreshold is not None:
        params['alertThreshold'] = alertthreshold
    if attackstrength is not None:
        params['attackStrength'] = attackstrength
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/addScanPolicy/', params)))
def remove_scan_policy(self, scanpolicyname, apikey=''):
    """Removes the scan policy with the given name."""
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/removeScanPolicy/', {'scanPolicyName': scanpolicyname, 'apikey': apikey})))
def update_scan_policy(self, scanpolicyname, alertthreshold=None, attackstrength=None, apikey=''):
    """Updates the scan policy with the given name, optionally changing its alert threshold and attack strength."""
    params = {'scanPolicyName': scanpolicyname, 'apikey': apikey}
    if alertthreshold is not None:
        params['alertThreshold'] = alertthreshold
    if attackstrength is not None:
        params['attackStrength'] = attackstrength
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/updateScanPolicy/', params)))
def add_excluded_param(self, name, type=None, url=None, apikey=''):
    """
    Adds a new parameter excluded from the scan, using the specified name. Optionally sets if the new entry applies to a specific URL (default, all URLs) and sets the ID of the type of the parameter (default, ID of any type). The type IDs can be obtained with the view excludedParamTypes.
    """
    params = {'name': name, 'apikey': apikey}
    if type is not None:
        params['type'] = type
    if url is not None:
        params['url'] = url
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/addExcludedParam/', params)))
def modify_excluded_param(self, idx, name=None, type=None, url=None, apikey=''):
    """
    Modifies a parameter excluded from the scan. Allows to modify the name, the URL and the type of parameter. The parameter is selected with its index, which can be obtained with the view excludedParams.
    """
    params = {'idx': idx, 'apikey': apikey}
    # Fields left as None keep their current value on the server side.
    if name is not None:
        params['name'] = name
    if type is not None:
        params['type'] = type
    if url is not None:
        params['url'] = url
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/modifyExcludedParam/', params)))
def remove_excluded_param(self, idx, apikey=''):
    """
    Removes a parameter excluded from the scan, with the given index. The index can be obtained with the view excludedParams.
    """
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/removeExcludedParam/', {'idx': idx, 'apikey': apikey})))
def set_option_attack_policy(self, string, apikey=''):
    """Sets the active scanner option 'attack policy'."""
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionAttackPolicy/', {'String': string, 'apikey': apikey})))
def set_option_default_policy(self, string, apikey=''):
    """Sets the active scanner option 'default policy'."""
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionDefaultPolicy/', {'String': string, 'apikey': apikey})))
def set_option_allow_attack_on_start(self, boolean, apikey=''):
    """Sets the active scanner option 'allow attack on start'."""
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionAllowAttackOnStart/', {'Boolean': boolean, 'apikey': apikey})))
def set_option_delay_in_ms(self, integer, apikey=''):
    """Sets the active scanner option 'delay in ms'."""
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionDelayInMs/', {'Integer': integer, 'apikey': apikey})))
def set_option_handle_anti_csrf_tokens(self, boolean, apikey=''):
    """Sets the active scanner option 'handle anti-CSRF tokens'."""
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionHandleAntiCSRFTokens/', {'Boolean': boolean, 'apikey': apikey})))
def set_option_host_per_scan(self, integer, apikey=''):
    """Sets the active scanner option 'hosts per scan'."""
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionHostPerScan/', {'Integer': integer, 'apikey': apikey})))
def set_option_inject_plugin_id_in_header(self, boolean, apikey=''):
    """
    Sets whether or not the active scanner should inject the HTTP request header X-ZAP-Scan-ID, with the ID of the scanner that's sending the requests.
    """
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionInjectPluginIdInHeader/', {'Boolean': boolean, 'apikey': apikey})))
def set_option_max_chart_time_in_mins(self, integer, apikey=''):
    """Sets the active scanner option 'max chart time in mins'."""
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionMaxChartTimeInMins/', {'Integer': integer, 'apikey': apikey})))
def set_option_max_results_to_list(self, integer, apikey=''):
    """Sets the active scanner option 'max results to list'."""
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionMaxResultsToList/', {'Integer': integer, 'apikey': apikey})))
def set_option_max_rule_duration_in_mins(self, integer, apikey=''):
    """Sets the active scanner option 'max rule duration in mins'."""
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionMaxRuleDurationInMins/', {'Integer': integer, 'apikey': apikey})))
def set_option_max_scan_duration_in_mins(self, integer, apikey=''):
    """Sets the active scanner option 'max scan duration in mins'."""
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionMaxScanDurationInMins/', {'Integer': integer, 'apikey': apikey})))
def set_option_max_scans_in_ui(self, integer, apikey=''):
    """Sets the active scanner option 'max scans in UI'."""
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionMaxScansInUI/', {'Integer': integer, 'apikey': apikey})))
def set_option_prompt_in_attack_mode(self, boolean, apikey=''):
    """Sets the active scanner option 'prompt in attack mode'."""
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionPromptInAttackMode/', {'Boolean': boolean, 'apikey': apikey})))
def set_option_prompt_to_clear_finished_scans(self, boolean, apikey=''):
    """Sets the active scanner option 'prompt to clear finished scans'."""
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionPromptToClearFinishedScans/', {'Boolean': boolean, 'apikey': apikey})))
def set_option_rescan_in_attack_mode(self, boolean, apikey=''):
    """Sets the active scanner option 'rescan in attack mode'."""
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionRescanInAttackMode/', {'Boolean': boolean, 'apikey': apikey})))
def set_option_scan_headers_all_requests(self, boolean, apikey=''):
    """
    Sets whether or not the HTTP Headers of all requests should be scanned. Not just requests that send parameters, through the query or request body.
    """
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionScanHeadersAllRequests/', {'Boolean': boolean, 'apikey': apikey})))
def set_option_show_advanced_dialog(self, boolean, apikey=''):
    """Sets the active scanner option 'show advanced dialog'."""
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionShowAdvancedDialog/', {'Boolean': boolean, 'apikey': apikey})))
def set_option_target_params_enabled_rpc(self, integer, apikey=''):
    """Sets the active scanner option 'target params enabled RPC'."""
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionTargetParamsEnabledRPC/', {'Integer': integer, 'apikey': apikey})))
def set_option_target_params_injectable(self, integer, apikey=''):
    """Sets the active scanner option 'target params injectable'."""
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionTargetParamsInjectable/', {'Integer': integer, 'apikey': apikey})))
def set_option_thread_per_host(self, integer, apikey=''):
    """Sets the active scanner option 'threads per host'."""
    return six.next(six.itervalues(self.zap._request(self.zap.base + 'ascan/action/setOptionThreadPerHost/', {'Integer': integer, 'apikey': apikey})))
| |
import os
# Path helper: resolves a path relative to this settings file's directory.
location = lambda x: os.path.join(
    os.path.dirname(os.path.realpath(__file__)), x)
USE_TZ = True
# Sandbox runs with full debugging enabled; these flags are not intended
# for a production deployment.
DEBUG = True
TEMPLATE_DEBUG = True
SQL_DEBUG = True
ALLOWED_HOSTS = ['latest.oscarcommerce.com',
                 'sandbox.oscar.tangentlabs.co.uk',
                 'master.oscarcommerce.com']
# This is needed for the hosted version of the sandbox
ADMINS = (
    ('David Winterbottom', 'david.winterbottom@tangentlabs.co.uk'),
)
EMAIL_SUBJECT_PREFIX = '[Oscar sandbox] '
# Emails are printed to the console rather than sent.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
MANAGERS = ADMINS
# Use a Sqlite database by default
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': location('db.sqlite'),
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
        'ATOMIC_REQUESTS': True
    }
}
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    }
}
# Django 1.7 changes the test runner default and tries to be helpful by
# alerting the user if the project looks like a pre-1.7 Django project by
# looking at various settings.  We don't have any tests for the sandbox,
# but setting an explicit test runner disables the warning
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
# Includes all languages that have >50% coverage in Transifex
# Taken from Django's default setting for LANGUAGES
gettext_noop = lambda s: s
LANGUAGES = (
    ('en-gb', gettext_noop('British English')),
    ('zh-cn', gettext_noop('Simplified Chinese')),
    ('nl', gettext_noop('Dutch')),
    ('it', gettext_noop('Italian')),
    ('pl', gettext_noop('Polish')),
    ('ru', gettext_noop('Russian')),
    ('sk', gettext_noop('Slovak')),
    ('pt-br', gettext_noop('Brazilian Portuguese')),
    ('fr', gettext_noop('French')),
    ('de', gettext_noop('German')),
    ('ko', gettext_noop('Korean')),
    ('uk', gettext_noop('Ukrainian')),
    ('es', gettext_noop('Spanish')),
    ('da', gettext_noop('Danish')),
    ('ar', gettext_noop('Arabic')),
    ('ca', gettext_noop('Catalan')),
    ('cs', gettext_noop('Czech')),
    ('el', gettext_noop('Greek')),
)
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = location("public/media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
#ADMIN_MEDIA_PREFIX = '/media/admin/'
STATIC_URL = '/static/'
STATIC_ROOT = location('public/static')
STATICFILES_DIRS = (
    location('static/'),
)
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # django-compressor's finder so compressed assets can be collected.
    'compressor.finders.CompressorFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): a hard-coded SECRET_KEY is acceptable only because this is a
# public sandbox; a real deployment must override it in settings_local.
SECRET_KEY = '$)a7n&o80u!6y5t-+jrd3)3!%vh&shg$wqpjpxc!ar&p#!)n1a'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # needed by django-treebeard for admin (and potentially other libs)
    'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    "django.contrib.auth.context_processors.auth",
    "django.core.context_processors.request",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    "django.core.context_processors.static",
    "django.contrib.messages.context_processors.messages",
    # Oscar specific
    'oscar.apps.search.context_processors.search_form',
    'oscar.apps.promotions.context_processors.promotions',
    'oscar.apps.checkout.context_processors.checkout',
    'oscar.core.context_processors.metadata',
    'oscar.apps.customer.notifications.context_processors.notifications',
)
MIDDLEWARE_CLASSES = (
    'debug_toolbar.middleware.DebugToolbarMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # NOTE(review): TransactionMiddleware was removed in Django 1.8 --
    # confirm the targeted Django version still provides it.
    'django.middleware.transaction.TransactionMiddleware',
    'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
    # Allow languages to be selected
    'django.middleware.locale.LocaleMiddleware',
    'django.middleware.common.CommonMiddleware',
    # Ensure a valid basket is added to the request instance for every request
    'oscar.apps.basket.middleware.BasketMiddleware',
    # Enable the ProfileMiddleware, then add ?cprofile to any
    # URL path to print out profile details
    #'oscar.profiling.middleware.ProfileMiddleware',
)
ROOT_URLCONF = 'urls'
# Add another path to Oscar's templates.  This allows templates to be
# customised easily.
from oscar import OSCAR_MAIN_TEMPLATE_DIR
TEMPLATE_DIRS = (
    location('templates'),
    OSCAR_MAIN_TEMPLATE_DIR,
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(message)s',
        },
        'simple': {
            'format': '[%(asctime)s] %(message)s'
        },
    },
    'filters': {
        # Only lets records through when DEBUG is False (used by mail_admins).
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'null': {
            'level': 'DEBUG',
            'class': 'django.utils.log.NullHandler',
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose'
        },
        # Per-concern files written via Oscar's env-aware file handler.
        'checkout_file': {
            'level': 'INFO',
            'class': 'oscar.core.logging.handlers.EnvFileHandler',
            'filename': 'checkout.log',
            'formatter': 'verbose'
        },
        'gateway_file': {
            'level': 'INFO',
            'class': 'oscar.core.logging.handlers.EnvFileHandler',
            'filename': 'gateway.log',
            'formatter': 'simple'
        },
        'error_file': {
            'level': 'INFO',
            'class': 'oscar.core.logging.handlers.EnvFileHandler',
            'filename': 'errors.log',
            'formatter': 'verbose'
        },
        'sorl_file': {
            'level': 'INFO',
            'class': 'oscar.core.logging.handlers.EnvFileHandler',
            'filename': 'sorl.log',
            'formatter': 'verbose'
        },
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler',
            'filters': ['require_debug_false'],
        },
    },
    'loggers': {
        # Django loggers
        'django': {
            'handlers': ['null'],
            'propagate': True,
            'level': 'INFO',
        },
        'django.request': {
            'handlers': ['mail_admins', 'error_file'],
            'level': 'ERROR',
            'propagate': False,
        },
        'django.db.backends': {
            'handlers': ['null'],
            'propagate': False,
            'level': 'DEBUG',
        },
        # Oscar core loggers
        'oscar.checkout': {
            'handlers': ['console', 'checkout_file'],
            'propagate': False,
            'level': 'INFO',
        },
        'oscar.catalogue.import': {
            'handlers': ['console'],
            'propagate': False,
            'level': 'INFO',
        },
        'oscar.alerts': {
            'handlers': ['null'],
            'propagate': False,
            'level': 'INFO',
        },
        # Sandbox logging
        'gateway': {
            'handlers': ['gateway_file'],
            'propagate': True,
            'level': 'INFO',
        },
        # Third party
        'south': {
            'handlers': ['null'],
            'propagate': True,
            'level': 'INFO',
        },
        'sorl.thumbnail': {
            'handlers': ['sorl_file'],
            'propagate': True,
            'level': 'INFO',
        },
        # Suppress output of this debug toolbar panel
        'template_timings_panel': {
            'handlers': ['null'],
            'level': 'DEBUG',
            'propagate': False,
        }
    }
}
INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.admin',
    'django.contrib.flatpages',
    'django.contrib.staticfiles',
    'django.contrib.sitemaps',
    'django_extensions',
    # Debug toolbar + extensions
    'debug_toolbar',
    'template_timings_panel',
    'compressor',       # Oscar's templates use compressor
    'apps.gateway',     # For allowing dashboard access
]
# Append all of Oscar's own apps to the list.
from oscar import get_core_apps
INSTALLED_APPS = INSTALLED_APPS + get_core_apps()
# As we use the sandbox to create both South migrations and native ones,
# the sandbox needs to work both with Django < 1.7 and 1.7
import django
if django.VERSION < (1, 7):
    INSTALLED_APPS.append('south')
# Add Oscar's custom auth backend so users can sign in using their email
# address.
AUTHENTICATION_BACKENDS = (
    'oscar.apps.customer.auth_backends.EmailBackend',
    'django.contrib.auth.backends.ModelBackend',
)
LOGIN_REDIRECT_URL = '/'
APPEND_SLASH = True
# Haystack settings
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
        'PATH': location('whoosh_index'),
    },
}
# =============
# Debug Toolbar
# =============
# Implicit setup can often lead to problems with circular imports, so we
# explicitly wire up the toolbar
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# Fix: the original read `DEBUG_TOOLBAR_PANELS = DEBUG_TOOLBAR_PANELS = [...]`;
# the duplicated target was a typo (harmless but confusing) and has been
# collapsed into a single assignment.
DEBUG_TOOLBAR_PANELS = [
    'debug_toolbar.panels.versions.VersionsPanel',
    'debug_toolbar.panels.timer.TimerPanel',
    'debug_toolbar.panels.settings.SettingsPanel',
    'debug_toolbar.panels.headers.HeadersPanel',
    'debug_toolbar.panels.request.RequestPanel',
    'debug_toolbar.panels.sql.SQLPanel',
    'debug_toolbar.panels.staticfiles.StaticFilesPanel',
    'debug_toolbar.panels.templates.TemplatesPanel',
    'debug_toolbar.panels.cache.CachePanel',
    'debug_toolbar.panels.signals.SignalsPanel',
    'debug_toolbar.panels.logging.LoggingPanel',
    'debug_toolbar.panels.redirects.RedirectsPanel',
]
# The toolbar is only shown to requests originating from these addresses.
INTERNAL_IPS = ['127.0.0.1', '::1']
# ==============
# Oscar settings
# ==============
from oscar.defaults import *
# Meta
# ====
OSCAR_SHOP_TAGLINE = 'Sandbox'
OSCAR_RECENTLY_VIEWED_PRODUCTS = 20
OSCAR_ALLOW_ANON_CHECKOUT = True
# This is added to each template context by the core context processor.  It is
# useful for test/stage/qa sites where you want to show the version of the site
# in the page title.
DISPLAY_VERSION = False
# Order processing
# ================
# Sample order/line status settings. This is quite simplistic. It's like you'll
# want to override the set_status method on the order object to do more
# sophisticated things.
OSCAR_INITIAL_ORDER_STATUS = 'Pending'
OSCAR_INITIAL_LINE_STATUS = 'Pending'
# This dict defines the new order statuses than an order can move to
OSCAR_ORDER_STATUS_PIPELINE = {
    'Pending': ('Being processed', 'Cancelled',),
    'Being processed': ('Complete', 'Cancelled',),
    'Cancelled': (),
    'Complete': (),
}
# This dict defines the line statuses that will be set when an order's status
# is changed
OSCAR_ORDER_STATUS_CASCADE = {
    'Being processed': 'Being processed',
    'Cancelled': 'Cancelled',
    'Complete': 'Shipped',
}
# LESS/CSS/statics
# ================
# We default to using CSS files, rather than the LESS files that generate them.
# If you want to develop Oscar's CSS, then set USE_LESS=True and
# COMPRESS_ENABLED=False in your settings_local module and ensure you have
# 'lessc' installed.  You can do this by running:
#
#    pip install -r requirements_less.txt
#
# which will install node.js and less in your virtualenv.
USE_LESS = False
COMPRESS_ENABLED = True
COMPRESS_PRECOMPILERS = (
    ('text/less', 'lessc {infile} {outfile}'),
)
COMPRESS_OFFLINE_CONTEXT = {
    'STATIC_URL': 'STATIC_URL',
    'use_less': USE_LESS,
}
# We do this to work around an issue in compressor where the LESS files are
# compiled but compression isn't enabled.  When this happens, the relative URL
# is wrong between the generated CSS file and other assets:
# https://github.com/jezdez/django_compressor/issues/226
COMPRESS_OUTPUT_DIR = 'oscar'
# Logging
# =======
LOG_ROOT = location('logs')
# Ensure log root exists
if not os.path.exists(LOG_ROOT):
    os.mkdir(LOG_ROOT)
# Sorl
# ====
THUMBNAIL_DEBUG = True
THUMBNAIL_KEY_PREFIX = 'oscar-sandbox'
# Django 1.6 has switched to JSON serializing for security reasons, but it does not
# serialize Models. We should resolve this by extending the
# django/core/serializers/json.Serializer to have the `dumps` function. Also
# in tests/config.py
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# Try and import local settings which can be used to override any of the above.
try:
    from settings_local import *
except ImportError:
    pass
| |
import errno
import json
import os
import platform
import shutil
import stat
import subprocess
import sys
import tempfile
from contextlib import contextmanager
from zipfile import ZipFile
import requests
import toml
from packaging.requirements import Requirement
from packaging.specifiers import SpecifierSet
from virtualenv import cli_run
try:
from shutil import which
except ImportError:
from distutils import spawn
which = spawn.find_executable # type: ignore
# Directory containing this script; main() iterates its sub-directories
# to find the downstream projects to test.
HERE = os.path.dirname(os.path.abspath(__file__))
# Windows needs special handling for venv script paths ('Scripts' vs 'bin')
# and read-only file removal.
ON_WINDOWS = platform.system() == 'Windows'
def handle_remove_readonly(func, path, exc):  # no cov
    """
    ``shutil.rmtree`` onerror hook.

    When a removal call fails with EACCES (e.g. read-only files inside a
    ``.git`` directory on Windows), grant full permissions on *path* and retry
    the failed operation; any other failure is re-raised unchanged.
    """
    # PermissionError: [WinError 5] Access is denied: '...\\.git\\...'
    removal_funcs = (os.rmdir, os.remove, os.unlink)
    access_denied = exc[1].errno == errno.EACCES
    if access_denied and func in removal_funcs:
        os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
        func(path)
    else:
        raise
class EnvVars(dict):
    """
    Context manager that temporarily replaces ``os.environ``.

    The mapping starts as a copy of the current environment, overlaid with
    *env_vars* and stripped of any names listed in *ignore*.  Entering swaps
    this mapping into ``os.environ``; exiting restores the environment that
    was captured at construction time.
    """

    def __init__(self, env_vars=None, ignore=None):
        super(EnvVars, self).__init__(os.environ)
        # Snapshot taken before any overrides so __exit__ restores the
        # pristine environment.
        self.old_env = dict(self)
        if env_vars:
            self.update(env_vars)
        for name in (ignore or ()):
            self.pop(name, None)

    def __enter__(self):
        os.environ.clear()
        os.environ.update(self)

    def __exit__(self, exc_type, exc_value, traceback):
        os.environ.clear()
        os.environ.update(self.old_env)
def python_version_supported(project_config):
    """
    Return ``True`` when the running interpreter satisfies the project's
    ``project.requires-python`` constraint (or when no constraint is set),
    ``False`` otherwise.
    """
    constraint = project_config['project'].get('requires-python', '')
    if not constraint:
        return True
    # Compare against "major.minor" of the current interpreter.
    current = '.'.join(str(part) for part in sys.version_info[:2])
    return bool(SpecifierSet(constraint).contains(current))
def download_file(url, file_name):
    """Stream the resource at *url* to *file_name* on disk in 16 KiB chunks."""
    response = requests.get(url, stream=True)
    with open(file_name, 'wb') as target:
        for chunk in response.iter_content(16384):
            target.write(chunk)
@contextmanager
def temp_dir():
    """
    Yield the real (symlink-resolved) path of a fresh temporary directory,
    recursively deleting it on exit even when it contains read-only files
    (handled by ``handle_remove_readonly``).
    """
    path = tempfile.mkdtemp()
    try:
        path = os.path.realpath(path)
        yield path
    finally:
        shutil.rmtree(path, ignore_errors=False, onerror=handle_remove_readonly)
def main():
    """
    Build a patched hatchling wheel, then install and smoke-test every
    downstream project fixture found next to this script against it.
    """
    # The backend repository root is two levels above this script.
    original_backend_path = os.path.dirname(os.path.dirname(HERE))
    with temp_dir() as links_dir, temp_dir() as build_dir:
        print('<<<<< Copying backend >>>>>')
        backend_path = os.path.join(build_dir, 'backend')
        shutil.copytree(original_backend_path, backend_path)
        # Increment the minor version
        version_file = os.path.join(backend_path, 'src', 'hatchling', '__about__.py')
        with open(version_file, 'r') as f:
            lines = f.readlines()
        for i, line in enumerate(lines):
            if line.startswith('__version__'):
                version = line.strip().split(' = ')[1].strip('\'"')
                version_parts = version.split('.')
                version_parts[1] = str(int(version_parts[1]) + 1)
                lines[i] = line.replace(version, '.'.join(version_parts))
                break
        else:
            raise ValueError('No version found')
        with open(version_file, 'w') as f:
            f.writelines(lines)
        print('<<<<< Building backend >>>>>')
        # Build the patched wheel into links_dir so pip can find it later
        # via --find-links.
        subprocess.check_call([sys.executable, '-m', 'build', '--wheel', '-o', links_dir, backend_path])
        # Pre-download the wheel's dependencies into the same links dir.
        subprocess.check_call(
            [
                sys.executable,
                '-m',
                'pip',
                'download',
                '-q',
                '--disable-pip-version-check',
                '--no-python-version-warning',
                '-d',
                links_dir,
                os.path.join(links_dir, os.listdir(links_dir)[0]),
            ]
        )
        for project in os.listdir(HERE):
            project_dir = os.path.join(HERE, project)
            if not os.path.isdir(project_dir):
                continue
            print('<<<<< Project: {} >>>>>'.format(project))
            project_config = {}
            potential_project_file = os.path.join(project_dir, 'pyproject.toml')
            # Not yet ported
            if os.path.isfile(potential_project_file):
                with open(potential_project_file, 'r') as f:
                    project_config.update(toml.loads(f.read()))
                if not python_version_supported(project_config):
                    print('--> Unsupported version of Python, skipping')
                    continue
            with open(os.path.join(project_dir, 'data.json'), 'r') as f:
                test_data = json.loads(f.read())
            with temp_dir() as d:
                if 'repo_url' in test_data:
                    print('--> Cloning repository')
                    repo_dir = os.path.join(d, 'repo')
                    subprocess.check_call(['git', 'clone', '-q', '--depth', '1', test_data['repo_url'], repo_dir])
                else:
                    archive_name = '{}.zip'.format(project)
                    archive_path = os.path.join(d, archive_name)
                    print('--> Downloading archive')
                    download_file(test_data['archive_url'], archive_path)
                    with ZipFile(archive_path) as zip_file:
                        zip_file.extractall(d)
                    # The archive's single top-level entry is the source tree.
                    entries = os.listdir(d)
                    entries.remove(archive_name)
                    repo_dir = os.path.join(d, entries[0])
                project_file = os.path.join(repo_dir, 'pyproject.toml')
                # A fixture-local pyproject.toml overrides the project's own.
                if project_config:
                    shutil.copyfile(potential_project_file, project_file)
                else:
                    if not os.path.isfile(project_file):
                        sys.exit('--> Missing file: pyproject.toml')
                    with open(project_file, 'r') as f:
                        project_config.update(toml.loads(f.read()))
                for requirement in project_config.get('build-system', {}).get('requires', []):
                    if Requirement(requirement).name == 'hatchling':
                        break
                else:
                    sys.exit('--> Field `build-system.requires` must specify `hatchling` as a requirement')
                if not python_version_supported(project_config):
                    print('--> Unsupported version of Python, skipping')
                    continue
                # Remove legacy packaging files so the build must go through
                # the hatchling backend.
                for file_name in ('MANIFEST.in', 'setup.cfg', 'setup.py'):
                    possible_path = os.path.join(repo_dir, file_name)
                    if os.path.isfile(possible_path):
                        os.remove(possible_path)
                venv_dir = os.path.join(d, '.venv')
                print('--> Creating virtual environment')
                cli_run([venv_dir, '--no-download', '--no-periodic-update'])
                env_vars = dict(test_data.get('env_vars', {}))
                env_vars['VIRTUAL_ENV'] = venv_dir
                # Put the venv's scripts first on PATH so `pip`/`python`
                # resolve inside it.
                env_vars['PATH'] = '{}{}{}'.format(
                    os.path.join(venv_dir, 'Scripts' if ON_WINDOWS else 'bin'), os.pathsep, os.environ['PATH']
                )
                with EnvVars(env_vars, ignore=('__PYVENV_LAUNCHER__', 'PYTHONHOME')):
                    print('--> Installing project')
                    # First install the project itself from the local links
                    # dir (picks up the freshly built hatchling wheel).
                    subprocess.check_call(
                        [
                            which('pip'),
                            'install',
                            '-q',
                            '--disable-pip-version-check',
                            '--no-python-version-warning',
                            '--find-links',
                            links_dir,
                            '--no-deps',
                            repo_dir,
                        ]
                    )
                    print('--> Installing dependencies')
                    subprocess.check_call(
                        [
                            which('pip'),
                            'install',
                            '-q',
                            '--disable-pip-version-check',
                            '--no-python-version-warning',
                            repo_dir,
                        ]
                    )
                    print('--> Testing package')
                    for statement in test_data['statements']:
                        subprocess.check_call([which('python'), '-c', statement])
                    scripts = project_config['project'].get('scripts', {})
                    if scripts:
                        print('--> Testing scripts')
                        for script in scripts:
                            if not which(script):
                                sys.exit('--> Could not locate script: {}'.format(script))
                    print('--> Success!')
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Create the 'Expenditure' model and its four M2M join tables."""
    # Adding model 'Expenditure'
    db.create_table('lobbyingph_expenditure', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('communication', self.gf('django.db.models.fields.SmallIntegerField')()),
        ('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lobbyingph.Category'])),
        ('issue', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lobbyingph.Issue'], null=True, blank=True)),
        ('bill', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lobbyingph.Bill'], null=True, blank=True)),
        ('position', self.gf('django.db.models.fields.SmallIntegerField')(null=True, blank=True)),
        ('other_desc', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
        ('filing', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lobbyingph.Filing'])),
    ))
    db.send_create_signal('lobbyingph', ['Expenditure'])
    # Adding M2M table for field agencies on 'Expenditure'
    db.create_table('lobbyingph_expenditure_agencies', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('expenditure', models.ForeignKey(orm['lobbyingph.expenditure'], null=False)),
        ('agency', models.ForeignKey(orm['lobbyingph.agency'], null=False))
    ))
    db.create_unique('lobbyingph_expenditure_agencies', ['expenditure_id', 'agency_id'])
    # Adding M2M table for field officials on 'Expenditure'
    db.create_table('lobbyingph_expenditure_officials', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('expenditure', models.ForeignKey(orm['lobbyingph.expenditure'], null=False)),
        ('official', models.ForeignKey(orm['lobbyingph.official'], null=False))
    ))
    db.create_unique('lobbyingph_expenditure_officials', ['expenditure_id', 'official_id'])
    # Adding M2M table for field methods on 'Expenditure'
    db.create_table('lobbyingph_expenditure_methods', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('expenditure', models.ForeignKey(orm['lobbyingph.expenditure'], null=False)),
        ('communication_method', models.ForeignKey(orm['lobbyingph.communication_method'], null=False))
    ))
    db.create_unique('lobbyingph_expenditure_methods', ['expenditure_id', 'communication_method_id'])
    # Adding M2M table for field groups on 'Expenditure'
    db.create_table('lobbyingph_expenditure_groups', (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('expenditure', models.ForeignKey(orm['lobbyingph.expenditure'], null=False)),
        ('receipent_group', models.ForeignKey(orm['lobbyingph.receipent_group'], null=False))
    ))
    db.create_unique('lobbyingph_expenditure_groups', ['expenditure_id', 'receipent_group_id'])
def backwards(self, orm):
    """Reverse the 'Expenditure' migration.

    Drops the base table and the four auto-created M2M join tables in the
    same order the generated code used (base table first, then joins).
    """
    for table_name in ('lobbyingph_expenditure',
                       'lobbyingph_expenditure_agencies',
                       'lobbyingph_expenditure_officials',
                       'lobbyingph_expenditure_methods',
                       'lobbyingph_expenditure_groups'):
        db.delete_table(table_name)
# Frozen ORM snapshot: South uses this auto-generated description of the
# app's models to reconstruct the schema state at this migration.  Each
# field entry is a triple of (field class path, positional args, keyword
# args, all as strings).  Auto-generated — do not edit by hand.
models = {
    'lobbyingph.agency': {
        'Meta': {'ordering': "['name']", 'object_name': 'Agency'},
        'alias': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
    },
    'lobbyingph.article': {
        'Meta': {'ordering': "['-date']", 'object_name': 'Article'},
        'date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2012, 10, 22, 0, 0)'}),
        'headline': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Issue']", 'null': 'True', 'blank': 'True'}),
        'publisher': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
        'quote': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
    },
    'lobbyingph.bill': {
        'Meta': {'ordering': "['number']", 'object_name': 'Bill'},
        'bill_type': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
        'number': ('django.db.models.fields.CharField', [], {'default': '0', 'max_length': '10'}),
        'url': ('django.db.models.fields.URLField', [], {'default': "'http://legislation.phila.gov/detailreport/?key='", 'max_length': '200'})
    },
    'lobbyingph.category': {
        'Meta': {'ordering': "['name']", 'object_name': 'Category'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
    },
    'lobbyingph.communication_method': {
        'Meta': {'ordering': "['name']", 'object_name': 'Communication_Method'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
    },
    'lobbyingph.exp_direct_comm': {
        'Meta': {'object_name': 'Exp_Direct_Comm'},
        'agencies': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lobbyingph.Agency']", 'null': 'True', 'blank': 'True'}),
        'bill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Bill']", 'null': 'True', 'blank': 'True'}),
        'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Category']"}),
        'filing': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Filing']"}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Issue']", 'null': 'True', 'blank': 'True'}),
        'officials': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lobbyingph.Official']", 'null': 'True', 'blank': 'True'}),
        'other_desc': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
        'position': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'})
    },
    'lobbyingph.exp_indirect_comm': {
        'Meta': {'object_name': 'Exp_Indirect_Comm'},
        'agency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Agency']", 'null': 'True', 'blank': 'True'}),
        'bill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Bill']", 'null': 'True', 'blank': 'True'}),
        'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Category']"}),
        'filing': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Filing']"}),
        'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lobbyingph.Receipent_Group']", 'null': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Issue']", 'null': 'True', 'blank': 'True'}),
        'methods': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lobbyingph.Communication_Method']", 'null': 'True', 'blank': 'True'}),
        'officials': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lobbyingph.Official']", 'null': 'True', 'blank': 'True'}),
        'other_desc': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
        'position': ('django.db.models.fields.SmallIntegerField', [], {})
    },
    'lobbyingph.exp_other': {
        'Meta': {'object_name': 'Exp_Other'},
        'agency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Agency']", 'null': 'True'}),
        'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
        'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'filing': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Filing']"}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'official': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Official']", 'null': 'True'}),
        'place': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
        'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Principal']", 'null': 'True'}),
        'value': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '12', 'decimal_places': '2'})
    },
    # The model added by this migration's forwards() step.
    'lobbyingph.expenditure': {
        'Meta': {'object_name': 'Expenditure'},
        'agencies': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lobbyingph.Agency']", 'null': 'True', 'blank': 'True'}),
        'bill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Bill']", 'null': 'True', 'blank': 'True'}),
        'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Category']"}),
        'communication': ('django.db.models.fields.SmallIntegerField', [], {}),
        'filing': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Filing']"}),
        'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lobbyingph.Receipent_Group']", 'null': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Issue']", 'null': 'True', 'blank': 'True'}),
        'methods': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lobbyingph.Communication_Method']", 'null': 'True', 'blank': 'True'}),
        'officials': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lobbyingph.Official']", 'null': 'True', 'blank': 'True'}),
        'other_desc': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
        'position': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'})
    },
    'lobbyingph.filing': {
        'Meta': {'object_name': 'Filing'},
        'corrected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'error_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
        'errors': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'firms': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lobbyingph.Firm']", 'null': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'lobbyists': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lobbyingph.Lobbyist']", 'null': 'True', 'blank': 'True'}),
        'principal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Principal']", 'null': 'True', 'blank': 'True'}),
        'quarter': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
        'total_exp_direct_comm': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '12', 'decimal_places': '2'}),
        'total_exp_indirect_comm': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '12', 'decimal_places': '2'}),
        'total_exp_other': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '12', 'decimal_places': '2'}),
        'year': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today'})
    },
    'lobbyingph.firm': {
        'Meta': {'ordering': "['name']", 'object_name': 'Firm'},
        'address1': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'address2': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
        'address3': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
        'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
        'email': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
        'phone': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
        'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
        'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '10'})
    },
    'lobbyingph.issue': {
        'Meta': {'ordering': "['description']", 'object_name': 'Issue'},
        'bill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Bill']", 'null': 'True', 'blank': 'True'}),
        'description': ('django.db.models.fields.TextField', [], {}),
        'detail_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
    },
    'lobbyingph.lobbyist': {
        'Meta': {'ordering': "['name']", 'object_name': 'Lobbyist'},
        'address1': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'address2': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
        'address3': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
        'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
        'email': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
        'firm': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Firm']", 'null': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
        'phone': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
        'principals': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lobbyingph.Principal']", 'null': 'True', 'blank': 'True'}),
        'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
        'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '10'})
    },
    'lobbyingph.official': {
        'Meta': {'ordering': "['last_name']", 'object_name': 'Official'},
        'agency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Agency']", 'null': 'True', 'blank': 'True'}),
        'first_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'last_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
        'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
    },
    'lobbyingph.principal': {
        'Meta': {'ordering': "['name']", 'object_name': 'Principal'},
        'address1': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'address2': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
        'address3': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
        'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
        'email': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
        'phone': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
        'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
        'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '10'})
    },
    # NOTE: "Receipent" is a long-standing typo for "Recipient"; it is
    # frozen into table/model names and must not be corrected here.
    'lobbyingph.receipent_group': {
        'Meta': {'ordering': "['name']", 'object_name': 'Receipent_Group'},
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
    },
    'lobbyingph.source': {
        'Meta': {'ordering': "['name']", 'object_name': 'Source'},
        'filing': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lobbyingph.Filing']", 'null': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
        'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
    }
}

# Only this app's models are frozen in this migration.
complete_apps = ['lobbyingph']
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Plot flux graphs."""
import numpy as np
from astropy import log
from astropy.time import Time
import copy
from . import config, graph
############
# CONSTANTS
############
DEFAULT_MIN_METEORS = 100 # minimum meteors required per bin
DEFAULT_MIN_ECA = 50000 # minimum effective collection area per bin [km^2 h]
DEFAULT_MIN_ECA_STATION = -0.1 # Was 0.05 — presumably a negative value disables the per-station ECA cut; verify against the DB functions
DEFAULT_GAMMA = 1.5 # zenith correction exponent
DEFAULT_POPINDEX = 2.2 # population index
##########
# CLASSES
#########
class BaseProfile(object):
    """Abstract base class for flux profiles.

    Subclasses are expected to populate ``self.fluxes`` (a sequence of
    mapping-like rows returned by the flux database) in their constructor,
    and to provide a ``graph()`` method returning an object with ``plot()``
    and ``save()``.
    """

    def __init__(self, fluxdb, ymax=None):
        """
        Parameters
        ----------
        fluxdb : object
            Database handle; subclasses call ``fluxdb.query(...)``.
        ymax : float, optional
            Upper limit for the y axis of the generated graph.
        """
        self.fluxdb = fluxdb
        self.ymax = ymax
        # Bug fix: initialize fluxes so get_response()/field() on a subclass
        # that failed to run a query hits the intended WARNING branch rather
        # than raising AttributeError.
        self.fluxes = None

    def field(self, key):
        """Return column *key* of the flux table as a numpy array."""
        return np.array([row[key] for row in self.fluxes])

    def get_response(self):
        """Returns the flux profile in JSON format.

        Returns
        -------
        dict
            ``{'status': 'WARNING', 'msg': ...}`` when no data is available,
            otherwise ``{'status': 'OK', 'graph': <filename>,
            'flux': <list of formatted rows>}``.
        """
        result = {}
        if self.fluxes is None or len(self.fluxes) == 0:
            log.error('No suitable data found.')
            result['status'] = 'WARNING'
            result['msg'] = 'No suitable data found.'
        else:
            result['status'] = 'OK'
            result['graph'] = self.save_graph()
            result['flux'] = []
            for row in self.fluxes:
                newrow = []
                # Averaged profiles do not have a time field.
                # NOTE(review): assumes a missing time is present-but-falsy
                # (e.g. None), not an absent key — confirm with the DB layer.
                if row['time']:
                    newrow.append(str(row['time'])[0:16])
                newrow.extend(('{:.3f}'.format(row['solarlon']),
                               '{:.1f}'.format(row['teff']),
                               '{:.1f}'.format(row['eca']),
                               '{0}'.format(row['met']),
                               '{:.1f} ± {:.1f}'.format(row['flux'], row['e_flux']),
                               '{:.0f}'.format(row['zhr'])))
                result['flux'].append(newrow)
        return result

    def save_graph(self):
        """Create the graph (via the subclass's graph()) and return its filename."""
        mygraph = self.graph()
        return mygraph.save()
class VideoProfile(BaseProfile):
    """Time-binned video flux profile for one shower activity period."""

    def __init__(self, fluxdb,
                 shower, start, stop,
                 min_interval=1, max_interval=24,
                 min_meteors=DEFAULT_MIN_METEORS,
                 min_eca=DEFAULT_MIN_ECA,
                 min_alt=10,
                 min_eca_station=DEFAULT_MIN_ECA_STATION,
                 gamma=DEFAULT_GAMMA,
                 popindex=DEFAULT_POPINDEX,
                 ymax=None):
        """Query the database and store the resulting flux rows.

        Parameters
        ----------
        shower : string
            IMO shower code
        start, stop : string or `astropy.time.Time`
            ISO timestamps delimiting the period.
        min_meteors : int
            Minimum number of meteors in each bin.
        min_eca : float [10^3 km^2 h]
            Minimum ECA in each bin.
        min_interval, max_interval : float [hours]
            Bin-size bounds.
        min_alt : float [degrees]
            Minimum radiant altitude for a flux record to be included.
        min_eca_station : float
            Minimum ECA for a flux record to be included.
        gamma : float
            Zenith correction exponent.
        popindex : float
            Population index.
        """
        BaseProfile.__init__(self, fluxdb, ymax=ymax)
        # Accept either Time instances or ISO strings for both endpoints.
        self.start = start if isinstance(start, Time) else Time(start, scale='utc')
        self.stop = stop if isinstance(stop, Time) else Time(stop, scale='utc')
        self.popindex = popindex
        self.gamma = gamma
        self.fluxes = self.fluxdb.query("""SELECT * FROM
                                        VideoProfile(%s,
                                                     %s::timestamp,
                                                     %s::timestamp,
                                                     %s, %s,
                                                     '%s hours'::interval,
                                                     '%s hours'::interval,
                                                     %s, %s, %s, %s)
                                        """, (shower,
                                              self.start.isot,
                                              self.stop.isot,
                                              min_meteors, min_eca,
                                              min_interval, max_interval,
                                              min_alt, min_eca_station,
                                              gamma, popindex, ))

    def graph(self):
        """Build, plot and return the graph object for this profile."""
        result = graph.VideoGraph(self, ymax=self.ymax)
        result.plot()
        return result
class SolVideoProfile(BaseProfile):
    """Solar-longitude-binned video flux profile for one shower in one year."""

    def __init__(self, fluxdb, shower,
                 year, start, stop,
                 min_interval=1, max_interval=24,
                 min_meteors=DEFAULT_MIN_METEORS,
                 min_eca=DEFAULT_MIN_ECA,
                 min_alt=10,
                 min_eca_station=DEFAULT_MIN_ECA_STATION,
                 gamma=DEFAULT_GAMMA,
                 popindex=DEFAULT_POPINDEX,
                 ymax=None,
                 label=None,
                 marker='s'):
        """Query the database and store the resulting flux rows.

        Parameters
        ----------
        shower : string
            IMO shower code
        year : int
            e.g. 2012
        start : float [degrees]
            Solar longitude.
        stop : float [degrees]
            Solar longitude.
        min_meteors : int
            Minimum number of meteors in each bin.
        min_eca : float [10^3 km^2 h]
            Minimum ECA in each bin.
        min_interval : float [hours]
        max_interval : float [hours]
        min_alt : float [degrees]
            Minimum radiant altitude for a flux record to be included.
        min_eca_station : float
            Minimum ECA for a flux record to be included.
        gamma : float
            Zenith correction exponent.
        popindex : float
            Population index.
        ymax : float, optional
            Upper y-axis limit for the graph.
        label : str, optional
            Legend label; defaults to str(year).
        marker : str
            Plot marker symbol.
        """
        BaseProfile.__init__(self, fluxdb, ymax=ymax)
        self.shower = shower
        self.year = year
        self.start = start
        self.stop = stop
        self.popindex = popindex
        self.gamma = gamma
        # PEP 8: compare against None with "is not", not "!=".
        if label is not None:
            self.label = label
        else:
            self.label = str(year)
        self.marker = marker
        self.fluxes = self.fluxdb.query("""SELECT * FROM
                                        SolVideoProfile(%s,
                                                        %s, %s, %s,
                                                        %s, %s,
                                                        %s, %s,
                                                        %s, %s, %s, %s)
                                        """, (shower,
                                              year, start, stop,
                                              min_meteors, min_eca,
                                              min_interval, max_interval,
                                              min_alt, min_eca_station,
                                              gamma, popindex, ))

    def graph(self):
        """Build, plot and return the solar-longitude graph for this profile."""
        mygraph = graph.SolVideoGraph(self, ymax=self.ymax)
        mygraph.plot()
        return mygraph
class AvgVideoProfile(BaseProfile):
    """Solar-longitude-binned video flux profile averaged over multiple years."""

    def __init__(self, fluxdb, shower,
                 years, start, stop,
                 min_interval=1, max_interval=24,
                 min_meteors=DEFAULT_MIN_METEORS,
                 min_eca=DEFAULT_MIN_ECA,
                 min_alt=10,
                 min_eca_station=DEFAULT_MIN_ECA_STATION,
                 gamma=DEFAULT_GAMMA,
                 popindex=DEFAULT_POPINDEX,
                 ymax=None,
                 label=None,
                 marker='s'):
        """Query the database and store the resulting flux rows.

        Parameters
        ----------
        shower : string
            IMO shower code
        years : list
            e.g. [2011,2012]
        start : float [degrees]
            Solar longitude.
        stop : float [degrees]
            Solar longitude.
        min_meteors : int
            Minimum number of meteors in each bin.
        min_eca : float [10^3 km^2 h]
            Minimum ECA in each bin.
        min_interval : float [hours]
        max_interval : float [hours]
        min_alt : float [degrees]
            Minimum radiant altitude for a flux record to be included.
        min_eca_station : float
            Minimum ECA for a flux record to be included.
        gamma : float
            Zenith correction exponent.
        popindex : float
            Population index.
        ymax : float, optional
            Upper y-axis limit for the graph.
        label : str, optional
            Legend label; defaults to the single year or the shower code.
        marker : str
            Plot marker symbol.
        """
        BaseProfile.__init__(self, fluxdb, ymax=ymax)
        self.shower = shower
        self.years = years
        self.start = start
        self.stop = stop
        self.popindex = popindex
        self.gamma = gamma
        # PEP 8: compare against None with "is not", not "!=".
        if label is not None:
            self.label = label
        elif len(years) == 1:
            # NOTE(review): this keeps the raw (non-str) year as the label,
            # unlike SolVideoProfile which uses str(year) — confirm whether
            # downstream graphing code relies on either type.
            self.label = years[0]
        else:
            self.label = shower
        self.marker = marker
        self.fluxes = self.fluxdb.query("""SELECT * FROM
                                        AvgVideoProfile(%s,
                                                        %s::int[], %s, %s,
                                                        %s, %s,
                                                        %s, %s,
                                                        %s, %s, %s, %s)
                                        """, (shower,
                                              years, start, stop,
                                              min_meteors, min_eca,
                                              min_interval, max_interval,
                                              min_alt, min_eca_station,
                                              gamma, popindex, ))

    def graph(self):
        """Build, plot and return the solar-longitude graph for this profile."""
        mygraph = graph.SolVideoGraph(self, ymax=self.ymax)
        mygraph.plot()
        return mygraph
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import numpy
import rmgpy.quantity
from rmgpy.molecule import Molecule
from rmgpy.species import Species
from rmgpy.reaction import Reaction
from rmgpy.kinetics import Arrhenius
from rmgpy.thermo import ThermoData
from rmgpy.solver.simple import SimpleReactor
from rmgpy.solver.base import TerminationTime, TerminationConversion
import rmgpy.constants as constants
################################################################################
class SimpleReactorCheck(unittest.TestCase):
    """Unit tests for the SimpleReactor (isothermal, isobaric batch reactor).

    NOTE(review): this single test method exercises three separate concerns —
    (1) integration of a small kinetic model to equilibrium, (2) the
    analytical Jacobian against a finite-difference Jacobian, and (3) the
    rate-coefficient derivative d(dy/dt)/dk against finite differences.
    """

    def testSolve(self):
        """
        Test the simple batch reactor with a simple kinetic model. Here we
        choose a kinetic model consisting of the hydrogen abstraction reaction
        CH4 + C2H5 <=> CH3 + C2H6.
        """
        # Species thermochemistry given as tabulated ThermoData
        # (Cp in cal/(mol*K), H298 in kcal/mol, S298 in cal/(mol*K)).
        CH4 = Species(
            molecule=[Molecule().fromSMILES("C")],
            thermo=ThermoData(Tdata=([300,400,500,600,800,1000,1500],"K"), Cpdata=([ 8.615, 9.687,10.963,12.301,14.841,16.976,20.528],"cal/(mol*K)"), H298=(-17.714,"kcal/mol"), S298=(44.472,"cal/(mol*K)"))
            )
        CH3 = Species(
            molecule=[Molecule().fromSMILES("[CH3]")],
            thermo=ThermoData(Tdata=([300,400,500,600,800,1000,1500],"K"), Cpdata=([ 9.397,10.123,10.856,11.571,12.899,14.055,16.195],"cal/(mol*K)"), H298=( 9.357,"kcal/mol"), S298=(45.174,"cal/(mol*K)"))
            )
        C2H6 = Species(
            molecule=[Molecule().fromSMILES("CC")],
            thermo=ThermoData(Tdata=([300,400,500,600,800,1000,1500],"K"), Cpdata=([12.684,15.506,18.326,20.971,25.500,29.016,34.595],"cal/(mol*K)"), H298=(-19.521,"kcal/mol"), S298=(54.799,"cal/(mol*K)"))
            )
        C2H5 = Species(
            molecule=[Molecule().fromSMILES("C[CH2]")],
            thermo=ThermoData(Tdata=([300,400,500,600,800,1000,1500],"K"), Cpdata=([11.635,13.744,16.085,18.246,21.885,24.676,29.107],"cal/(mol*K)"), H298=( 29.496,"kcal/mol"), S298=(56.687,"cal/(mol*K)"))
            )

        # The single reversible H-abstraction reaction under test.
        rxn1 = Reaction(reactants=[C2H6,CH3], products=[C2H5,CH4], kinetics=Arrhenius(A=(686.375*6,'m^3/(mol*s)'), n=4.40721, Ea=(7.82799,'kcal/mol'), T0=(298.15,'K')))

        coreSpecies = [CH4,CH3,C2H6,C2H5]
        edgeSpecies = []
        coreReactions = [rxn1]
        edgeReactions = []

        # Isothermal 1000 K, isobaric 1 bar.
        T = 1000; P = 1.0e5
        rxnSystem = SimpleReactor(T, P, initialMoleFractions={C2H5: 0.1, CH3: 0.1, CH4: 0.4, C2H6: 0.4}, termination=[])

        rxnSystem.initializeModel(coreSpecies, coreReactions, edgeSpecies, edgeReactions)

        # Logarithmically spaced output times, 1e-13 s .. 1e-5 s.
        tlist = numpy.array([10**(i/10.0) for i in range(-130, -49)], numpy.float64)

        # Integrate to get the solution at each time point
        t = []; y = []; reactionRates = []; speciesRates = []
        for t1 in tlist:
            rxnSystem.advance(t1)
            t.append(rxnSystem.t)
            # You must make a copy of y because it is overwritten by DASSL at
            # each call to advance()
            y.append(rxnSystem.y.copy())
            reactionRates.append(rxnSystem.coreReactionRates.copy())
            speciesRates.append(rxnSystem.coreSpeciesRates.copy())

        # Convert the solution vectors to numpy arrays
        t = numpy.array(t, numpy.float64)
        y = numpy.array(y, numpy.float64)
        reactionRates = numpy.array(reactionRates, numpy.float64)
        speciesRates = numpy.array(speciesRates, numpy.float64)
        # Ideal-gas volume from total moles; V itself is not asserted below.
        V = constants.R * rxnSystem.T.value_si * numpy.sum(y) / rxnSystem.P.value_si

        # Check that we're computing the species fluxes correctly:
        # for C2H6 + CH3 <=> C2H5 + CH4 the net species rates must equal
        # +/- the single reaction rate (reactant sign negative).
        for i in range(t.shape[0]):
            self.assertAlmostEqual(reactionRates[i,0], speciesRates[i,0], delta=1e-6*reactionRates[i,0])
            self.assertAlmostEqual(reactionRates[i,0], -speciesRates[i,1], delta=1e-6*reactionRates[i,0])
            self.assertAlmostEqual(reactionRates[i,0], -speciesRates[i,2], delta=1e-6*reactionRates[i,0])
            self.assertAlmostEqual(reactionRates[i,0], speciesRates[i,3], delta=1e-6*reactionRates[i,0])

        # Check that we've reached equilibrium
        self.assertAlmostEqual(reactionRates[-1,0], 0.0, delta=1e-2)

        #######
        # Unit test for the jacobian function:
        # Solve a reaction system and check if the analytical jacobian matches the finite difference jacobian

        H2 = Species(
            molecule=[Molecule().fromSMILES("[H][H]")],
            thermo=ThermoData(Tdata=([300,400,500,600,800,1000,1500],"K"), Cpdata=([6.89,6.97,6.99,7.01,7.08,7.22,7.72],"cal/(mol*K)"), H298=( 0,"kcal/mol"), S298=(31.23,"cal/(mol*K)"))
            )

        # A variety of uni-, bi- and termolecular reactions so that the
        # Jacobian is exercised for each rate-law order.
        rxnList = []
        rxnList.append(Reaction(reactants=[C2H6], products=[CH3,CH3], kinetics=Arrhenius(A=(686.375*6,'1/s'), n=4.40721, Ea=(7.82799,'kcal/mol'), T0=(298.15,'K'))))
        rxnList.append(Reaction(reactants=[CH3,CH3], products=[C2H6], kinetics=Arrhenius(A=(686.375*6,'m^3/(mol*s)'), n=4.40721, Ea=(7.82799,'kcal/mol'), T0=(298.15,'K'))))
        rxnList.append(Reaction(reactants=[C2H6,CH3], products=[C2H5,CH4], kinetics=Arrhenius(A=(46.375*6,'m^3/(mol*s)'), n=3.40721, Ea=(6.82799,'kcal/mol'), T0=(298.15,'K'))))
        rxnList.append(Reaction(reactants=[C2H5,CH4], products=[C2H6,CH3], kinetics=Arrhenius(A=(46.375*6,'m^3/(mol*s)'), n=3.40721, Ea=(6.82799,'kcal/mol'), T0=(298.15,'K'))))
        rxnList.append(Reaction(reactants=[C2H5,CH4], products=[CH3,CH3,CH3], kinetics=Arrhenius(A=(246.375*6,'m^3/(mol*s)'), n=1.40721, Ea=(3.82799,'kcal/mol'), T0=(298.15,'K'))))
        rxnList.append(Reaction(reactants=[CH3,CH3,CH3], products=[C2H5,CH4], kinetics=Arrhenius(A=(246.375*6,'m^6/(mol^2*s)'), n=1.40721, Ea=(3.82799,'kcal/mol'), T0=(298.15,'K'))))#
        rxnList.append(Reaction(reactants=[C2H6,CH3,CH3], products=[C2H5,C2H5,H2], kinetics=Arrhenius(A=(146.375*6,'m^6/(mol^2*s)'), n=2.40721, Ea=(8.82799,'kcal/mol'), T0=(298.15,'K'))))
        rxnList.append(Reaction(reactants=[C2H5,C2H5,H2], products=[C2H6,CH3,CH3], kinetics=Arrhenius(A=(146.375*6,'m^6/(mol^2*s)'), n=2.40721, Ea=(8.82799,'kcal/mol'), T0=(298.15,'K'))))
        rxnList.append(Reaction(reactants=[C2H6,C2H6], products=[CH3,CH4,C2H5], kinetics=Arrhenius(A=(1246.375*6,'m^3/(mol*s)'), n=0.40721, Ea=(8.82799,'kcal/mol'), T0=(298.15,'K'))))
        rxnList.append(Reaction(reactants=[CH3,CH4,C2H5], products=[C2H6,C2H6], kinetics=Arrhenius(A=(46.375*6,'m^6/(mol^2*s)'), n=0.10721, Ea=(8.82799,'kcal/mol'), T0=(298.15,'K'))))

        # Check each reaction in its own one-reaction system.
        for rxn in rxnList:
            coreSpecies = [CH4,CH3,C2H6,C2H5,H2]
            edgeSpecies = []
            coreReactions = [rxn]

            rxnSystem0 = SimpleReactor(T,P,initialMoleFractions={CH4:0.2,CH3:0.1,C2H6:0.35,C2H5:0.15, H2:0.2},termination=[])
            rxnSystem0.initializeModel(coreSpecies, coreReactions, edgeSpecies, edgeReactions)
            dydt0 = rxnSystem0.residual(0.0, rxnSystem0.y, numpy.zeros(rxnSystem0.y.shape))[0]
            numCoreSpecies = len(coreSpecies)
            dN = .000001*sum(rxnSystem0.y)
            dN_array = dN*numpy.eye(numCoreSpecies)

            # Perturb each species amount by dN in turn and record dy/dt.
            dydt = []
            for i in range(numCoreSpecies):
                rxnSystem0.y[i] += dN
                dydt.append(rxnSystem0.residual(0.0, rxnSystem0.y, numpy.zeros(rxnSystem0.y.shape))[0])
                rxnSystem0.y[i] -= dN # reset y to original y0

            # Let the solver compute the jacobian
            solverJacobian = rxnSystem0.jacobian(0.0, rxnSystem0.y, dydt0, 0.0)
            # Compute the jacobian using finite differences
            jacobian = numpy.zeros((numCoreSpecies, numCoreSpecies))
            for i in range(numCoreSpecies):
                for j in range(numCoreSpecies):
                    jacobian[i,j] = (dydt[j][i]-dydt0[i])/dN
                    self.assertAlmostEqual(jacobian[i,j], solverJacobian[i,j], delta=abs(1e-4*jacobian[i,j]))

            #print 'Solver jacobian'
            #print solverJacobian
            #print 'Numerical jacobian'
            #print jacobian

        ###
        # Unit test for the compute rate derivative
        rxnList = []
        rxnList.append(Reaction(reactants=[C2H6], products=[CH3,CH3], kinetics=Arrhenius(A=(686.375e6,'1/s'), n=4.40721, Ea=(7.82799,'kcal/mol'), T0=(298.15,'K'))))
        rxnList.append(Reaction(reactants=[C2H6,CH3], products=[C2H5,CH4], kinetics=Arrhenius(A=(46.375*6,'m^3/(mol*s)'), n=3.40721, Ea=(6.82799,'kcal/mol'), T0=(298.15,'K'))))
        rxnList.append(Reaction(reactants=[C2H6,CH3,CH3], products=[C2H5,C2H5,H2], kinetics=Arrhenius(A=(146.375*6,'m^6/(mol^2*s)'), n=2.40721, Ea=(8.82799,'kcal/mol'), T0=(298.15,'K'))))

        coreSpecies = [CH4,CH3,C2H6,C2H5,H2]
        edgeSpecies = []
        coreReactions = rxnList

        rxnSystem0 = SimpleReactor(T,P,initialMoleFractions={CH4:0.2,CH3:0.1,C2H6:0.35,C2H5:0.15, H2:0.2},termination=[])
        rxnSystem0.initializeModel(coreSpecies, coreReactions, edgeSpecies, edgeReactions)
        dfdt0 = rxnSystem0.residual(0.0, rxnSystem0.y, numpy.zeros(rxnSystem0.y.shape))[0]
        solver_dfdk = rxnSystem0.computeRateDerivative()
        #print 'Solver d(dy/dt)/dk'
        #print solver_dfdk

        integrationTime = 1e-8
        rxnSystem0.termination.append(TerminationTime((integrationTime,'s')))
        rxnSystem0.simulate(coreSpecies, coreReactions, [], [], 0, 1, 0)

        y0 = rxnSystem0.y

        # Finite-difference d(dy/dt)/dk: bump each reaction's A factor by
        # a relative 1e-3, re-evaluate the residual, then restore A.
        dfdk = numpy.zeros((numCoreSpecies,len(rxnList))) # d(dy/dt)/dk

        for i in range(len(rxnList)):
            k0 = rxnList[i].getRateCoefficient(T,P)
            rxnList[i].kinetics.A.value_si = rxnList[i].kinetics.A.value_si*(1+1e-3)
            dk = rxnList[i].getRateCoefficient(T,P) - k0

            rxnSystem = SimpleReactor(T,P,initialMoleFractions={CH4:0.2,CH3:0.1,C2H6:0.35,C2H5:0.15, H2:0.2},termination=[])
            rxnSystem.initializeModel(coreSpecies, coreReactions, edgeSpecies, edgeReactions)

            dfdt = rxnSystem.residual(0.0, rxnSystem.y, numpy.zeros(rxnSystem.y.shape))[0]
            dfdk[:,i]=(dfdt-dfdt0)/dk

            rxnSystem.termination.append(TerminationTime((integrationTime,'s')))
            rxnSystem.simulate(coreSpecies, coreReactions, [], [], 0, 1, 0)

            rxnList[i].kinetics.A.value_si = rxnList[i].kinetics.A.value_si/(1+1e-3) # reset A factor

        for i in range(numCoreSpecies):
            for j in range(len(rxnList)):
                self.assertAlmostEqual(dfdk[i,j], solver_dfdk[i,j], delta=abs(1e-3*dfdk[i,j]))
        #print 'Numerical d(dy/dt)/dk'
        #print dfdk

        # # Visualize the simulation results
        # import pylab
        # fig = pylab.figure(figsize=(6,6))
        # pylab.subplot(2,1,1)
        # pylab.semilogx(t, y)
        # pylab.ylabel('Concentration (mol/m$^\\mathdefault{3}$)')
        # pylab.legend(['CH4', 'CH3', 'C2H6', 'C2H5'], loc=4)
        # pylab.subplot(2,1,2)
        # pylab.semilogx(t, speciesRates)
        # pylab.legend(['CH4', 'CH3', 'C2H6', 'C2H5'], loc=4)
        # pylab.xlabel('Time (s)')
        # pylab.ylabel('Rate (mol/m$^\\mathdefault{3}$*s)')
        # fig.subplots_adjust(left=0.12, bottom=0.10, right=0.95, top=0.95, wspace=0.20, hspace=0.35)
        # pylab.show()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
# JSON payload type accepted by the request builders below.
JSONType = Any
# Optional per-call response hook: (pipeline_response, deserialized, response_headers) -> Any.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

# Shared serializer used to format URL path, query and header values;
# client-side validation is disabled in generated request builders.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
    subscription_id: str,
    resource_group_name: str,
    service_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for the default monitoringSettings resource.

    Remaining ``kwargs`` (after ``template_url``, ``params`` and ``headers``
    are consumed) are forwarded to :class:`HttpRequest`.
    """
    api_version = "2020-11-01-preview"
    accept = "application/json"

    # Resolve the URL template, substituting serialized path arguments.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default')
    url = _format_url_section(url, **{
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "serviceName": _SERIALIZER.url("service_name", service_name, 'str'),
    })

    # Query string: caller-supplied params plus the pinned api-version.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: caller-supplied headers plus Accept.
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_update_put_request_initial(
    subscription_id: str,
    resource_group_name: str,
    service_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial HTTP PUT request that starts the update long-running operation.

    :param subscription_id: Azure subscription id placed in the URL path.
    :param resource_group_name: Resource group containing the service.
    :param service_name: Name of the Service resource.
    :keyword json: JSON-serializable request body.
    :keyword content: Raw request body (alternative to ``json``).
    :return: An :class:`~azure.core.rest.HttpRequest` ready to run through a pipeline.
    """
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2020-11-01-preview"
    accept = "application/json"
    # Resolve the URL template, then substitute the serialized path parameters.
    template = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default')
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        serviceName=_SERIALIZER.url("service_name", service_name, 'str'),
    )
    # Query string: only the API version.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Headers: Content-Type is sent only when a body encoding was chosen.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="PUT",
        url=url,
        params=params,
        headers=headers,
        json=json,
        content=content,
        **kwargs
    )
def build_update_patch_request_initial(
    subscription_id: str,
    resource_group_name: str,
    service_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial HTTP PATCH request that starts the update long-running operation.

    :param subscription_id: Azure subscription id placed in the URL path.
    :param resource_group_name: Resource group containing the service.
    :param service_name: Name of the Service resource.
    :keyword json: JSON-serializable request body.
    :keyword content: Raw request body (alternative to ``json``).
    :return: An :class:`~azure.core.rest.HttpRequest` ready to run through a pipeline.
    """
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2020-11-01-preview"
    accept = "application/json"
    # Resolve the URL template, then substitute the serialized path parameters.
    template = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default')
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        serviceName=_SERIALIZER.url("service_name", service_name, 'str'),
    )
    # Query string: only the API version.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Headers: Content-Type is sent only when a body encoding was chosen.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="PATCH",
        url=url,
        params=params,
        headers=headers,
        json=json,
        content=content,
        **kwargs
    )
class MonitoringSettingsOperations(object):
    """MonitoringSettingsOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.appplatform.v2020_11_01_preview.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        service_name: str,
        **kwargs: Any
    ) -> "_models.MonitoringSettingResource":
        """Get the Monitoring Setting and its properties.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: MonitoringSettingResource, or the result of cls(response)
        :rtype: ~azure.mgmt.appplatform.v2020_11_01_preview.models.MonitoringSettingResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.MonitoringSettingResource"]
        # Map well-known ARM failure status codes to typed exceptions; callers
        # may extend/override the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Build the GET request from this operation's metadata URL template.
        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            service_name=service_name,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        # Send through the client pipeline synchronously.
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)
        # Optional response hook takes precedence over the plain deserialized model.
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default'}  # type: ignore
    def _update_put_initial(
        self,
        resource_group_name: str,
        service_name: str,
        monitoring_setting_resource: "_models.MonitoringSettingResource",
        **kwargs: Any
    ) -> "_models.MonitoringSettingResource":
        # Initial call of the PUT long-running operation; returns the first
        # (200 or 202) response body. Polling happens in begin_update_put.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.MonitoringSettingResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        # Serialize the model into the JSON request body.
        _json = self._serialize.body(monitoring_setting_resource, 'MonitoringSettingResource')
        request = build_update_put_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            service_name=service_name,
            content_type=content_type,
            json=_json,
            template_url=self._update_put_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Both accepted status codes carry a MonitoringSettingResource body.
        if response.status_code == 200:
            deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_put_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default'}  # type: ignore
    @distributed_trace
    def begin_update_put(
        self,
        resource_group_name: str,
        service_name: str,
        monitoring_setting_resource: "_models.MonitoringSettingResource",
        **kwargs: Any
    ) -> LROPoller["_models.MonitoringSettingResource"]:
        """Update the Monitoring Setting.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param monitoring_setting_resource: Parameters for the update operation.
        :type monitoring_setting_resource:
         ~azure.mgmt.appplatform.v2020_11_01_preview.models.MonitoringSettingResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either MonitoringSettingResource or the result
         of cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2020_11_01_preview.models.MonitoringSettingResource]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.MonitoringSettingResource"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # No continuation token: start the LRO by issuing the initial PUT.
        # cls=lambda keeps the raw pipeline response for the poller.
        if cont_token is None:
            raw_result = self._update_put_initial(
                resource_group_name=resource_group_name,
                service_name=service_name,
                monitoring_setting_resource=monitoring_setting_resource,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final LRO response into the model (or cls hook result).
            response = pipeline_response.http_response
            deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        # Resume from a saved state when a continuation token was supplied.
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_put.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default'}  # type: ignore
    def _update_patch_initial(
        self,
        resource_group_name: str,
        service_name: str,
        monitoring_setting_resource: "_models.MonitoringSettingResource",
        **kwargs: Any
    ) -> "_models.MonitoringSettingResource":
        # Initial call of the PATCH long-running operation; mirrors
        # _update_put_initial but issues a PATCH request.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.MonitoringSettingResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(monitoring_setting_resource, 'MonitoringSettingResource')
        request = build_update_patch_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            service_name=service_name,
            content_type=content_type,
            json=_json,
            template_url=self._update_patch_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_patch_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default'}  # type: ignore
    @distributed_trace
    def begin_update_patch(
        self,
        resource_group_name: str,
        service_name: str,
        monitoring_setting_resource: "_models.MonitoringSettingResource",
        **kwargs: Any
    ) -> LROPoller["_models.MonitoringSettingResource"]:
        """Update the Monitoring Setting.
        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param monitoring_setting_resource: Parameters for the update operation.
        :type monitoring_setting_resource:
         ~azure.mgmt.appplatform.v2020_11_01_preview.models.MonitoringSettingResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either MonitoringSettingResource or the result
         of cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2020_11_01_preview.models.MonitoringSettingResource]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.MonitoringSettingResource"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # No continuation token: start the LRO by issuing the initial PATCH.
        if cont_token is None:
            raw_result = self._update_patch_initial(
                resource_group_name=resource_group_name,
                service_name=service_name,
                monitoring_setting_resource=monitoring_setting_resource,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final LRO response into the model (or cls hook result).
            response = pipeline_response.http_response
            deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        # Resume from a saved state when a continuation token was supplied.
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_patch.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default'}  # type: ignore
| |
from .requests_mixin import RequestsMixin
from .exceptions import ResponseDispatchError
from .exceptions import InvalidMessageNameException
from .exceptions import AuthenticationError, InvalidParametersError, AuthorizationError, ResourceNotFoundError
import concurrent.futures
import time
class MessageSenderMixin(RequestsMixin):
    """Mixin that sends messages/transactions to HPIT and dispatches responses.

    A callback registered at send time is tracked per message id and invoked
    when the matching response is later polled back from the HPIT server.
    """
    def __init__(self):
        super().__init__()
        # message_id -> callable invoked with the response payload.
        self.response_callbacks = {}
        # message_id -> number of responses still expected (set to 1 on send).
        self.outstanding_responses = {}
        # When True, multiple responses are dispatched on a thread pool.
        self.should_dispatch_async = True
        self._add_hooks('pre_poll_responses', 'post_poll_responses', 'pre_dispatch_responses', 'post_dispatch_responses')
    def send(self, message_name, payload, callback=None):
        """
        Sends a message to the HPIT server. Messages are the meat of how
        HPIT works. All messages are asynchronous and non-blocking. Responses
        are not guaranteed to be prompt due to the distributed nature of HPIT. However,
        it is our goal to ensure response times are as minimal as possible. Response times
        are dependent on the network, HPIT server load, and the networking and processing
        constraints of the plugins.
        Each message consists of two main things:
        - message_name: string - The name of the message we are sending to HPIT. This will determine
        which plugins receives the message.
        - payload: dict - A Python dictionary of the data to send to HPIT and to whatever plugins
        are listening to and will process the message. The dictionary will be converted to JSON in
        route to HPIT.
        Optionally you can pass a callback, and as this message sender polls HPIT for responses
        !!!IF!!! it received such a response from a plugin your callback will be called to handle
        the response with any information from the plugin.
        Returns: requests.Response : class - A request.Response object returned from submission
        of the message. This is not the eventual response from HPIT. It is simply an acknowledgement
        the data was received. You must send in a callback to handle the actual HPIT response.
        """
        if message_name == "transaction":
            raise InvalidMessageNameException("Cannot use message_name 'transaction'. Use send_transaction() method for datashop transactions.")
        response = self._post_data('message', {
            'name': message_name,
            'payload': payload
        }).json()
        if callback:
            self.response_callbacks[response['message_id']] = callback
            self.outstanding_responses[response['message_id']] = 1
        return response
    def send_transaction(self, payload, callback=None):
        """
        This method functions identically as send, but inserts "transaction" as the message.
        This is specifically for DataShop transactions.
        See send() method for more details.
        """
        response = self._post_data('transaction', {
            'payload': payload
        }).json()
        if callback:
            self.response_callbacks[response['message_id']] = callback
            self.outstanding_responses[response['message_id']] = 1
        return response
    def _poll_responses(self):
        """
        This function polls HPIT for responses to messages we submitted earlier on.
        Only issues a network request when there are outstanding responses.
        Hooks:
            self.pre_poll_responses - If set to a callable, will be called before the poll
            request is made to HPIT.
            self.post_poll_responses - If set to a callable, will be called after the poll
            request is made to HPIT.
        Returns: dict - The list of responses from the server for earlier messages
        submitted by this message sender to HPIT. False if a hook aborted the loop.
        """
        if not self._try_hook('pre_poll_responses'):
            return False
        if self.outstanding_responses:
            responses = self._get_data('response/list')['responses']
        else:
            responses = {}
        if not self._try_hook('post_poll_responses'):
            return False
        return responses
    def _dispatch_responses(self, responses):
        """
        This function is responsible for dispatching responses to earlier messages to
        their callbacks that were set when the transaction was sent with self.send.
        Hooks:
            self.pre_dispatch_responses - If set to a callable, will be called before the
            responses are dispatched to their respective callbacks.
            self.post_dispatch_responses - If set to a callable, will be called after the
            responses are dispatched to their respective callbacks.
        Returns: boolean - True if event loop should continue. False if event loop should
        abort.
        """
        if not self._try_hook('pre_dispatch_responses'):
            return False
        if self.should_dispatch_async and len(responses) > 1:
            # Fan multiple responses out across a small thread pool.
            with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
                for res in responses:
                    executor.submit(self._dispatch_response, res)
        else:
            # BUGFIX: previously only the len(responses) == 1 case was handled
            # here, so with should_dispatch_async disabled any batch of more
            # than one response was silently dropped. Dispatch them all.
            for res in responses:
                self._dispatch_response(res)
        if not self._try_hook('post_dispatch_responses'):
            return False
        return True
    def _dispatch_response(self, res):
        """Dispatch a single response dict to its registered callback, if any."""
        try:
            message_id = res['message']['message_id']
        except KeyError:
            self.send_log_entry('Invalid response from HPIT. No message id supplied in response.')
            return
        try:
            response_payload = res['response']
        except KeyError:
            self.send_log_entry('Invalid response from HPIT. No response payload supplied.')
            return
        if message_id not in self.response_callbacks:
            self.send_log_entry('No callback registered for message id: ' + message_id)
            return
        if not callable(self.response_callbacks[message_id]):
            # BUGFIX: log message previously read "transcation id".
            self.send_log_entry("Callback registered for transaction id: " + message_id + " is not a callable.")
            return
        response_payload["message_id"] = message_id  # inject the message id in the response
        self.response_callbacks[message_id](response_payload)
        # Retire the message once all expected responses have arrived.
        self.outstanding_responses[message_id] -= 1
        if self.outstanding_responses[message_id] <= 0:
            del self.outstanding_responses[message_id]
    # Plugin or Tutor can query Message Owner
    def get_message_owner(self, message_name):
        """
        Sends a blocking request to the HPIT server to get information about who will receive
        a particular message that is sent through the system.
        Returns:
            entity_id - The owner of the message.
            None - No one "owns" this message.
        Throws:
            AuthenticationError - This entity is not signed into HPIT.
            InvalidParametersError - message_name is empty or None
        """
        if not message_name:
            raise InvalidParametersError('message_name is empty or None')
        if not isinstance(message_name, str):
            raise InvalidParametersError('message_name must be a string')
        try:
            response = self._get_data('/'.join(['message-owner', message_name]))
        except ResourceNotFoundError:
            return None
        return response['owner']
    # Plugin or Tutor can be Resource Owner
    def share_resource(self, resource_token, other_entity_ids):
        """
        Sends a blocking request to the HPIT server to share a particular resource with entities
        other than it's original owner. Only the resource owner can send this request. Once a plugin
        tells HPIT who the owner of a resource is, only that owner (NOT THE PLUGIN) can make this
        request.
        Input:
            resource_token - The resource token, as assigned by HPIT in a secure_resource request.
            other_entity_ids - Other entities who may view, edit, and work with this resource.
        Returns:
            True - All went well and now the other entities can view, edit, and work with this resource.
        Throws:
            AuthenticationError - This entity is not signed into HPIT.
            InvalidParametersError - The resource_token or other_entity_ids is invalid or empty.
            AuthorizationError - This entity is not the owner of this resource.
        """
        if not resource_token:
            raise InvalidParametersError('resource_token is empty or None')
        if not other_entity_ids:
            raise InvalidParametersError('other_entity_ids is empty or None')
        if not isinstance(resource_token, str):
            # BUGFIX: error message previously referred to 'message_name'.
            raise InvalidParametersError('resource_token must be a string')
        if not isinstance(other_entity_ids, str) and not isinstance(other_entity_ids, list):
            raise InvalidParametersError('other_entity_ids must be a string or a list')
        # BUGFIX: _post_data returns a requests.Response (see send/send_transaction,
        # which call .json() on it); the 'error' membership test must run on the
        # decoded JSON body, not on the Response object.
        response = self._post_data('share-resource', {
            'resource_id': resource_token,
            'other_entity_ids': other_entity_ids
        }).json()
        if 'error' in response and response['error'] == 'not owner':
            raise AuthorizationError('This entity is not the owner of this message.')
        # Bad responses will cause an exception. We can safely just return true.
        return True
| |
import os
import json
from ansible import constants as C
from ansible.plugins.callback import CallbackBase
from django.conf import settings
try:
    # Inside the ansible CLI, reuse the process-wide Display object.
    from __main__ import display as global_display
except ImportError:
    # Running outside the ansible CLI (e.g. embedded in Django): no shared
    # display exists, so create a local one.
    from ansible.utils.display import Display
    global_display = Display()
class AdHocCallback(CallbackBase):
    """Ansible callback for ad-hoc runs that saves per-host results to the runner's models."""
    def __init__(self, runner, host_list):
        super(AdHocCallback, self).__init__()
        self.runner = runner          # model object exposing a task_set relation
        self.host_list = host_list    # hosts targeted by this run
    @staticmethod
    def __extract_result(result):
        # Returns (host name, raw result dict) from an ansible TaskResult.
        return result._host.get_name(), result._result
    def __save_result(self, host, status, message, result):
        # Update the result row for this host on the most recent task.
        # (Renamed local: the row was previously bound to 'host', shadowing the parameter.)
        runner_task = self.runner.task_set.latest('id')
        host_result = runner_task.result_set.filter(host=host)[0]
        host_result.status = status
        host_result.message = message
        host_result.response = result
        host_result.save()
    def v2_playbook_on_play_start(self, play):
        self.runner.status = 'running'
        self.runner.save()
    def v2_playbook_on_task_start(self, task, is_conditional):
        # One result row per host, marked 'started' until a runner event updates it.
        runner_task = self.runner.task_set.create(name=task.get_name().strip())
        for host in self.host_list:
            runner_task.result_set.create(host=host, status='started', response='{}')
    def v2_playbook_on_no_hosts_matched(self):
        self.runner.message = 'No hosts matched'
        self.runner.save()
    def v2_playbook_on_stats(self, stats):
        # BUGFIX: use the print() function -- the Python 2 print statement is a
        # syntax error under Python 3; the call form works on both.
        print('play stats: ' + str(stats))
    def v2_runner_on_failed(self, result, ignore_errors=False):
        host, response = self.__extract_result(result)
        # NOTE(review): assumes response['invocation']['module_name'] is present;
        # confirm against the ansible version in use.
        module = str(response['invocation']['module_name'])
        message = module + ' failed'
        if 'exception' in response:
            message = 'Exception raised'
            response = [response]
        elif module == 'shell' or module == 'script':
            # NOTE(review): the ok-handler checks 'command' here instead of
            # 'shell' -- confirm which module set is intended.
            message = response['stdout'] + response['stderr']
        self.__save_result(host, 'failed', message, response)
    def v2_runner_on_ok(self, result):
        host, response = self.__extract_result(result)
        module = str(response['invocation']['module_name'])
        message = module + ' successful'
        status = 'ok'
        if module == 'setup':
            # Persist gathered facts to a per-host file and strip them from the
            # saved response to keep the database row small.
            facts = {'ansible_facts': response['ansible_facts']}
            filename = os.path.join(settings.FACTS_DIR, host)
            with open(filename, "w") as f:
                f.write(json.dumps(facts, indent=4))
            response['ansible_facts'] = 'saved to file'
            message = 'Facts saved to ' + filename
        elif module == 'command' or module == 'script':
            message = response['stdout'] + response['stderr']
        elif response['changed']:
            status = 'changed'
        self.__save_result(host, status, message, response)
    def v2_runner_on_skipped(self, result):
        host, _response = self.__extract_result(result)
        self.__save_result(host, 'skipped', host + ' skipped', {})
    def v2_runner_on_unreachable(self, result):
        host, response = self.__extract_result(result)
        if 'msg' in response:
            message = response['msg']
        else:
            message = 'Host unreachable'
            response = [response]
        self.__save_result(host, 'unreachable', message, response)
class PlaybookCallback(CallbackBase):
    """Ansible callback for playbook runs that saves per-host results to the runner's models."""
    def __init__(self, runner, host_list):
        super(PlaybookCallback, self).__init__()
        self.runner = runner          # model object exposing a task_set relation
        self.host_list = host_list    # hosts targeted by this run
    @staticmethod
    def _extract_result(result):
        # Returns (host name, raw result dict) from an ansible TaskResult.
        return result._host.get_name(), result._result
    def _save_result(self, host, status, message, result):
        # Update the result row for this host on the most recent task.
        # (Renamed local: the row was previously bound to 'host', shadowing the parameter.)
        runner_task = self.runner.task_set.latest('id')
        host_result = runner_task.result_set.filter(host=host)[0]
        host_result.status = status
        host_result.message = message
        host_result.response = result
        host_result.save()
    def v2_playbook_on_play_start(self, play):
        self.runner.status = 'running'
        self.runner.save()
    def v2_playbook_on_task_start(self, task, is_conditional):
        # Record the task's module so runner events can inspect it later.
        runner_task = self.runner.task_set.create(name=task.get_name().strip(), module=task.action)
        for host in self.host_list:
            runner_task.result_set.create(host=host, status='started', response='{}')
    def v2_playbook_on_no_hosts_matched(self):
        self.runner.message = 'No hosts matched'
        self.runner.save()
    def v2_playbook_on_stats(self, stats):
        # BUGFIX: use the print() function -- the Python 2 print statement is a
        # syntax error under Python 3; the call form works on both.
        print('play stats: ' + str(stats))
    def v2_runner_on_failed(self, result, ignore_errors=False):
        host, response = self._extract_result(result)
        module = self.runner.task_set.latest('id').module
        message = module + ' failed'
        if 'exception' in response:
            message = 'Exception raised'
            response = [response]
        elif module == 'command' or module == 'script':
            message = response['stdout'] + response['stderr']
        elif 'msg' in response:
            message = response['msg']
        self._save_result(host, 'failed', message, response)
    def v2_runner_on_ok(self, result):
        host, response = self._extract_result(result)
        module = self.runner.task_set.latest('id').module
        message = module + ' successful'
        status = 'ok'
        if module == 'setup':
            # Persist gathered facts to a per-host file and strip them from the
            # saved response to keep the database row small.
            facts = {'ansible_facts': response['ansible_facts']}
            filename = os.path.join(settings.FACTS_DIR, host)
            with open(filename, "w") as f:
                f.write(json.dumps(facts, indent=4))
            response['ansible_facts'] = 'saved to file'
            message = 'Facts saved to ' + filename
        elif module == 'command' or module == 'script':
            message = response['stdout'] + response['stderr']
        elif response['changed']:
            status = 'changed'
        self._save_result(host, status, message, response)
    def v2_runner_on_skipped(self, result):
        host, _response = self._extract_result(result)
        self._save_result(host, 'skipped', host + ' skipped', {})
    def v2_runner_on_unreachable(self, result):
        host, response = self._extract_result(result)
        if 'msg' in response:
            message = response['msg']
        else:
            message = 'Host unreachable'
            response = [response]
        self._save_result(host, 'unreachable', message, response)
class TestCallback(CallbackBase):
def set_play_context(self, play_context):
pass
def on_any(self, *args, **kwargs):
pass
def runner_on_failed(self, host, res, ignore_errors=False):
pass
def runner_on_ok(self, host, res):
pass
def runner_on_skipped(self, host, item=None):
pass
def runner_on_unreachable(self, host, res):
pass
def runner_on_no_hosts(self):
pass
def runner_on_async_poll(self, host, res, jid, clock):
pass
def runner_on_async_ok(self, host, res, jid):
pass
def runner_on_async_failed(self, host, res, jid):
pass
def playbook_on_start(self):
pass
def playbook_on_notify(self, host, handler):
pass
def playbook_on_no_hosts_matched(self):
pass
def playbook_on_no_hosts_remaining(self):
pass
def playbook_on_task_start(self, name, is_conditional):
pass
def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
pass
def playbook_on_setup(self):
pass
def playbook_on_import_for_host(self, host, imported_file):
pass
def playbook_on_not_import_for_host(self, host, missing_file):
pass
def playbook_on_play_start(self, name):
pass
def playbook_on_stats(self, stats):
pass
def on_file_diff(self, host, diff):
pass
def v2_on_any(self, *args, **kwargs):
self.on_any(args, kwargs)
def v2_runner_on_failed(self, result, ignore_errors=False):
host = result._host.get_name()
self.runner_on_failed(host, result._result, ignore_errors)
def v2_runner_on_ok(self, result):
host = result._host.get_name()
self.runner_on_ok(host, result._result)
def v2_runner_on_skipped(self, result):
if C.DISPLAY_SKIPPED_HOSTS:
host = result._host.get_name()
self.runner_on_skipped(host, self._get_item(getattr(result._result,'results',{})))
def v2_runner_on_unreachable(self, result):
host = result._host.get_name()
self.runner_on_unreachable(host, result._result)
def v2_runner_on_no_hosts(self, task):
self.runner_on_no_hosts()
def v2_runner_on_async_poll(self, result):
host = result._host.get_name()
jid = result._result.get('ansible_job_id')
#FIXME, get real clock
clock = 0
self.runner_on_async_poll(host, result._result, jid, clock)
def v2_runner_on_async_ok(self, result):
host = result._host.get_name()
jid = result._result.get('ansible_job_id')
self.runner_on_async_ok(host, result._result, jid)
def v2_runner_on_async_failed(self, result):
host = result._host.get_name()
jid = result._result.get('ansible_job_id')
self.runner_on_async_failed(host, result._result, jid)
def v2_runner_on_file_diff(self, result, diff):
pass #no v1 correspondance
def v2_playbook_on_start(self, playbook):
self.playbook_on_start()
def v2_playbook_on_notify(self, result, handler):
host = result._host.get_name()
self.playbook_on_notify(host, handler)
def v2_playbook_on_no_hosts_matched(self):
self.playbook_on_no_hosts_matched()
def v2_playbook_on_no_hosts_remaining(self):
self.playbook_on_no_hosts_remaining()
def v2_playbook_on_task_start(self, task, is_conditional):
self.playbook_on_task_start(task, is_conditional)
def v2_playbook_on_cleanup_task_start(self, task):
pass #no v1 correspondance
def v2_playbook_on_handler_task_start(self, task):
pass #no v1 correspondance
def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
self.playbook_on_vars_prompt(varname, private, prompt, encrypt, confirm, salt_size, salt, default)
def v2_playbook_on_setup(self):
self.playbook_on_setup()
def v2_playbook_on_import_for_host(self, result, imported_file):
    """v2->v1 adapter: forward an import event with the host name resolved."""
    self.playbook_on_import_for_host(result._host.get_name(), imported_file)
def v2_playbook_on_not_import_for_host(self, result, missing_file):
    """v2->v1 adapter: forward a failed-import event with the host name resolved."""
    self.playbook_on_not_import_for_host(result._host.get_name(), missing_file)
def v2_playbook_on_play_start(self, play):
    # v2->v1 adapter: v1 only receives the play's name, not the play object.
    self.playbook_on_play_start(play.name)
def v2_playbook_on_stats(self, stats):
    # v2->v1 adapter: the stats object is passed through unchanged.
    self.playbook_on_stats(stats)
def v2_on_file_diff(self, result):
    """v2->v1 adapter: forward a diff only when the result carries one."""
    res = result._result
    # Key-presence test (not truthiness) so an empty diff is still forwarded,
    # exactly as the original behaved.
    if 'diff' in res:
        self.on_file_diff(result._host.get_name(), res['diff'])
def v2_playbook_on_item_ok(self, result):
    """No-op: item-level 'ok' has no v1 callback equivalent."""
def v2_playbook_on_item_failed(self, result):
    """No-op: item-level 'failed' has no v1 callback equivalent."""
def v2_playbook_on_item_skipped(self, result):
    """No-op: item-level 'skipped' has no v1 callback equivalent."""
def v2_playbook_on_include(self, included_file):
    """No-op: include events have no v1 callback equivalent."""
def v2_playbook_item_on_ok(self, result):
    """No-op: item-level 'ok' (playbook variant) has no v1 equivalent."""
def v2_playbook_item_on_failed(self, result):
    """No-op: item-level 'failed' (playbook variant) has no v1 equivalent."""
def v2_playbook_item_on_skipped(self, result):
    """No-op: item-level 'skipped' (playbook variant) has no v1 equivalent."""
| |
#coding: UTF-8
from seahub.share.models import FileShare, UploadLinkShare
from seahub.test_utils import BaseTestCase
from tests.common.utils import urljoin, randstring
from tests.api.apitestbase import ApiTestBase
from tests.api.urls import SHARED_LINKS_URL
#class SharesApiTest(ApiTestBase):
# def test_create_file_shared_link(self):
# with self.get_tmp_repo() as repo:
# fname, _ = self.create_file(repo)
# fsurl = urljoin(repo.file_url, 'shared-link')
# data = {
# 'type': 'f',
# 'p': '/' + fname,
# }
# res = self.put(fsurl, data=data, expected=201)
# self.assertRegexpMatches(res.headers['Location'], \
# r'http(.*)/f/(\w{10,10})/')
#
# res = self.get(SHARED_LINKS_URL).json()
# self.assertNotEmpty(res)
# for fileshare in res['fileshares']:
# self.assertIsNotNone(fileshare['username'])
# self.assertIsNotNone(fileshare['repo_id'])
# #self.assertIsNotNone(fileshare['ctime'])
# self.assertIsNotNone(fileshare['s_type'])
# self.assertIsNotNone(fileshare['token'])
# self.assertIsNotNone(fileshare['view_cnt'])
# self.assertIsNotNone(fileshare['path'])
#
#
class FileSharedLinkApiTest(BaseTestCase):
    """Tests for PUT /api2/repos/<repo-id>/file/shared-link/.

    Covers download links for files and directories (with optional
    ``password`` and ``expire`` parameters) and upload links.

    Notes:
    - Several test names spell "expire" as "exipre"; the names are kept
      unchanged so the public test identifiers stay stable.
    - The original tests stored the Location header in a never-read
      ``self.dir_link_location`` attribute; those dead assignments were
      dropped.
    """

    def tearDown(self):
        self.remove_repo()

    def _put_link(self, body):
        """PUT a form-encoded *body* to the shared-link endpoint and
        return the response."""
        return self.client.put(
            '/api2/repos/%s/file/shared-link/' % (self.repo.id),
            body,
            'application/x-www-form-urlencoded',
        )

    def _location(self, resp):
        """Return the Location header of *resp*.

        NOTE(review): this reads the private ``_headers`` attribute of the
        Django response, matching the original tests; ``resp['Location']``
        is the public API — confirm before modernizing.
        """
        return resp._headers['location'][1]

    def _token(self, resp):
        """Extract the share-link token from the Location header."""
        return self._location(resp).split('/')[-2]

    def test_create_file_shared_link_with_invalid_path(self):
        self.login_as(self.user)

        # Wrong parameter name ('ps' instead of 'p').
        resp = self._put_link("ps=%s&type=f" % (self.file))
        self.assertEqual(400, resp.status_code)

        # Missing path parameter entirely.
        resp = self._put_link("type=f")
        self.assertEqual(400, resp.status_code)

        # Path that does not exist in the repo (and has no leading slash).
        resp = self._put_link("p=%s&type=f" % randstring(6))
        self.assertEqual(400, resp.status_code)

    def test_can_create_file_download_link(self):
        self.login_as(self.user)

        resp = self._put_link("p=%s&type=f" % (self.file))
        self.assertEqual(201, resp.status_code)
        self.assertRegexpMatches(self._location(resp),
                                 r'http(.*)/f/(\w{10,10})/')
        self.assertIsNotNone(FileShare.objects.get(token=self._token(resp)))

    def test_can_create_file_download_link_with_exipre(self):
        self.login_as(self.user)

        resp = self._put_link("p=%s&type=f&expire=5" % (self.file))
        self.assertEqual(201, resp.status_code)
        self.assertRegexpMatches(self._location(resp),
                                 r'http(.*)/f/(\w{10,10})/')
        fileshare = FileShare.objects.get(token=self._token(resp))
        self.assertIsNotNone(fileshare.expire_date)

    def test_can_create_file_download_link_with_password(self):
        self.login_as(self.user)

        resp = self._put_link("p=%s&type=f&password=123" % (self.file))
        self.assertEqual(201, resp.status_code)
        self.assertRegexpMatches(self._location(resp),
                                 r'http(.*)/f/(\w{10,10})/')
        fileshare = FileShare.objects.get(token=self._token(resp))
        self.assertIsNotNone(fileshare.password)

    def test_can_create_file_download_link_with_password_exipre(self):
        self.login_as(self.user)

        resp = self._put_link("p=%s&type=f&password=123&expire=5" % (self.file))
        self.assertEqual(201, resp.status_code)
        self.assertRegexpMatches(self._location(resp),
                                 r'http(.*)/f/(\w{10,10})/')
        fileshare = FileShare.objects.get(token=self._token(resp))
        self.assertIsNotNone(fileshare.expire_date)
        self.assertIsNotNone(fileshare.password)

    def test_can_create_dir_download_link(self):
        self.login_as(self.user)

        resp = self._put_link("p=%s&type=d" % (self.folder))
        self.assertEqual(201, resp.status_code)
        self.assertRegexpMatches(self._location(resp),
                                 r'http(.*)/d/(\w{10,10})/')
        self.assertIsNotNone(FileShare.objects.get(token=self._token(resp)))

    def test_can_create_dir_download_link_with_exipre(self):
        self.login_as(self.user)

        resp = self._put_link("p=%s&type=d&expire=5" % (self.folder))
        self.assertEqual(201, resp.status_code)
        self.assertRegexpMatches(self._location(resp),
                                 r'http(.*)/d/(\w{10,10})/')
        fileshare = FileShare.objects.get(token=self._token(resp))
        self.assertIsNotNone(fileshare.expire_date)

    def test_can_create_dir_download_link_with_password(self):
        self.login_as(self.user)

        resp = self._put_link("p=%s&type=d&password=123" % (self.folder))
        self.assertEqual(201, resp.status_code)
        self.assertRegexpMatches(self._location(resp),
                                 r'http(.*)/d/(\w{10,10})/')
        fileshare = FileShare.objects.get(token=self._token(resp))
        self.assertIsNotNone(fileshare.password)

    def test_can_create_dir_download_link_with_password_exipre(self):
        self.login_as(self.user)

        resp = self._put_link("p=%s&type=d&password=123&expire=5" % (self.folder))
        self.assertEqual(201, resp.status_code)
        self.assertRegexpMatches(self._location(resp),
                                 r'http(.*)/d/(\w{10,10})/')
        fileshare = FileShare.objects.get(token=self._token(resp))
        self.assertIsNotNone(fileshare.expire_date)
        self.assertIsNotNone(fileshare.password)

    def test_can_create_dir_upload_link(self):
        self.login_as(self.user)

        resp = self._put_link("p=%s&type=d&share_type=upload" % (self.folder))
        self.assertEqual(201, resp.status_code)
        self.assertRegexpMatches(self._location(resp),
                                 r'http(.*)/u/d/(\w{10,10})/')
        self.assertIsNotNone(
            UploadLinkShare.objects.get(token=self._token(resp)))

    def test_can_create_dir_upload_link_with_password(self):
        self.login_as(self.user)

        resp = self._put_link(
            "p=%s&type=d&share_type=upload&password=123" % (self.folder))
        self.assertEqual(201, resp.status_code)
        self.assertRegexpMatches(self._location(resp),
                                 r'http(.*)/u/d/(\w{10,10})/')
        uls = UploadLinkShare.objects.get(token=self._token(resp))
        self.assertIsNotNone(uls.password)
class SharedFileDetailApiTest(BaseTestCase):
    """Tests for GET /api2/f/<token>/detail/ on password-protected links."""

    def tearDown(self):
        self.remove_repo()

    def _add_file_shared_link_with_password(self):
        """Create a password-protected file link; return (token, password)."""
        secret = randstring(6)
        link = FileShare.objects.create_file_link(
            self.user.username, self.repo.id, self.file, secret)
        return (link.token, secret)

    def test_get_file_share_detail_with_password(self):
        self.login_as(self.user)
        token, secret = self._add_file_shared_link_with_password()

        # Correct password: detail is returned.
        resp = self.client.get('/api2/f/%s/detail/' % (token) + '?password=' + secret)
        self.assertEqual(200, resp.status_code)
        self.assertIsNotNone(resp)

    def test_get_file_share_detail_with_invalid_password(self):
        self.login_as(self.user)
        token, secret = self._add_file_shared_link_with_password()
        base_url = '/api2/f/%s/detail/' % (token)

        # Wrong password is rejected.
        resp = self.client.get(base_url + '?password=' + randstring(5))
        self.assertEqual(403, resp.status_code)

        # Missing password is rejected as well.
        resp = self.client.get(base_url)
        self.assertEqual(403, resp.status_code)
class SharedDirApiTest(BaseTestCase):
    """Tests for listing entries of a shared directory via /api2/d/<token>/dir/."""

    def tearDown(self):
        self.remove_repo()

    def _add_dir_download_link(self):
        """Share self.folder, populate it with a sub-folder and a file,
        and return the share token."""
        link = FileShare.objects.create_dir_link(self.user.username,
                                                 self.repo.id, self.folder)
        self.create_folder(repo_id=self.repo.id,
                           parent_dir=self.folder,
                           dirname='sub-folder',
                           username='test@test.com')
        self.create_file(repo_id=self.repo.id,
                         parent_dir=self.folder,
                         filename='sub-test.txt',
                         username='test@test.com')
        return link.token

    def test_can_get_direntry_in_dir_download_link(self):
        self.login_as(self.user)
        token = self._add_dir_download_link()

        resp = self.client.get('/api2/d/%s/dir/' % (token))
        self.assertEqual(200, resp.status_code)
        self.assertIsNotNone(resp)
class SharedLinksApiTest(BaseTestCase):
    """Tests for GET /api2/shared-links/?t=<token>.

    NOTE(review): the test names say "delete" but the requests issued are
    plain GETs — presumably the endpoint interprets ``t=`` this way, or the
    names are stale; verify against the view before renaming.
    """

    def tearDown(self):
        self.remove_repo()

    def _add_file_shared_link(self):
        """Create a directory download link and return its token."""
        link = FileShare.objects.create_dir_link(self.user.username,
                                                 self.repo.id, self.folder)
        return link.token

    def _add_upload_shared_link(self):
        """Create an upload link and return its token."""
        link = UploadLinkShare.objects.create_upload_link_share(
            self.user.username, self.repo.id, self.folder)
        return link.token

    def test_can_delete_file_shared_link(self):
        self.login_as(self.user)
        resp = self.client.get(
            '/api2/shared-links/?t=%s' % (self._add_file_shared_link()),
        )
        self.assertEqual(200, resp.status_code)

    def test_can_delete_upload_shared_link(self):
        self.login_as(self.user)
        resp = self.client.get(
            '/api2/shared-links/?t=%s' % (self._add_upload_shared_link()),
        )
        self.assertEqual(200, resp.status_code)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for folding batch norm layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.quantize.python import fold_batch_norms
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import saver as saver_lib
# Short aliases for the contrib layer constructors used throughout the tests.
batch_norm = layers.batch_norm
conv2d = layers.conv2d
fully_connected = layers.fully_connected
separable_conv2d = layers.separable_conv2d
# TODO(suharshs): Use parameterized test once OSS TF supports it.
class FoldBatchNormsTest(test_util.TensorFlowTestCase):
  """Tests that FoldBatchNorms rewrites conv/FC/depthwise + batch-norm graphs
  into the expected folded op structure and preserves numerical results.

  Fix applied in review: the private helper was misspelled
  ``_BathNormBiasName``; it is renamed to ``_BatchNormBiasName`` here (all
  call sites are inside this class).
  """

  def _RunTestOverParameters(self, test_fn):
    """Runs test_fn once per tuple in the parameter matrix below."""
    parameters_list = [
        # (relu, relu_op_name, with_bypass, has_scaling, fused_batch_norm,
        # freeze_batch_norm_delay)
        (nn_ops.relu6, 'Relu6', False, False, False, 100),
        (nn_ops.relu, 'Relu', False, False, False, None),
        (nn_ops.relu6, 'Relu6', True, False, False, 100),
        (nn_ops.relu, 'Relu', True, False, False, None),
        (nn_ops.relu6, 'Relu6', False, True, False, 100),
        (nn_ops.relu, 'Relu', False, True, False, None),
        (nn_ops.relu6, 'Relu6', True, True, False, 100),
        (nn_ops.relu, 'Relu', True, True, False, None),
        # Fused batch norm always has scaling enabled.
        (nn_ops.relu6, 'Relu6', False, True, True, None),
        (nn_ops.relu, 'Relu', False, True, True, 100),
        (nn_ops.relu6, 'Relu6', True, True, True, None),
        (nn_ops.relu, 'Relu', True, True, True, 100),
    ]
    for params in parameters_list:
      test_fn(params[0], params[1], params[2], params[3], params[4], params[5])

  def _TestFoldConv2d(self, relu, relu_op_name, with_bypass, has_scaling,
                      fused_batch_norm, freeze_batch_norm_delay):
    """Tests folding cases: inputs -> Conv2d with batch norm -> Relu*.

    Args:
      relu: Callable that returns an Operation, a factory method for the Relu*.
      relu_op_name: String, name of the Relu* operation.
      with_bypass: Bool, when true there is an extra connection added from
        inputs to just before Relu*.
      has_scaling: Bool, when true the batch norm has scaling.
      fused_batch_norm: Bool, when true the batch norm is fused.
      freeze_batch_norm_delay: None or the number of steps after which training
        switches to using frozen mean and variance
    """
    g = ops.Graph()
    with g.as_default():
      batch_size, height, width = 5, 128, 128
      inputs = array_ops.zeros((batch_size, height, width, 3))
      out_depth = 3 if with_bypass else 32
      stride = 1 if with_bypass else 2
      activation_fn = None if with_bypass else relu
      scope = 'test/test2' if with_bypass else 'test'
      node = conv2d(
          inputs,
          out_depth, [5, 5],
          stride=stride,
          padding='SAME',
          weights_initializer=self._WeightInit(0.09),
          activation_fn=activation_fn,
          normalizer_fn=batch_norm,
          normalizer_params=self._BatchNormParams(
              scale=has_scaling, fused=fused_batch_norm),
          scope=scope)
      if with_bypass:
        node = math_ops.add(inputs, node, name='test/Add')
        relu(node, name='test/' + relu_op_name)

      fold_batch_norms.FoldBatchNorms(
          g, is_training=True, freeze_batch_norm_delay=freeze_batch_norm_delay)

    # The folded graph must multiply weights by the BN scale before the conv...
    folded_mul = g.get_operation_by_name(scope + '/mul_fold')
    self.assertEqual(folded_mul.type, 'Mul')
    self._AssertInputOpsAre(folded_mul, [
        scope + '/correction_mult',
        self._BatchNormMultiplierName(scope, has_scaling, fused_batch_norm)
    ])
    self._AssertOutputGoesToOps(folded_mul, g, [scope + '/Conv2D_Fold'])

    folded_conv = g.get_operation_by_name(scope + '/Conv2D_Fold')
    self.assertEqual(folded_conv.type, 'Conv2D')
    self._AssertInputOpsAre(folded_conv,
                            [scope + '/mul_fold', inputs.op.name])
    self._AssertOutputGoesToOps(folded_conv, g, [scope + '/post_conv_mul'])

    # ...and add the folded bias afterwards.
    folded_add = g.get_operation_by_name(scope + '/add_fold')
    self.assertEqual(folded_add.type, 'Add')
    self._AssertInputOpsAre(folded_add, [
        scope + '/correction_add',
        self._BatchNormBiasName(scope, fused_batch_norm)
    ])
    output_op_names = ['test/Add' if with_bypass else 'test/' + relu_op_name]
    self._AssertOutputGoesToOps(folded_add, g, output_op_names)

    for op in g.get_operations():
      self.assertFalse('//' in op.name, 'Double slash in op %s' % op.name)

  def testFoldConv2d(self):
    self._RunTestOverParameters(self._TestFoldConv2d)

  def _TestFoldConv2dUnknownShape(self, relu, relu_op_name, with_bypass,
                                  has_scaling, fused_batch_norm,
                                  freeze_batch_norm_delay):
    """Tests folding cases: inputs -> Conv2d with batch norm -> Relu*.

    Tests that folding works even with an input shape where some dimensions are
    not known (i.e. None).

    Args:
      relu: Callable that returns an Operation, a factory method for the Relu*.
      relu_op_name: String, name of the Relu* operation.
      with_bypass: Bool, when true there is an extra connection added from
        inputs to just before Relu*.
      has_scaling: Bool, when true the batch norm has scaling.
      fused_batch_norm: Bool, when true the batch norm is fused.
      freeze_batch_norm_delay: None or the number of steps after which training
        switches to using frozen mean and variance
    """
    g = ops.Graph()
    with g.as_default():
      # Spatial dimensions are unknown; only batch and channels are fixed.
      inputs = array_ops.placeholder(dtypes.float32, shape=(5, None, None, 3))
      out_depth = 3 if with_bypass else 32
      stride = 1 if with_bypass else 2
      activation_fn = None if with_bypass else relu
      scope = 'test/test2' if with_bypass else 'test'
      node = conv2d(
          inputs,
          out_depth, [5, 5],
          stride=stride,
          padding='SAME',
          weights_initializer=self._WeightInit(0.09),
          activation_fn=activation_fn,
          normalizer_fn=batch_norm,
          normalizer_params=self._BatchNormParams(
              scale=has_scaling, fused=fused_batch_norm),
          scope=scope)
      if with_bypass:
        node = math_ops.add(inputs, node, name='test/Add')
        relu(node, name='test/' + relu_op_name)

      fold_batch_norms.FoldBatchNorms(
          g, is_training=True, freeze_batch_norm_delay=freeze_batch_norm_delay)

    folded_mul = g.get_operation_by_name(scope + '/mul_fold')
    self.assertEqual(folded_mul.type, 'Mul')
    self._AssertInputOpsAre(folded_mul, [
        scope + '/correction_mult',
        self._BatchNormMultiplierName(scope, has_scaling, fused_batch_norm)
    ])
    self._AssertOutputGoesToOps(folded_mul, g, [scope + '/Conv2D_Fold'])

    folded_conv = g.get_operation_by_name(scope + '/Conv2D_Fold')
    self.assertEqual(folded_conv.type, 'Conv2D')
    self._AssertInputOpsAre(folded_conv, [scope + '/mul_fold', inputs.op.name])
    self._AssertOutputGoesToOps(folded_conv, g, [scope + '/post_conv_mul'])

    folded_add = g.get_operation_by_name(scope + '/add_fold')
    self.assertEqual(folded_add.type, 'Add')
    self._AssertInputOpsAre(folded_add, [
        scope + '/correction_add',
        self._BatchNormBiasName(scope, fused_batch_norm)
    ])
    output_op_names = ['test/Add' if with_bypass else 'test/' + relu_op_name]
    self._AssertOutputGoesToOps(folded_add, g, output_op_names)

    for op in g.get_operations():
      self.assertFalse('//' in op.name, 'Double slash in op %s' % op.name)

  def testFoldConv2dUnknownShape(self):
    self._RunTestOverParameters(self._TestFoldConv2dUnknownShape)

  def _TestFoldFullyConnectedLayer(self, relu, relu_op_name, with_bypass,
                                   has_scaling, fused_batch_norm,
                                   freeze_batch_norm_delay):
    """Tests folding cases: inputs -> FC with batch norm -> Relu*.

    Args:
      relu: Callable that returns an Operation, a factory method for the Relu*.
      relu_op_name: String, name of the Relu* operation.
      with_bypass: Bool, when true there is an extra connection added from
        inputs to just before Relu*.
      has_scaling: Bool, when true the batch norm has scaling.
      fused_batch_norm: Bool, when true the batch norm is fused.
      freeze_batch_norm_delay: None or the number of steps after which training
        switches to using frozen mean and variance
    """
    g = ops.Graph()
    with g.as_default():
      batch_size, depth = 5, 256
      inputs = array_ops.zeros((batch_size, depth))
      out_depth = 256 if with_bypass else 128
      activation_fn = None if with_bypass else relu
      scope = 'test/test2' if with_bypass else 'test'
      node = fully_connected(
          inputs,
          out_depth,
          weights_initializer=self._WeightInit(0.03),
          activation_fn=activation_fn,
          normalizer_fn=batch_norm,
          normalizer_params=self._BatchNormParams(
              scale=has_scaling, fused=fused_batch_norm),
          scope=scope)
      if with_bypass:
        node = math_ops.add(inputs, node, name='test/Add')
        relu(node, name='test/' + relu_op_name)

      fold_batch_norms.FoldBatchNorms(
          g, is_training=True, freeze_batch_norm_delay=freeze_batch_norm_delay)

    folded_mul = g.get_operation_by_name(scope + '/mul_fold')
    self.assertEqual(folded_mul.type, 'Mul')
    self._AssertInputOpsAre(folded_mul, [
        scope + '/correction_mult',
        self._BatchNormMultiplierName(scope, has_scaling, fused_batch_norm)
    ])
    self._AssertOutputGoesToOps(folded_mul, g, [scope + '/MatMul_Fold'])

    folded_conv = g.get_operation_by_name(scope + '/MatMul_Fold')
    self.assertEqual(folded_conv.type, 'MatMul')
    self._AssertInputOpsAre(folded_conv,
                            [scope + '/mul_fold', inputs.op.name])
    self._AssertOutputGoesToOps(folded_conv, g, [scope + '/post_conv_mul'])

    folded_add = g.get_operation_by_name(scope + '/add_fold')
    self.assertEqual(folded_add.type, 'Add')
    self._AssertInputOpsAre(folded_add, [
        scope + '/correction_add',
        self._BatchNormBiasName(scope, fused_batch_norm)
    ])
    output_op_names = ['test/Add' if with_bypass else 'test/' + relu_op_name]
    self._AssertOutputGoesToOps(folded_add, g, output_op_names)

    for op in g.get_operations():
      self.assertFalse('//' in op.name, 'Double slash in op %s' % op.name)

  def testFoldFullyConnectedLayer(self):
    self._RunTestOverParameters(self._TestFoldFullyConnectedLayer)

  def _TestFoldDepthwiseConv2d(self, relu, relu_op_name, with_bypass,
                               has_scaling, fused_batch_norm,
                               freeze_batch_norm_delay):
    """Tests folding: inputs -> DepthwiseConv2d with batch norm -> Relu*.

    Args:
      relu: Callable that returns an Operation, a factory method for the Relu*.
      relu_op_name: String, name of the Relu* operation.
      with_bypass: Bool, when true there is an extra connection added from
        inputs to just before Relu*.
      has_scaling: Bool, when true the batch norm has scaling.
      fused_batch_norm: Bool, when true the batch norm is fused.
      freeze_batch_norm_delay: None or the number of steps after which training
        switches to using frozen mean and variance
    """
    g = ops.Graph()
    with g.as_default():
      batch_size, height, width = 5, 128, 128
      inputs = array_ops.zeros((batch_size, height, width, 3))
      stride = 1 if with_bypass else 2
      activation_fn = None if with_bypass else relu
      scope = 'test/test2' if with_bypass else 'test'
      # NOTE(review): depth_multiplier is passed as the float 1.0, kept
      # from the original — confirm the layer accepts a non-int multiplier.
      node = separable_conv2d(
          inputs,
          None, [5, 5],
          stride=stride,
          depth_multiplier=1.0,
          padding='SAME',
          weights_initializer=self._WeightInit(0.09),
          activation_fn=activation_fn,
          normalizer_fn=batch_norm,
          normalizer_params=self._BatchNormParams(
              scale=has_scaling, fused=fused_batch_norm),
          scope=scope)
      if with_bypass:
        node = math_ops.add(inputs, node, name='test/Add')
        relu(node, name='test/' + relu_op_name)

      fold_batch_norms.FoldBatchNorms(
          g, is_training=True, freeze_batch_norm_delay=freeze_batch_norm_delay)

    folded_mul = g.get_operation_by_name(scope + '/mul_fold')
    self.assertEqual(folded_mul.type, 'Mul')
    # Depthwise folding reshapes the per-channel scale before the multiply.
    if fused_batch_norm:
      scale_reshape_op_name = scope + '/BatchNorm_Fold/scale_reshape'
    else:
      scale_reshape_op_name = scope + '/scale_reshape'
    self._AssertInputOpsAre(folded_mul,
                            [scope + '/correction_mult', scale_reshape_op_name])
    self._AssertOutputGoesToOps(folded_mul, g, [scope + '/depthwise_Fold'])

    scale_reshape = g.get_operation_by_name(scale_reshape_op_name)
    self.assertEqual(scale_reshape.type, 'Reshape')
    self._AssertInputOpsAre(scale_reshape, [
        self._BatchNormMultiplierName(scope, has_scaling, fused_batch_norm),
        scale_reshape_op_name + '/shape'
    ])
    self._AssertOutputGoesToOps(scale_reshape, g, [scope + '/mul_fold'])

    folded_conv = g.get_operation_by_name(scope + '/depthwise_Fold')
    self.assertEqual(folded_conv.type, 'DepthwiseConv2dNative')
    self._AssertInputOpsAre(folded_conv,
                            [scope + '/mul_fold', inputs.op.name])
    self._AssertOutputGoesToOps(folded_conv, g, [scope + '/post_conv_mul'])

    folded_add = g.get_operation_by_name(scope + '/add_fold')
    self.assertEqual(folded_add.type, 'Add')
    self._AssertInputOpsAre(folded_add, [
        scope + '/correction_add',
        self._BatchNormBiasName(scope, fused_batch_norm)
    ])
    output_op_names = ['test/Add' if with_bypass else 'test/' + relu_op_name]
    self._AssertOutputGoesToOps(folded_add, g, output_op_names)

    for op in g.get_operations():
      self.assertFalse('//' in op.name, 'Double slash in op %s' % op.name)

  def testFoldDepthwiseConv2d(self):
    self._RunTestOverParameters(self._TestFoldDepthwiseConv2d)

  def _TestCompareFoldAndUnfolded(self, relu, relu_op_name, with_bypass,
                                  has_scaling, fused_batch_norm,
                                  freeze_batch_norm_delay):
    """Tests that running folded and unfolded BN returns the same results.

    Args:
      relu: Callable that returns an Operation, a factory method for the Relu*.
      relu_op_name: String, name of the Relu* operation.
      with_bypass: Bool, when true there is an extra connection added from
        inputs to just before Relu*.
      has_scaling: Bool, when true the batch norm has scaling.
      fused_batch_norm: Bool, when true the batch norm is fused.
      freeze_batch_norm_delay: None or the number of steps after which training
        switches to using frozen mean and variance
    """
    random_seed.set_random_seed(1234)
    unfolded_g = ops.Graph()
    with unfolded_g.as_default():
      batch_size, height, width = 5, 128, 128
      inputs = random_ops.random_uniform(
          (batch_size, height, width, 3), dtype=dtypes.float32, seed=1234)
      out_depth = 3 if with_bypass else 32
      stride = 1 if with_bypass else 2
      activation_fn = None if with_bypass else relu
      scope = 'test/test2' if with_bypass else 'test'
      node = conv2d(
          inputs,
          out_depth, [5, 5],
          stride=stride,
          padding='SAME',
          weights_initializer=self._WeightInit(0.09),
          activation_fn=activation_fn,
          normalizer_fn=batch_norm,
          normalizer_params=self._BatchNormParams(
              scale=has_scaling, fused=fused_batch_norm),
          scope=scope)
      if with_bypass:
        node = math_ops.add(inputs, node, name='test/Add')
      relu_node = relu(node, name='test/' + relu_op_name)

    # Copy the graph, fold batch norms only in the copy, then compare runs.
    folded_g = self._CopyGraph(unfolded_g)
    with folded_g.as_default():
      fold_batch_norms.FoldBatchNorms(
          folded_g,
          is_training=True,
          freeze_batch_norm_delay=freeze_batch_norm_delay)

    with session.Session(graph=unfolded_g) as sess:
      sess.run(variables.global_variables_initializer())
      grad_node = gradients.gradients(relu_node, inputs)
      results = sess.run([relu_node, grad_node])
      unfolded_forward, unfolded_backward = results[0], results[1]

    with session.Session(graph=folded_g) as sess:
      sess.run(variables.global_variables_initializer())
      relu_node = folded_g.get_tensor_by_name(relu_node.name)
      inputs = folded_g.get_tensor_by_name(inputs.name)
      grad_node = gradients.gradients(relu_node, inputs)
      results = sess.run([relu_node, grad_node])
      folded_forward, folded_backward = results[0], results[1]

    # Check that the folded and unfolded results match.
    self.assertAllClose(unfolded_forward, folded_forward, atol=1e-3)
    self.assertAllClose(unfolded_backward, folded_backward, atol=1e-3)

  def testCompareFoldAndUnfolded(self):
    self._RunTestOverParameters(self._TestCompareFoldAndUnfolded)

  def _BatchNormParams(self, scale=True, fused=False):
    """Returns normalizer_params for layers.batch_norm."""
    return {
        'center': True,
        'scale': scale,
        'decay': 1.0 - 0.003,
        'fused': fused
    }

  def _BatchNormMultiplierName(self, scope, has_scaling, fused):
    """Returns the name of the op producing the batch-norm multiplier."""
    if has_scaling:
      if fused:
        return scope + '/BatchNorm_Fold/mul'
      return scope + '/BatchNorm/batchnorm/mul'
    return scope + '/BatchNorm/batchnorm/Rsqrt'

  def _BatchNormBiasName(self, scope, fused):
    """Returns the name of the op producing the batch-norm bias term."""
    if fused:
      return scope + '/BatchNorm_Fold/bias'
    return scope + '/BatchNorm/batchnorm/sub'

  def _WeightInit(self, stddev):
    """Returns a truncated normal variable initializer.

    Function is defined purely to shorten the name so that it stops wrapping.

    Args:
      stddev: Standard deviation of normal variable.

    Returns:
      An initializer that initializes with a truncated normal variable.
    """
    return init_ops.truncated_normal_initializer(stddev=stddev, seed=1234)

  def _AssertInputOpsAre(self, op, in_op_names):
    """Asserts that all inputs to op come from in_op_names (disregarding order).

    Args:
      op: Operation to check inputs for.
      in_op_names: List of strings, operations where all op's inputs should
        come from.
    """
    expected_inputs = [in_op_name + ':0' for in_op_name in in_op_names]
    self.assertItemsEqual([t.name for t in op.inputs], expected_inputs)

  def _AssertOutputGoesToOps(self, op, graph, out_op_names):
    """Asserts that outputs from op go to out_op_names (and perhaps others).

    Args:
      op: Operation to check outputs for.
      graph: Graph where output operations are located.
      out_op_names: List of strings, operations where op's outputs should go.
    """
    for out_op_name in out_op_names:
      out_op = graph.get_operation_by_name(out_op_name)
      self.assertIn(op.outputs[0].name, [str(t.name) for t in out_op.inputs])

  def _CopyGraph(self, graph):
    """Return a copy of graph."""
    meta_graph = saver_lib.export_meta_graph(
        graph=graph, collection_list=graph.get_all_collection_keys())
    graph_copy = ops.Graph()
    with graph_copy.as_default():
      _ = saver_lib.import_meta_graph(meta_graph)
    return graph_copy
# Standard TensorFlow test entry point: run all test cases in this module.
if __name__ == '__main__':
  googletest.main()
| |
from __future__ import unicode_literals
from django.contrib.auth.models import User
from kgb import SpyAgency
from reviewboard.accounts.models import Profile, LocalSiteProfile
from reviewboard.reviews.errors import NotModifiedError
from reviewboard.reviews.models import (Group, ReviewRequest,
ReviewRequestDraft)
from reviewboard.scmtools.models import Repository, Tool
from reviewboard.site.models import LocalSite
from reviewboard.testing import TestCase
class ReviewRequestCounterTests(SpyAgency, TestCase):
"""Unit tests for review request counters."""
fixtures = ['test_scmtools']
def setUp(self):
    """Create a user, a repository, local-site profiles, one starred review
    request, and a review group; then sanity-check the initial counters."""
    super(ReviewRequestCounterTests, self).setUp()
    tool = Tool.objects.get(name='Subversion')
    repository = Repository.objects.create(name='Test1', path='path1',
                                           tool=tool)
    self.user = User.objects.create_user(username='testuser', password='',
                                         email='user@example.com')
    self.profile = self.user.get_profile()
    self.test_site = LocalSite.objects.create(name='test')
    # A second profile bound to the LocalSite, to verify site isolation.
    self.site_profile2 = \
        LocalSiteProfile.objects.create(user=self.user,
                                        profile=self.profile,
                                        local_site=self.test_site)
    self.review_request = self.create_review_request(submitter=self.user,
                                                     repository=repository)
    self.profile.star_review_request(self.review_request)
    self.site_profile = self.profile.site_profiles.get(local_site=None)
    # Creating + starring an unpublished request bumps outgoing counters only
    # (starred_public stays 0 until the request is published).
    self.assertEqual(self.site_profile.total_outgoing_request_count, 1)
    self.assertEqual(self.site_profile.pending_outgoing_request_count, 1)
    self.assertEqual(self.site_profile.starred_public_request_count, 0)
    self.group = Group.objects.create(name='test-group')
    self.group.users.add(self.user)
    self._reload_objects()
    # The LocalSite-bound profile must be unaffected by global-site activity.
    self.assertEqual(self.site_profile2.total_outgoing_request_count, 0)
    self.assertEqual(self.site_profile2.pending_outgoing_request_count, 0)
    self.assertEqual(self.site_profile2.starred_public_request_count, 0)
def test_new_site_profile(self):
    """Testing counters on a new LocalSiteProfile"""
    # Recreate the profile from scratch; counters must be recomputed to
    # reflect the pre-existing review request.
    self.site_profile.delete()
    self.site_profile = \
        LocalSiteProfile.objects.create(user=self.user,
                                        profile=self.profile)
    self.assertEqual(self.site_profile.total_outgoing_request_count, 1)
    self.assertEqual(self.site_profile.pending_outgoing_request_count, 1)
    self.assertEqual(self.site_profile.starred_public_request_count, 0)
    self.review_request.publish(self.user)
    self._reload_objects()
    # Publishing makes the starred request public, bumping starred_public.
    self.assertEqual(self.site_profile.total_outgoing_request_count, 1)
    self.assertEqual(self.site_profile.pending_outgoing_request_count, 1)
    self.assertEqual(self.site_profile.starred_public_request_count, 1)
def test_outgoing_requests(self):
    """Testing counters with creating outgoing review requests"""
    # The review request was already created
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1)
    # Target the user himself so publishing also bumps incoming counters.
    draft = ReviewRequestDraft.create(self.review_request)
    draft.target_people = [self.user]
    draft.save()
    self.review_request.publish(self.user)
    self._check_counters(direct_incoming=1,
                         total_incoming=1,
                         total_outgoing=1,
                         pending_outgoing=1,
                         starred_public=1)
def test_closing_requests(self, close_type=ReviewRequest.DISCARDED):
    """Testing counters with closing outgoing review requests"""
    # The review request was already created
    self._check_counters(total_outgoing=1, pending_outgoing=1)
    # Publish with both a target group and a target person so every
    # incoming counter becomes non-zero before the close.
    draft = ReviewRequestDraft.create(self.review_request)
    draft.target_groups.add(self.group)
    draft.target_people.add(self.user)
    self.review_request.publish(self.user)
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1,
                         direct_incoming=1,
                         total_incoming=1,
                         starred_public=1,
                         group_incoming=1)
    self.assertTrue(self.review_request.public)
    self.assertEqual(self.review_request.status,
                     ReviewRequest.PENDING_REVIEW)
    # Closing drops every counter except total_outgoing. close_type is
    # parameterized so the reopen tests can reuse this method.
    self.review_request.close(close_type)
    self._check_counters(total_outgoing=1)
def test_closing_draft_requests(self, close_type=ReviewRequest.DISCARDED):
    """Testing counters with closing draft review requests"""
    # The review request was already created
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1)
    self.assertFalse(self.review_request.public)
    self.assertEqual(self.review_request.status,
                     ReviewRequest.PENDING_REVIEW)
    # Closing a never-published request only clears pending_outgoing;
    # close_type is parameterized so reopen tests can reuse this.
    self.review_request.close(close_type)
    self._check_counters(total_outgoing=1)
def test_closing_closed_requests(self):
    """Testing counters with closing closed review requests"""
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1)
    self.review_request.publish(self.user)
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1,
                         starred_public=1)
    self.assertTrue(self.review_request.public)
    self.assertEqual(self.review_request.status,
                     ReviewRequest.PENDING_REVIEW)
    self.review_request.close(ReviewRequest.DISCARDED)
    self._check_counters(total_outgoing=1)
    # Closing an already-closed request (with a different close type) must
    # not decrement the counters a second time.
    self.review_request.close(ReviewRequest.SUBMITTED)
    self._check_counters(total_outgoing=1)
def test_closing_draft_requests_with_site(self):
    """Testing counters with closing draft review requests on LocalSite"""
    # Replace the default review request with one scoped to the LocalSite,
    # so only the LocalSite profile's counters should move.
    self.review_request.delete()
    self._check_counters(with_local_site=True)
    tool = Tool.objects.get(name='Subversion')
    repository = Repository.objects.create(name='Test1', path='path1',
                                           tool=tool,
                                           local_site=self.test_site)
    self.review_request = ReviewRequest.objects.create(
        self.user,
        repository,
        local_site=self.test_site)
    self._check_counters(with_local_site=True,
                         total_outgoing=1,
                         pending_outgoing=1)
    self.assertFalse(self.review_request.public)
    self.assertEqual(self.review_request.status,
                     ReviewRequest.PENDING_REVIEW)
    self.review_request.close(ReviewRequest.DISCARDED)
    self._check_counters(with_local_site=True,
                         total_outgoing=1)
def test_deleting_requests(self):
    """Testing counters with deleting outgoing review requests"""
    # The review request was already created
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1)
    draft = ReviewRequestDraft.create(self.review_request)
    draft.target_groups.add(self.group)
    draft.target_people.add(self.user)
    self.review_request.publish(self.user)
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1,
                         direct_incoming=1,
                         total_incoming=1,
                         starred_public=1,
                         group_incoming=1)
    # Deleting a published request must zero out every counter it touched.
    self.review_request.delete()
    self._check_counters()
def test_deleting_draft_requests(self):
    """Testing counters with deleting draft review requests"""
    # We're simulating what a DefaultReviewer would do by populating
    # the ReviewRequest's target users and groups while not public and
    # without a draft.
    self.review_request.target_people.add(self.user)
    self.review_request.target_groups.add(self.group)
    # The review request was already created
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1)
    # Deleting the unpublished request must zero the outgoing counters even
    # though targets were assigned directly (no incoming counts existed).
    self.review_request.delete()
    self._check_counters()
def test_deleting_closed_requests(self):
    """Testing counters with deleting closed review requests"""
    # We're simulating what a DefaultReviewer would do by populating
    # the ReviewRequest's target users and groups while not public and
    # without a draft.
    self.review_request.target_people.add(self.user)
    self.review_request.target_groups.add(self.group)
    # The review request was already created
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1)
    self.review_request.close(ReviewRequest.DISCARDED)
    self._check_counters(total_outgoing=1)
    # Deleting after close must remove the remaining total_outgoing count
    # without double-decrementing pending_outgoing.
    self.review_request.delete()
    self._check_counters()
def test_reopen_discarded_requests(self):
    """Testing counters with reopening discarded outgoing review requests
    """
    # Reuse the close test to get a published-then-discarded request.
    self.test_closing_requests(ReviewRequest.DISCARDED)
    self.review_request.reopen()
    # Reopening a discarded request puts it back into an unpublished,
    # pending state, so only the outgoing counters return.
    self.assertFalse(self.review_request.public)
    self.assertEqual(self.review_request.status,
                     ReviewRequest.PENDING_REVIEW)
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1)
    # Re-publishing restores all incoming/starred counts.
    self.review_request.publish(self.user)
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1,
                         direct_incoming=1,
                         total_incoming=1,
                         starred_public=1,
                         group_incoming=1)
def test_reopen_submitted_requests(self):
    """Testing counters with reopening submitted outgoing review requests
    """
    # Reuse the close test to get a published-then-submitted request.
    self.test_closing_requests(ReviewRequest.SUBMITTED)
    self.review_request.reopen()
    # Unlike a discarded request, a reopened submitted request stays
    # public, so incoming/starred counters come back immediately.
    self.assertTrue(self.review_request.public)
    self.assertEqual(self.review_request.status,
                     ReviewRequest.PENDING_REVIEW)
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1,
                         direct_incoming=1,
                         total_incoming=1,
                         starred_public=1,
                         group_incoming=1)
    # Publishing again must not change any counters.
    self.review_request.publish(self.user)
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1,
                         direct_incoming=1,
                         total_incoming=1,
                         starred_public=1,
                         group_incoming=1)
def test_reopen_discarded_draft_requests(self):
    """Testing counters with reopening discarded draft review requests"""
    self.assertFalse(self.review_request.public)
    # Discard the never-published request, then reopen it; only the
    # outgoing counters should be restored.
    self.test_closing_draft_requests(ReviewRequest.DISCARDED)
    self.review_request.reopen()
    self.assertFalse(self.review_request.public)
    self.assertEqual(self.review_request.status,
                     ReviewRequest.PENDING_REVIEW)
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1)
def test_reopen_submitted_draft_requests(self):
    """Testing counters with reopening submitted draft review requests"""
    # Publish and submit the request first (this also sets up targets).
    self.test_closing_requests(ReviewRequest.SUBMITTED)
    # We're simulating what a DefaultReviewer would do by populating
    # the ReviewRequest's target users and groups while not public and
    # without a draft.
    self.review_request.target_people.add(self.user)
    self.review_request.target_groups.add(self.group)
    self._check_counters(total_outgoing=1)
    self.review_request.reopen()
    # A reopened submitted request remains public, so incoming, starred
    # and group counters are restored along with pending_outgoing.
    self.assertTrue(self.review_request.public)
    self.assertEqual(self.review_request.status,
                     ReviewRequest.PENDING_REVIEW)
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1,
                         direct_incoming=1,
                         total_incoming=1,
                         starred_public=1,
                         group_incoming=1)
def test_double_publish(self):
    """Testing counters with publishing a review request twice"""
    self.assertFalse(self.review_request.public)
    self.assertEqual(self.review_request.status,
                     ReviewRequest.PENDING_REVIEW)
    # Publish the first time.
    self.review_request.publish(self.user)
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1,
                         starred_public=1)
    # Publish the second time. Counters must be unchanged; a re-publish
    # of an already-public request must not double-increment anything.
    self.review_request.publish(self.user)
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1,
                         starred_public=1)
def test_add_group(self):
    """Testing counters when adding a group reviewer"""
    draft = ReviewRequestDraft.create(self.review_request)
    draft.summary = 'Test Summary'
    draft.target_groups.add(self.group)
    # Nothing is published yet, so only the outgoing counters are set.
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1)
    # Publishing with a group reviewer adds group and total incoming
    # counts, but not direct_incoming (no person reviewer).
    self.review_request.publish(self.user)
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1,
                         total_incoming=1,
                         group_incoming=1,
                         starred_public=1)
def test_remove_group(self):
    """Testing counters when removing a group reviewer"""
    # Start from a published request with a group reviewer.
    self.test_add_group()
    draft = ReviewRequestDraft.create(self.review_request)
    draft.target_groups.remove(self.group)
    # There must be at least one target_group or target_people
    draft.target_people = [self.user]
    # The draft changes are not public yet, so the group counts still
    # reflect the published state.
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1,
                         total_incoming=1,
                         direct_incoming=0,
                         group_incoming=1,
                         starred_public=1)
    # Once published, the group count drops and the person count appears.
    self.review_request.publish(self.user)
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1,
                         direct_incoming=1,
                         total_incoming=1,
                         starred_public=1)
def test_remove_group_and_fail_publish(self):
    """Testing counters when removing a group reviewer and then
    failing to publish the draft
    """
    # Start from a published request with a group reviewer.
    self.test_add_group()
    draft = ReviewRequestDraft.create(self.review_request)
    draft.target_groups.remove(self.group)
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1,
                         total_incoming=1,
                         group_incoming=1,
                         starred_public=1)
    # Force the draft publish to raise, simulating a failed publish.
    self.spy_on(ReviewRequestDraft.publish,
                owner=ReviewRequestDraft,
                call_fake=self._raise_publish_error)
    with self.assertRaises(NotModifiedError):
        self.review_request.publish(self.user)
    # A failed publish must leave every counter untouched.
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1,
                         total_incoming=1,
                         group_incoming=1,
                         starred_public=1)
def test_add_person(self):
    """Testing counters when adding a person reviewer"""
    draft = ReviewRequestDraft.create(self.review_request)
    draft.summary = 'Test Summary'
    draft.target_people.add(self.user)
    # Nothing is published yet, so only the outgoing counters are set.
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1)
    # Publishing with a person reviewer sets direct and total incoming.
    self.review_request.publish(self.user)
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1,
                         direct_incoming=1,
                         total_incoming=1,
                         starred_public=1)
def test_remove_person(self):
    """Testing counters when removing a person reviewer"""
    # Start from a published request with a person reviewer.
    self.test_add_person()
    draft = ReviewRequestDraft.create(self.review_request)
    draft.target_people.remove(self.user)
    # There must be at least one target_group or target_people
    draft.target_groups = [self.group]
    # The draft is not yet public, so the person counts are unchanged.
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1,
                         direct_incoming=1,
                         total_incoming=1,
                         starred_public=1)
    # After publish, direct_incoming drops and group_incoming appears.
    self.review_request.publish(self.user)
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1,
                         group_incoming=1,
                         total_incoming=1,
                         starred_public=1)
def test_remove_person_and_fail_publish(self):
    """Testing counters when removing a person reviewer and then
    failing to publish the draft
    """
    # Start from a published request with a person reviewer.
    self.test_add_person()
    draft = ReviewRequestDraft.create(self.review_request)
    draft.target_people.remove(self.user)
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1,
                         direct_incoming=1,
                         total_incoming=1,
                         starred_public=1)
    # Force the draft publish to raise, simulating a failed publish.
    self.spy_on(ReviewRequestDraft.publish,
                owner=ReviewRequestDraft,
                call_fake=self._raise_publish_error)
    with self.assertRaises(NotModifiedError):
        self.review_request.publish(self.user)
    # A failed publish must leave every counter untouched.
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1,
                         direct_incoming=1,
                         total_incoming=1,
                         starred_public=1)
def test_populate_counters(self):
    """Testing counters when populated from a fresh upgrade or clear"""
    # The review request was already created
    draft = ReviewRequestDraft.create(self.review_request)
    draft.target_groups.add(self.group)
    draft.target_people.add(self.user)
    self.review_request.publish(self.user)
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1,
                         total_incoming=1,
                         direct_incoming=1,
                         starred_public=1,
                         group_incoming=1)
    # Wipe all stored counters, simulating a fresh upgrade or a manual
    # clear; the next access should recompute identical values.
    LocalSiteProfile.objects.update(
        direct_incoming_request_count=None,
        total_incoming_request_count=None,
        pending_outgoing_request_count=None,
        total_outgoing_request_count=None,
        starred_public_request_count=None)
    Group.objects.update(incoming_request_count=None)
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1,
                         total_incoming=1,
                         direct_incoming=1,
                         starred_public=1,
                         group_incoming=1)
def test_populate_counters_after_change(self):
    """Testing counter inc/dec on uninitialized counter fields"""
    # The review request was already created
    draft = ReviewRequestDraft.create(self.review_request)
    draft.target_groups.add(self.group)
    draft.target_people.add(self.user)
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1)
    # Wipe all stored counters so they are in the "uninitialized" state.
    LocalSiteProfile.objects.update(
        direct_incoming_request_count=None,
        total_incoming_request_count=None,
        pending_outgoing_request_count=None,
        total_outgoing_request_count=None,
        starred_public_request_count=None)
    Group.objects.update(incoming_request_count=None)
    profile_fields = [
        'direct_incoming_request_count',
        'total_incoming_request_count',
        'pending_outgoing_request_count',
        'total_outgoing_request_count',
        'starred_public_request_count',
    ]
    # Lock the fields so we don't re-initialize them on publish.
    locks = {
        self.site_profile: 1,
        self.site_profile2: 1,
    }
    for field in profile_fields:
        getattr(LocalSiteProfile, field)._locks = locks
    Group.incoming_request_count._locks = locks
    # Publish the review request. This will normally try to
    # increment/decrement the counts, which it should ignore now.
    self.review_request.publish(self.user)
    # Unlock the profiles so we can query/re-initialize them again.
    for field in profile_fields:
        getattr(LocalSiteProfile, field)._locks = {}
    Group.incoming_request_count._locks = {}
    # Recomputation must yield the full post-publish counts despite the
    # ignored increments above.
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1,
                         direct_incoming=1,
                         total_incoming=1,
                         starred_public=1,
                         group_incoming=1)
def test_counts_with_reassignment_in_initial_draft(self):
    """Testing counters when changing review request ownership in initial
    draft
    """
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1)
    new_user = User.objects.create_user(username='test2',
                                        password='',
                                        email='user@example.com')
    site_profile = \
        new_user.get_site_profile(self.review_request.local_site)
    # Reassign ownership (and target the new owner) before first publish.
    draft = ReviewRequestDraft.create(self.review_request)
    draft.owner = new_user
    draft.target_people = [draft.owner]
    draft.save()
    self.review_request.publish(self.user)
    # The original owner's outgoing counts move to the new owner; the
    # starred count stays with the user who starred the request.
    self._check_counters(total_outgoing=0,
                         pending_outgoing=0,
                         starred_public=1)
    site_profile = LocalSiteProfile.objects.get(pk=site_profile.pk)
    self._check_counters_on_profile(site_profile,
                                    total_outgoing=1,
                                    pending_outgoing=1,
                                    direct_incoming=1,
                                    total_incoming=1)
def test_counts_with_reassignment_in_initial_draft_new_profile(self):
    """Testing counters when changing review request ownership in initial
    draft and new owner without initial site profile
    """
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1)
    new_user = User.objects.create_user(username='test2',
                                        password='',
                                        email='user@example.com')
    # Unlike the sibling test, the new owner's site profile is NOT fetched
    # up front; it must be created lazily during/after the publish.
    draft = ReviewRequestDraft.create(self.review_request)
    draft.owner = new_user
    draft.target_people = [draft.owner]
    draft.save()
    self.review_request.publish(self.user)
    self._check_counters(total_outgoing=0,
                         pending_outgoing=0,
                         starred_public=1)
    site_profile = \
        new_user.get_site_profile(self.review_request.local_site)
    self._check_counters_on_profile(site_profile,
                                    total_outgoing=1,
                                    pending_outgoing=1,
                                    direct_incoming=1,
                                    total_incoming=1)
def test_counts_with_reassignment_after_publish(self):
    """Testing counters when changing review request ownership after
    publish
    """
    # Publish first so the reassignment happens on a public request.
    self.review_request.publish(self.user)
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1,
                         starred_public=1)
    new_user = User.objects.create_user(username='test2',
                                        password='',
                                        email='user@example.com')
    site_profile = \
        new_user.get_site_profile(self.review_request.local_site)
    draft = ReviewRequestDraft.create(self.review_request)
    draft.owner = new_user
    draft.target_people = [draft.owner]
    draft.save()
    self.review_request.publish(self.user)
    # Outgoing counts transfer to the new owner; starred stays put.
    self._check_counters(total_outgoing=0,
                         pending_outgoing=0,
                         starred_public=1)
    site_profile = LocalSiteProfile.objects.get(pk=site_profile.pk)
    self._check_counters_on_profile(site_profile,
                                    total_outgoing=1,
                                    pending_outgoing=1,
                                    direct_incoming=1,
                                    total_incoming=1)
def test_counts_with_reassignment_after_publish_new_profile(self):
    """Testing counters when changing review request ownership after
    publish and new owner without initial site profile
    """
    self.review_request.publish(self.user)
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1,
                         starred_public=1)
    new_user = User.objects.create_user(username='test2',
                                        password='',
                                        email='user@example.com')
    # The new owner's site profile is deliberately not fetched before the
    # publish; it must come into existence with the correct counts.
    draft = ReviewRequestDraft.create(self.review_request)
    draft.owner = new_user
    draft.target_people = [draft.owner]
    draft.save()
    self.review_request.publish(self.user)
    self._check_counters(total_outgoing=0,
                         pending_outgoing=0,
                         starred_public=1)
    site_profile = \
        new_user.get_site_profile(self.review_request.local_site)
    self._check_counters_on_profile(site_profile,
                                    total_outgoing=1,
                                    pending_outgoing=1,
                                    direct_incoming=1,
                                    total_incoming=1)
def test_counts_with_reassignment_and_close(self):
    """Testing counters when changing review request ownership and closing
    in same operation
    """
    self.review_request.publish(self.user)
    self._check_counters(total_outgoing=1,
                         pending_outgoing=1,
                         starred_public=1)
    new_user = User.objects.create_user(username='test2',
                                        password='',
                                        email='user@example.com')
    site_profile = \
        new_user.get_site_profile(self.review_request.local_site)
    # Note that it's not normally possible to update something like an
    # owner while also closing in the same operation. Drafts don't allow
    # it. However, we have logic that considers these combinations of
    # operations, and it's technically possible to do, so we're testing
    # it here by updating the review request manually.
    self.review_request.owner = new_user
    self.review_request.status = ReviewRequest.SUBMITTED
    self.review_request.save(update_counts=True,
                             old_submitter=self.user)
    # Old owner loses everything; new owner gains only total_outgoing
    # since the request is now closed (pending stays 0).
    self._check_counters(total_outgoing=0,
                         pending_outgoing=0,
                         starred_public=0)
    site_profile = LocalSiteProfile.objects.get(pk=site_profile.pk)
    self._check_counters_on_profile(site_profile,
                                    total_outgoing=1,
                                    pending_outgoing=0)
def test_counts_with_reassignment_and_reopen(self):
    """Testing counters when changing review request ownership and
    reopening in same operation
    """
    self.review_request.close(ReviewRequest.DISCARDED)
    self.assertFalse(self.review_request.public)
    self.assertEqual(self.review_request.status, ReviewRequest.DISCARDED)
    self._check_counters(total_outgoing=1,
                         pending_outgoing=0,
                         starred_public=0)
    new_user = User.objects.create_user(username='test2',
                                        password='',
                                        email='user@example.com')
    site_profile = \
        new_user.get_site_profile(self.review_request.local_site)
    # Note that it's not normally possible to update something like an
    # owner while also reopening in the same operation. Drafts don't allow
    # it. However, we have logic that considers these combinations of
    # operations, and it's technically possible to do, so we're testing
    # it here by updating the review request manually.
    self.review_request.owner = new_user
    self.review_request.status = ReviewRequest.PENDING_REVIEW
    self.review_request.save(update_counts=True,
                             old_submitter=self.user)
    # Old owner loses the total_outgoing count; the reopened request now
    # counts as outgoing and pending for the new owner.
    self._check_counters(total_outgoing=0,
                         pending_outgoing=0,
                         starred_public=0)
    site_profile = LocalSiteProfile.objects.get(pk=site_profile.pk)
    self._check_counters_on_profile(site_profile,
                                    total_outgoing=1,
                                    pending_outgoing=1)
def test_counts_with_join_group(self):
    """Testing counters when joining a review group"""
    # Another user publishes a review request targeting a group that the
    # test user does not belong to yet.
    other_user = self.create_user()
    other_group = self.create_review_group(name='group2')
    self.create_review_request(submitter=other_user,
                               target_groups=[other_group],
                               publish=True)
    # Not a member yet, so nothing incoming is counted.
    self._check_counters(total_outgoing=1,
                         total_incoming=0,
                         pending_outgoing=1)
    # Joining the group should pick up its pending review request.
    other_group.users.add(self.user)
    self._check_counters(total_outgoing=1,
                         total_incoming=1,
                         pending_outgoing=1)
def test_counts_with_leave_group(self):
    """Testing counters when leaving a review group"""
    # The test user starts as a member of a group that receives a
    # published review request from another user.
    other_user = self.create_user()
    other_group = self.create_review_group(name='group2')
    other_group.users.add(self.user)
    self.create_review_request(submitter=other_user,
                               target_groups=[other_group],
                               publish=True)
    # Membership means the group's request counts as incoming.
    self._check_counters(total_outgoing=1,
                         total_incoming=1,
                         pending_outgoing=1)
    # Leaving the group should remove the incoming count.
    other_group.users.remove(self.user)
    self._check_counters(total_outgoing=1,
                         total_incoming=0,
                         pending_outgoing=1)
def _check_counters(self, total_outgoing=0, pending_outgoing=0,
                    direct_incoming=0, total_incoming=0,
                    starred_public=0, group_incoming=0,
                    with_local_site=False):
    """Check that the counters match the expected values.

    Args:
        total_outgoing (int):
            The expected number of total outgoing review requests.

        pending_outgoing (int):
            The expected number of pending outgoing review requests.

        direct_incoming (int):
            The expected number of review requests assigned directly to the
            user.

        total_incoming (int):
            The expected number of review requests assigned either directly
            or indirectly to the user.

        starred_public (int):
            The expected number of public review requests starred by the
            user.

        group_incoming (int):
            The expected number of review requests assigned to the test
            group.

        with_local_site (bool):
            Whether to run the test for a local site.
    """
    self._reload_objects()
    # Pick which profile the expectations apply to; the other one must
    # remain untouched by the operations under test.
    if with_local_site:
        checked_profile, untouched_profile = (self.site_profile2,
                                              self.site_profile)
    else:
        checked_profile, untouched_profile = (self.site_profile,
                                              self.site_profile2)
    self._check_counters_on_profile(checked_profile, total_outgoing,
                                    pending_outgoing, direct_incoming,
                                    total_incoming, starred_public)
    self.assertEqual(
        self.group.incoming_request_count,
        group_incoming,
        'Expected Group.incoming_request_count to be %s. Got %s instead.'
        % (group_incoming, self.group.incoming_request_count))
    # These should never be affected by updates on the main LocalSite we're
    # working with, so they should always be 0.
    self._check_counters_on_profile(untouched_profile)
def _check_counters_on_profile(self, profile, total_outgoing=0,
                               pending_outgoing=0, direct_incoming=0,
                               total_incoming=0, starred_public=0):
    """Check that the counters match the expected values.

    Args:
        profile (reviewboard.accounts.models.LocalSiteProfile):
            The profile object to test counts on.

        total_outgoing (int):
            The expected number of total outgoing review requests.

        pending_outgoing (int):
            The expected number of pending outgoing review requests.

        direct_incoming (int):
            The expected number of review requests assigned directly to the
            user.

        total_incoming (int):
            The expected number of review requests assigned either directly
            or indirectly to the user.

        starred_public (int):
            The expected number of public review requests starred by the
            user.
    """
    msg = 'Expected %s to be %s. Got %s instead.'
    # Compare each counter field against its expected value, keeping the
    # same field order (and failure messages) as individual assertions.
    expectations = [
        ('total_outgoing_request_count', total_outgoing),
        ('pending_outgoing_request_count', pending_outgoing),
        ('direct_incoming_request_count', direct_incoming),
        ('total_incoming_request_count', total_incoming),
        ('starred_public_request_count', starred_public),
    ]
    for field_name, expected in expectations:
        actual = getattr(profile, field_name)
        self.assertEqual(actual, expected,
                         msg % (field_name, expected, actual))
def _reload_objects(self):
    """Re-fetch the models under test so cached counter values are fresh."""
    self.test_site = LocalSite.objects.get(pk=self.test_site.pk)
    self.site_profile = LocalSiteProfile.objects.get(
        pk=self.site_profile.pk)
    self.site_profile2 = LocalSiteProfile.objects.get(
        pk=self.site_profile2.pk)
    self.group = Group.objects.get(pk=self.group.pk)
def _raise_publish_error(self, *args, **kwargs):
    """Stand-in for ReviewRequestDraft.publish that always fails.

    Used with spy_on() to simulate a publish operation erroring out.
    """
    raise NotModifiedError()
| |
#!/usr/bin/env python
from __future__ import print_function
import logging
import os
import subprocess
import textwrap
import warnings
from datetime import datetime
import argparse
from builtins import input
from collections import namedtuple
from dateutil.parser import parse as parsedate
import json
import daemon
from daemon.pidfile import TimeoutPIDLockFile
import signal
import sys
import airflow
from airflow import jobs, settings
from airflow import configuration as conf
from airflow.executors import DEFAULT_EXECUTOR
from airflow.models import DagModel, DagBag, TaskInstance, DagPickle, DagRun, Variable
from airflow.utils import db as db_utils
from airflow.utils import logging as logging_utils
from airflow.utils.state import State
from airflow.exceptions import AirflowException
# Absolute path to the DAGs folder, resolved once from airflow.cfg at import
# time (~ is expanded to the user's home directory).
DAGS_FOLDER = os.path.expanduser(conf.get('core', 'DAGS_FOLDER'))
def sigint_handler(signum, frame):
    """Signal handler that exits the process cleanly with status 0.

    Args:
        signum: The signal number being handled (unused).
        frame: The current stack frame at interrupt time (unused).

    The first parameter was renamed from ``signal`` to ``signum`` so it no
    longer shadows the imported ``signal`` module.
    """
    sys.exit(0)
def setup_logging(filename):
    """Attach a file handler writing to ``filename`` to the root logger.

    The root logger's level is set from settings.LOGGING_LEVEL, and the
    handler's underlying stream is returned so callers can redirect to it.
    """
    file_handler = logging.FileHandler(filename)
    file_handler.setFormatter(logging.Formatter(settings.SIMPLE_LOG_FORMAT))
    root_logger = logging.getLogger()
    root_logger.addHandler(file_handler)
    root_logger.setLevel(settings.LOGGING_LEVEL)
    return file_handler.stream
def setup_locations(process, pid=None, stdout=None, stderr=None, log=None):
    """Fill in default daemon file locations for the given process name.

    Any of pid/stdout/stderr/log not supplied by the caller defaults to
    ``$AIRFLOW_HOME/airflow-<process>.<ext>``.

    Returns:
        Tuple of (pid, stdout, stderr, log) paths.
    """
    home = os.path.expanduser(settings.AIRFLOW_HOME)

    def _default_path(ext):
        # Default bookkeeping file for this daemonized process.
        return os.path.join(home, "airflow-{}.{}".format(process, ext))

    stderr = stderr or _default_path("err")
    stdout = stdout or _default_path("out")
    log = log or _default_path("log")
    pid = pid or _default_path("pid")
    return pid, stdout, stderr, log
def process_subdir(subdir):
    """Normalize a --subdir CLI argument into an absolute path.

    The literal token "DAGS_FOLDER" inside the value is replaced with the
    configured DAGs folder; ~ is expanded. A falsy value is returned as-is.
    """
    dags_folder = os.path.expanduser(conf.get("core", "DAGS_FOLDER"))
    if subdir:
        if "DAGS_FOLDER" in subdir:
            subdir = subdir.replace("DAGS_FOLDER", dags_folder)
        subdir = os.path.abspath(os.path.expanduser(subdir))
    return subdir
def get_dag(args):
    """Load the DAG named by args.dag_id from args.subdir.

    Raises:
        AirflowException: If no DAG with that id is found in the DagBag.
    """
    dagbag = DagBag(process_subdir(args.subdir))
    try:
        return dagbag.dags[args.dag_id]
    except KeyError:
        raise AirflowException(
            'dag_id could not be found: {}'.format(args.dag_id))
def backfill(args, dag=None):
    """CLI handler for ``airflow backfill``: run a DAG over a date range.

    Requires at least one of start_date/end_date; if only one is given it
    is used for both ends of the range.
    """
    logging.basicConfig(
        level=settings.LOGGING_LEVEL,
        format=settings.SIMPLE_LOG_FORMAT)
    dag = dag or get_dag(args)
    if not args.start_date and not args.end_date:
        raise AirflowException("Provide a start_date and/or end_date")
    # If only one date is passed, using same as start and end
    args.end_date = args.end_date or args.start_date
    args.start_date = args.start_date or args.end_date
    if args.task_regex:
        # Narrow to the matching tasks (plus their upstream dependencies,
        # unless dependencies are being ignored).
        dag = dag.sub_dag(
            task_regex=args.task_regex,
            include_upstream=not args.ignore_dependencies)
    if args.dry_run:
        # Dry run: render/report each task for the start date only, without
        # executing anything.
        print("Dry run of DAG {0} on {1}".format(args.dag_id,
                                                 args.start_date))
        for task in dag.tasks:
            print("Task {0}".format(task.task_id))
            ti = TaskInstance(task, args.start_date)
            ti.dry_run()
    else:
        dag.run(
            start_date=args.start_date,
            end_date=args.end_date,
            mark_success=args.mark_success,
            include_adhoc=args.include_adhoc,
            local=args.local,
            donot_pickle=(args.donot_pickle or
                          conf.getboolean('core', 'donot_pickle')),
            ignore_dependencies=args.ignore_dependencies,
            ignore_first_depends_on_past=args.ignore_first_depends_on_past,
            pool=args.pool)
def trigger_dag(args):
    """CLI handler for ``airflow trigger_dag``: create an externally
    triggered DagRun for the current time.

    Raises:
        AirflowException: If a DagRun with the chosen run_id already exists.
    """
    dag = get_dag(args)
    if not dag:
        logging.error("Cannot find dag {}".format(args.dag_id))
        sys.exit(1)
    # NOTE(review): naive local time is used as the execution date here —
    # no timezone handling.
    execution_date = datetime.now()
    run_id = args.run_id or "manual__{0}".format(execution_date.isoformat())
    dr = DagRun.find(dag_id=args.dag_id, run_id=run_id)
    if dr:
        logging.error("This run_id {} already exists".format(run_id))
        raise AirflowException()
    run_conf = {}
    if args.conf:
        # Optional JSON payload made available to the triggered run.
        run_conf = json.loads(args.conf)
    trigger = dag.create_dagrun(
        run_id=run_id,
        execution_date=execution_date,
        state=State.RUNNING,
        conf=run_conf,
        external_trigger=True
    )
    logging.info("Created {}".format(trigger))
def variables(args):
    """CLI handler for getting, setting, or listing Airflow Variables.

    With --get, prints the variable (optionally JSON-deserialized); with
    --set, stores a key/value pair; with neither, lists all variable keys.
    """
    if args.get:
        try:
            print(Variable.get(args.get,
                               deserialize_json=args.json,
                               default_var=args.default))
        except ValueError as e:
            print(e)
    if args.set:
        Variable.set(args.set[0], args.set[1])
    if not args.set and not args.get:
        # Neither flag given: dump every stored variable key, one per line.
        session = settings.Session()
        print("\n".join(row.key for row in session.query(Variable)))
def pause(args, dag=None):
    """CLI handler that marks a DAG as paused."""
    set_is_paused(True, args, dag)
def unpause(args, dag=None):
    """CLI handler that marks a DAG as unpaused."""
    set_is_paused(False, args, dag)
def set_is_paused(is_paused, args, dag=None):
    """Update the paused flag on a DAG's DagModel row and print the result.

    Args:
        is_paused: New value for the paused flag.
        args: Parsed CLI arguments, used to look up the DAG when ``dag``
            is not provided.
        dag: Optional DAG object; resolved via get_dag(args) otherwise.

    Raises:
        AirflowException: If the DAG has no row in the DagModel table.
    """
    dag = dag or get_dag(args)
    session = settings.Session()
    dm = session.query(DagModel).filter(
        DagModel.dag_id == dag.dag_id).first()
    if dm is None:
        # Previously this crashed with an AttributeError on dm.is_paused;
        # fail with a clear error instead.
        raise AirflowException(
            "DAG: {} was not found in the DagModel table".format(dag.dag_id))
    dm.is_paused = is_paused
    session.commit()
    # Report the committed value from the DagModel row rather than going
    # through the DAG object.
    msg = "Dag: {}, paused: {}".format(dag, str(dm.is_paused))
    session.close()
    print(msg)
def run(args, dag=None):
    """CLI handler for ``airflow run``: execute a single task instance.

    Sets up per-task-instance file logging, resolves the DAG (from disk or
    from a database pickle), runs the task locally/raw/via the executor,
    and finally ships the log file to S3/GCS if remote logging is
    configured.
    """
    db_utils.pessimistic_connection_handling()
    if dag:
        args.dag_id = dag.dag_id
    # Setting up logging
    log_base = os.path.expanduser(conf.get('core', 'BASE_LOG_FOLDER'))
    directory = log_base + "/{args.dag_id}/{args.task_id}".format(args=args)
    if not os.path.exists(directory):
        os.makedirs(directory)
    iso = args.execution_date.isoformat()
    filename = "{directory}/{iso}".format(**locals())
    # Route all logging for this process into the task instance's log file.
    logging.root.handlers = []
    logging.basicConfig(
        filename=filename,
        level=settings.LOGGING_LEVEL,
        format=settings.LOG_FORMAT)
    if not args.pickle and not dag:
        dag = get_dag(args)
    elif not dag:
        # A pickle id was supplied: load the serialized DAG from the DB.
        session = settings.Session()
        logging.info('Loading pickle id {args.pickle}'.format(**locals()))
        dag_pickle = session.query(
            DagPickle).filter(DagPickle.id == args.pickle).first()
        if not dag_pickle:
            raise AirflowException("Who hid the pickle!? [missing pickle]")
        dag = dag_pickle.pickle
    task = dag.get_task(task_id=args.task_id)
    ti = TaskInstance(task, args.execution_date)
    if args.local:
        # Local mode: run under a supervising LocalTaskJob.
        print("Logging into: " + filename)
        run_job = jobs.LocalTaskJob(
            task_instance=ti,
            mark_success=args.mark_success,
            force=args.force,
            pickle_id=args.pickle,
            ignore_dependencies=args.ignore_dependencies,
            ignore_depends_on_past=args.ignore_depends_on_past,
            pool=args.pool)
        run_job.run()
    elif args.raw:
        # Raw mode: execute the task instance in-process, unsupervised.
        ti.run(
            mark_success=args.mark_success,
            force=args.force,
            ignore_dependencies=args.ignore_dependencies,
            ignore_depends_on_past=args.ignore_depends_on_past,
            job_id=args.job_id,
            pool=args.pool,
        )
    else:
        # Default: queue the task instance onto the configured executor.
        pickle_id = None
        if args.ship_dag:
            try:
                # Running remotely, so pickling the DAG
                session = settings.Session()
                pickle = DagPickle(dag)
                session.add(pickle)
                session.commit()
                pickle_id = pickle.id
                print((
                    'Pickled dag {dag} '
                    'as pickle_id:{pickle_id}').format(**locals()))
            except Exception as e:
                print('Could not pickle the DAG')
                print(e)
                raise e
        executor = DEFAULT_EXECUTOR
        executor.start()
        print("Sending to executor.")
        executor.queue_task_instance(
            ti,
            mark_success=args.mark_success,
            pickle_id=pickle_id,
            ignore_dependencies=args.ignore_dependencies,
            ignore_depends_on_past=args.ignore_depends_on_past,
            force=args.force,
            pool=args.pool)
        executor.heartbeat()
        executor.end()
    # Force the log to flush, and set the handler to go back to normal so we
    # don't continue logging to the task's log file. The flush is important
    # because we subsequently read from the log to insert into S3 or Google
    # cloud storage.
    logging.root.handlers[0].flush()
    logging.root.handlers = []
    # store logs remotely
    remote_base = conf.get('core', 'REMOTE_BASE_LOG_FOLDER')
    # deprecated as of March 2016
    if not remote_base and conf.get('core', 'S3_LOG_FOLDER'):
        warnings.warn(
            'The S3_LOG_FOLDER conf key has been replaced by '
            'REMOTE_BASE_LOG_FOLDER. Your conf still works but please '
            'update airflow.cfg to ensure future compatibility.',
            DeprecationWarning)
        remote_base = conf.get('core', 'S3_LOG_FOLDER')
    if os.path.exists(filename):
        # read log and remove old logs to get just the latest additions
        with open(filename, 'r') as logfile:
            log = logfile.read()
        # Mirror the local log path under the remote base location.
        remote_log_location = filename.replace(log_base, remote_base)
        # S3
        if remote_base.startswith('s3:/'):
            logging_utils.S3Log().write(log, remote_log_location)
        # GCS
        elif remote_base.startswith('gs:/'):
            logging_utils.GCSLog().write(
                log,
                remote_log_location,
                append=True)
        # Other
        elif remote_base and remote_base != 'None':
            logging.error(
                'Unsupported remote log location: {}'.format(remote_base))
def task_state(args):
    """
    Returns the state of a TaskInstance at the command line.

    >>> airflow task_state tutorial sleep 2015-01-01
    success
    """
    target_dag = get_dag(args)
    target_task = target_dag.get_task(task_id=args.task_id)
    task_instance = TaskInstance(target_task, args.execution_date)
    print(task_instance.current_state())
def dag_state(args):
    """
    Returns the state of a DagRun at the command line.

    >>> airflow dag_state tutorial 2015-01-01T00:00:00.000000
    running
    """
    target_dag = get_dag(args)
    runs = DagRun.find(target_dag.dag_id, execution_date=args.execution_date)
    # Print the first matching run's state, or None when no run exists.
    print(runs[0].state if runs else None)
def list_dags(args):
    """CLI handler that prints all DAG ids found in the given subdir,
    optionally followed by a DagBag loading report.
    """
    dagbag = DagBag(process_subdir(args.subdir))
    s = textwrap.dedent("""\n
        -------------------------------------------------------------------
        DAGS
        -------------------------------------------------------------------
        {dag_list}
        """)
    # Iterating the dagbag.dags dict yields DAG ids.
    dag_list = "\n".join(sorted(dagbag.dags))
    print(s.format(dag_list=dag_list))
    if args.report:
        print(dagbag.dagbag_report())
def list_tasks(args, dag=None):
    """Print the task ids of a DAG, as a tree or as a sorted list.

    Args:
        args: Parsed CLI arguments; ``args.tree`` selects tree output.
        dag: Optional DAG object; looked up via get_dag(args) if omitted.
    """
    dag = dag or get_dag(args)
    if args.tree:
        dag.tree_view()
    else:
        # Sort once; the previous code sorted the same list twice.
        print("\n".join(sorted(t.task_id for t in dag.tasks)))
def test(args, dag=None):
    """Run a single task instance in test mode (no dependency checks,
    no state recorded), or just render it when ``--dry_run`` is set."""
    dag = dag or get_dag(args)
    task = dag.get_task(task_id=args.task_id)
    if args.task_params:
        # merge any CLI-supplied JSON params into the task's params dict
        task.params.update(json.loads(args.task_params))
    ti = TaskInstance(task, args.execution_date)
    if args.dry_run:
        ti.dry_run()
    else:
        ti.run(force=True, ignore_dependencies=True, test_mode=True)
def render(args):
    """Render a task instance's templates and print every templated field."""
    dag = get_dag(args)
    task = dag.get_task(task_id=args.task_id)
    ti = TaskInstance(task, args.execution_date)
    ti.render_templates()
    for field_name in task.__class__.template_fields:
        print(textwrap.dedent("""\
        # ----------------------------------------------------------
        # property: {}
        # ----------------------------------------------------------
        {}
        """.format(field_name, getattr(task, field_name))))
def clear(args):
    """Clear the state of task instances matching the given DAG, task
    regex and date range, as if they never ran."""
    logging.basicConfig(
        level=settings.LOGGING_LEVEL,
        format=settings.SIMPLE_LOG_FORMAT)
    dag = get_dag(args)
    # narrow to a sub-DAG when a task regex was supplied
    if args.task_regex:
        dag = dag.sub_dag(
            task_regex=args.task_regex,
            include_downstream=args.downstream,
            include_upstream=args.upstream)
    dag.clear(
        start_date=args.start_date,
        end_date=args.end_date,
        only_failed=args.only_failed,
        only_running=args.only_running,
        confirm_prompt=not args.no_confirm,
        include_subdags=not args.exclude_subdags)
def webserver(args):
    """Start the Airflow webserver: the Flask debug server when ``--debug``
    is set, otherwise gunicorn via os.execvp.

    CLI flags take precedence; missing flags fall back to airflow.cfg values.
    """
    print(settings.HEADER)
    from airflow.www.app import cached_app
    app = cached_app(conf)
    # resolve each setting: explicit CLI value wins, else the config file
    access_logfile = args.access_logfile or conf.get('webserver', 'access_logfile')
    error_logfile = args.error_logfile or conf.get('webserver', 'error_logfile')
    workers = args.workers or conf.get('webserver', 'workers')
    worker_timeout = (args.worker_timeout or
                      conf.get('webserver', 'webserver_worker_timeout'))
    if args.debug:
        print(
            "Starting the web server on port {0} and host {1}.".format(
                args.port, args.hostname))
        app.run(debug=True, port=args.port, host=args.hostname)
    else:
        pid, stdout, stderr, log_file = setup_locations("webserver", pid=args.pid)
        print(
            textwrap.dedent('''\
                Running the Gunicorn Server with:
                Workers: {workers} {args.workerclass}
                Host: {args.hostname}:{args.port}
                Timeout: {worker_timeout}
                Logfiles: {access_logfile} {error_logfile}
                =================================================================\
            '''.format(**locals())))
        # FIX: build gunicorn args from the resolved fallback values above;
        # the original used the raw args.* values, silently ignoring the
        # config-file fallbacks it had just computed
        run_args = [
            'gunicorn',
            '-w ' + str(workers),
            '-k ' + str(args.workerclass),
            '-t ' + str(worker_timeout),
            '-b ' + args.hostname + ':' + str(args.port),
            '-n ' + 'airflow-webserver',
            '-p ' + str(pid),
        ]
        if access_logfile:
            run_args += ['--access-logfile', str(access_logfile)]
        if error_logfile:
            run_args += ['--error-logfile', str(error_logfile)]
        if args.daemon:
            run_args += ["-D"]
        module = "airflow.www.app:cached_app()".encode()
        run_args += [module]
        # replaces the current process with gunicorn
        os.execvp(
            'gunicorn', run_args
        )
def scheduler(args):
    """Start a SchedulerJob, either in the foreground or as a daemon."""
    print(settings.HEADER)
    job = jobs.SchedulerJob(
        dag_id=args.dag_id,
        subdir=process_subdir(args.subdir),
        num_runs=args.num_runs,
        do_pickle=args.do_pickle)
    if args.daemon:
        # resolve pid/stdout/stderr/log paths, then keep the log handler's
        # file descriptor open across the daemon fork via files_preserve
        pid, stdout, stderr, log_file = setup_locations("scheduler", args.pid, args.stdout, args.stderr, args.log_file)
        handle = setup_logging(log_file)
        stdout = open(stdout, 'w+')
        stderr = open(stderr, 'w+')
        ctx = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pid, -1),
            files_preserve=[handle],
            stdout=stdout,
            stderr=stderr,
        )
        with ctx:
            job.run()
        stdout.close()
        stderr.close()
    else:
        # foreground: exit cleanly on Ctrl-C / SIGTERM
        signal.signal(signal.SIGINT, sigint_handler)
        signal.signal(signal.SIGTERM, sigint_handler)
        job.run()
def serve_logs(args):
    """Serve worker task log files over HTTP via a minimal Flask app."""
    print("Starting flask")
    import flask
    flask_app = flask.Flask(__name__)

    @flask_app.route('/log/<path:filename>')
    def serve_logs(filename):  # noqa
        # resolve the base log folder lazily so config changes are honored
        base = os.path.expanduser(conf.get('core', 'BASE_LOG_FOLDER'))
        return flask.send_from_directory(
            base,
            filename,
            mimetype="application/json",
            as_attachment=False)

    worker_log_port = int(conf.get('celery', 'WORKER_LOG_SERVER_PORT'))
    flask_app.run(host='0.0.0.0', port=worker_log_port)
def worker(args):
    """Start a Celery worker node plus a sidecar 'serve_logs' subprocess."""
    env = os.environ.copy()
    env['AIRFLOW_HOME'] = settings.AIRFLOW_HOME
    # Celery worker
    from airflow.executors.celery_executor import app as celery_app
    from celery.bin import worker
    worker = worker.worker(app=celery_app)
    options = {
        'optimization': 'fair',
        'O': 'fair',  # short-form of the same optimization flag
        'queues': args.queues,
        'concurrency': args.concurrency,
    }
    if args.daemon:
        # daemon mode: keep the logging file descriptor alive across the fork
        pid, stdout, stderr, log_file = setup_locations("worker", args.pid, args.stdout, args.stderr, args.log_file)
        handle = setup_logging(log_file)
        stdout = open(stdout, 'w+')
        stderr = open(stderr, 'w+')
        ctx = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pid, -1),
            files_preserve=[handle],
            stdout=stdout,
            stderr=stderr,
        )
        with ctx:
            # log server lives for exactly the lifetime of the worker run
            sp = subprocess.Popen(['airflow', 'serve_logs'], env=env)
            worker.run(**options)
            sp.kill()
        stdout.close()
        stderr.close()
    else:
        signal.signal(signal.SIGINT, sigint_handler)
        signal.signal(signal.SIGTERM, sigint_handler)
        sp = subprocess.Popen(['airflow', 'serve_logs'], env=env)
        worker.run(**options)
        sp.kill()
def initdb(args):  # noqa
    """Initialize the Airflow metadata database."""
    print("DB: " + repr(settings.engine.url))
    db_utils.initdb()
    print("Done.")
def resetdb(args):
    """Drop and rebuild the metadata database, prompting unless --yes."""
    print("DB: " + repr(settings.engine.url))
    # the prompt is only shown when --yes was not passed (short-circuit)
    confirmed = args.yes or input(
        "This will drop existing tables if they exist. "
        "Proceed? (y/n)").upper() == "Y"
    if confirmed:
        logging.basicConfig(level=settings.LOGGING_LEVEL,
                            format=settings.SIMPLE_LOG_FORMAT)
        db_utils.resetdb()
    else:
        print("Bail.")
def upgradedb(args):  # noqa
    """Migrate the metadata database schema to the latest version."""
    print("DB: " + repr(settings.engine.url))
    db_utils.upgradedb()
def version(args):  # noqa
    """Print the Airflow banner followed by the installed version."""
    banner = settings.HEADER + " v" + airflow.__version__
    print(banner)
def flower(args):
    """Start Celery Flower against the configured broker, optionally
    daemonized."""
    broka = conf.get('celery', 'BROKER_URL')
    address = '--address={}'.format(args.hostname)
    port = '--port={}'.format(args.port)
    flower_cmd = ['flower', '-b', broka, address, port]
    if args.broker_api:
        # FIX: only append --broker_api when one was supplied; the original
        # always passed a trailing empty-string argument to flower otherwise
        flower_cmd.append('--broker_api=' + args.broker_api)
    if args.daemon:
        pid, stdout, stderr, log_file = setup_locations("flower", args.pid, args.stdout, args.stderr, args.log_file)
        stdout = open(stdout, 'w+')
        stderr = open(stderr, 'w+')
        ctx = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pid, -1),
            stdout=stdout,
            stderr=stderr,
        )
        with ctx:
            # replaces the daemonized process with flower
            os.execvp("flower", flower_cmd)
        stdout.close()
        stderr.close()
    else:
        signal.signal(signal.SIGINT, sigint_handler)
        signal.signal(signal.SIGTERM, sigint_handler)
        os.execvp("flower", flower_cmd)
def kerberos(args):  # noqa
    """Start the kerberos ticket renewer, optionally as a daemon."""
    print(settings.HEADER)
    import airflow.security.kerberos
    if args.daemon:
        pid, stdout, stderr, log_file = setup_locations("kerberos", args.pid, args.stdout, args.stderr, args.log_file)
        stdout = open(stdout, 'w+')
        stderr = open(stderr, 'w+')
        ctx = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pid, -1),
            stdout=stdout,
            stderr=stderr,
        )
        with ctx:
            # runs the renewer loop inside the detached daemon context
            airflow.security.kerberos.run()
        stdout.close()
        stderr.close()
    else:
        airflow.security.kerberos.run()
# Lightweight spec for a single argparse argument. Every field except
# `flags` defaults to None so argument definitions can be written tersely.
Arg = namedtuple(
    'Arg', ['flags', 'help', 'action', 'default', 'nargs', 'type', 'choices', 'metavar'])
Arg.__new__.__defaults__ = (None, None, None, None, None, None, None)
class CLIFactory(object):
    """
    Builds the airflow argparse command-line interface from declarative
    specs: ``args`` maps argument names to Arg specs, ``subparsers`` lists
    each subcommand with its handler function and the args it accepts.
    """
    args = {
        # Shared
        'dag_id': Arg(("dag_id",), "The id of the dag"),
        'task_id': Arg(("task_id",), "The id of the task"),
        'execution_date': Arg(
            ("execution_date",), help="The execution date of the DAG",
            type=parsedate),
        'task_regex': Arg(
            ("-t", "--task_regex"),
            "The regex to filter specific task_ids to backfill (optional)"),
        'subdir': Arg(
            ("-sd", "--subdir"),
            "File location or directory from which to look for the dag",
            default=DAGS_FOLDER),
        'start_date': Arg(
            ("-s", "--start_date"), "Override start_date YYYY-MM-DD",
            type=parsedate),
        'end_date': Arg(
            ("-e", "--end_date"), "Override end_date YYYY-MM-DD",
            type=parsedate),
        'dry_run': Arg(
            ("-dr", "--dry_run"), "Perform a dry run", "store_true"),
        'pid': Arg(
            ("--pid", ), "PID file location",
            nargs='?'),
        'daemon': Arg(
            ("-D", "--daemon"), "Daemonize instead of running "
                                "in the foreground",
            "store_true"),
        'stderr': Arg(
            ("--stderr", ), "Redirect stderr to this file"),
        'stdout': Arg(
            ("--stdout", ), "Redirect stdout to this file"),
        'log_file': Arg(
            ("-l", "--log-file"), "Location of the log file"),
        # backfill
        'mark_success': Arg(
            ("-m", "--mark_success"),
            "Mark jobs as succeeded without running them", "store_true"),
        'local': Arg(
            ("-l", "--local"),
            "Run the task using the LocalExecutor", "store_true"),
        'donot_pickle': Arg(
            ("-x", "--donot_pickle"), (
                "Do not attempt to pickle the DAG object to send over "
                "to the workers, just tell the workers to run their version "
                "of the code."),
            "store_true"),
        'include_adhoc': Arg(
            ("-a", "--include_adhoc"),
            "Include dags with the adhoc parameter.", "store_true"),
        'bf_ignore_dependencies': Arg(
            ("-i", "--ignore_dependencies"),
            (
                "Skip upstream tasks, run only the tasks "
                "matching the regexp. Only works in conjunction "
                "with task_regex"),
            "store_true"),
        'bf_ignore_first_depends_on_past': Arg(
            ("-I", "--ignore_first_depends_on_past"),
            (
                "Ignores depends_on_past dependencies for the first "
                "set of tasks only (subsequent executions in the backfill "
                "DO respect depends_on_past)."),
            "store_true"),
        'pool': Arg(("--pool",), "Resource pool to use"),
        # list_tasks
        'tree': Arg(("-t", "--tree"), "Tree view", "store_true"),
        # list_dags
        'report': Arg(
            ("-r", "--report"), "Show DagBag loading report", "store_true"),
        # clear
        'upstream': Arg(
            ("-u", "--upstream"), "Include upstream tasks", "store_true"),
        'only_failed': Arg(
            ("-f", "--only_failed"), "Only failed jobs", "store_true"),
        'only_running': Arg(
            ("-r", "--only_running"), "Only running jobs", "store_true"),
        'downstream': Arg(
            ("-d", "--downstream"), "Include downstream tasks", "store_true"),
        'no_confirm': Arg(
            ("-c", "--no_confirm"),
            "Do not request confirmation", "store_true"),
        'exclude_subdags': Arg(
            ("-x", "--exclude_subdags"),
            "Exclude subdags", "store_true"),
        # trigger_dag
        'run_id': Arg(("-r", "--run_id"), "Helps to identify this run"),
        'conf': Arg(
            ('-c', '--conf'),
            "JSON string that gets pickled into the DagRun's conf attribute"),
        # variables
        'set': Arg(
            ("-s", "--set"),
            nargs=2,
            metavar=('KEY', 'VAL'),
            help="Set a variable"),
        'get': Arg(
            ("-g", "--get"),
            metavar='KEY',
            help="Get value of a variable"),
        'default': Arg(
            ("-d", "--default"),
            metavar="VAL",
            default=None,
            help="Default value returned if variable does not exist"),
        'json': Arg(
            ("-j", "--json"),
            help="Deserialize JSON variable",
            action="store_true"),
        # kerberos
        'principal': Arg(
            ("principal",), "kerberos principal",
            nargs='?', default=conf.get('kerberos', 'principal')),
        'keytab': Arg(
            ("-kt", "--keytab"), "keytab",
            nargs='?', default=conf.get('kerberos', 'keytab')),
        # run
        'force': Arg(
            ("-f", "--force"),
            "Force a run regardless of previous success", "store_true"),
        'raw': Arg(("-r", "--raw"), argparse.SUPPRESS, "store_true"),
        'ignore_dependencies': Arg(
            ("-i", "--ignore_dependencies"),
            "Ignore upstream and depends_on_past dependencies", "store_true"),
        'ignore_depends_on_past': Arg(
            ("-I", "--ignore_depends_on_past"),
            "Ignore depends_on_past dependencies (but respect "
            "upstream dependencies)",
            "store_true"),
        'ship_dag': Arg(
            ("--ship_dag",),
            "Pickles (serializes) the DAG and ships it to the worker",
            "store_true"),
        'pickle': Arg(
            ("-p", "--pickle"),
            "Serialized pickle object of the entire dag (used internally)"),
        'job_id': Arg(("-j", "--job_id"), argparse.SUPPRESS),
        # webserver
        'port': Arg(
            ("-p", "--port"),
            default=conf.get('webserver', 'WEB_SERVER_PORT'),
            type=int,
            help="The port on which to run the server"),
        'workers': Arg(
            ("-w", "--workers"),
            default=conf.get('webserver', 'WORKERS'),
            type=int,
            help="Number of workers to run the webserver on"),
        'workerclass': Arg(
            ("-k", "--workerclass"),
            default=conf.get('webserver', 'WORKER_CLASS'),
            choices=['sync', 'eventlet', 'gevent', 'tornado'],
            help="The worker class to use for Gunicorn"),
        'worker_timeout': Arg(
            ("-t", "--worker_timeout"),
            default=conf.get('webserver', 'WEB_SERVER_WORKER_TIMEOUT'),
            type=int,
            help="The timeout for waiting on webserver workers"),
        'hostname': Arg(
            ("-hn", "--hostname"),
            default=conf.get('webserver', 'WEB_SERVER_HOST'),
            help="Set the hostname on which to run the web server"),
        'debug': Arg(
            ("-d", "--debug"),
            "Use the server that ships with Flask in debug mode",
            "store_true"),
        'access_logfile': Arg(
            ("-A", "--access_logfile"),
            default=conf.get('webserver', 'ACCESS_LOGFILE'),
            help="The logfile to store the webserver access log. Use '-' to print to "
                 "stderr."),
        'error_logfile': Arg(
            ("-E", "--error_logfile"),
            default=conf.get('webserver', 'ERROR_LOGFILE'),
            help="The logfile to store the webserver error log. Use '-' to print to "
                 "stderr."),
        # resetdb
        'yes': Arg(
            ("-y", "--yes"),
            "Do not prompt to confirm reset. Use with care!",
            "store_true",
            default=False),
        # scheduler
        'dag_id_opt': Arg(("-d", "--dag_id"), help="The id of the dag to run"),
        'num_runs': Arg(
            ("-n", "--num_runs"),
            default=None, type=int,
            help="Set the number of runs to execute before exiting"),
        # worker
        'do_pickle': Arg(
            ("-p", "--do_pickle"),
            default=False,
            help=(
                "Attempt to pickle the DAG object to send over "
                "to the workers, instead of letting workers run their version "
                "of the code."),
            action="store_true"),
        'queues': Arg(
            ("-q", "--queues"),
            help="Comma delimited list of queues to serve",
            default=conf.get('celery', 'DEFAULT_QUEUE')),
        'concurrency': Arg(
            ("-c", "--concurrency"),
            type=int,
            help="The number of worker processes",
            default=conf.get('celery', 'celeryd_concurrency')),
        # flower
        'broker_api': Arg(("-a", "--broker_api"), help="Broker api"),
        'flower_hostname': Arg(
            ("-hn", "--hostname"),
            default=conf.get('celery', 'FLOWER_HOST'),
            help="Set the hostname on which to run the server"),
        'flower_port': Arg(
            ("-p", "--port"),
            default=conf.get('celery', 'FLOWER_PORT'),
            type=int,
            help="The port on which to run the server"),
        'task_params': Arg(
            ("-tp", "--task_params"),
            help="Sends a JSON params dict to the task"),
    }
    # One dict per subcommand: handler function, help text, and the names of
    # the Arg specs (keys of `args` above) it accepts.
    subparsers = (
        {
            'func': backfill,
            'help': "Run subsections of a DAG for a specified date range",
            'args': (
                'dag_id', 'task_regex', 'start_date', 'end_date',
                'mark_success', 'local', 'donot_pickle', 'include_adhoc',
                'bf_ignore_dependencies', 'bf_ignore_first_depends_on_past',
                'subdir', 'pool', 'dry_run')
        }, {
            'func': list_tasks,
            'help': "List the tasks within a DAG",
            'args': ('dag_id', 'tree', 'subdir'),
        }, {
            'func': clear,
            'help': "Clear a set of task instance, as if they never ran",
            'args': (
                'dag_id', 'task_regex', 'start_date', 'end_date', 'subdir',
                'upstream', 'downstream', 'no_confirm', 'only_failed',
                'only_running', 'exclude_subdags'),
        }, {
            'func': pause,
            'help': "Pause a DAG",
            'args': ('dag_id', 'subdir'),
        }, {
            'func': unpause,
            'help': "Resume a paused DAG",
            'args': ('dag_id', 'subdir'),
        }, {
            'func': trigger_dag,
            'help': "Trigger a DAG run",
            'args': ('dag_id', 'subdir', 'run_id', 'conf'),
        }, {
            'func': variables,
            'help': "List all variables",
            "args": ('set', 'get', 'json', 'default'),
        }, {
            'func': kerberos,
            'help': "Start a kerberos ticket renewer",
            'args': ('principal', 'keytab', 'pid',
                     'daemon', 'stdout', 'stderr', 'log_file'),
        }, {
            'func': render,
            'help': "Render a task instance's template(s)",
            'args': ('dag_id', 'task_id', 'execution_date', 'subdir'),
        }, {
            'func': run,
            'help': "Run a single task instance",
            'args': (
                'dag_id', 'task_id', 'execution_date', 'subdir',
                'mark_success', 'force', 'pool',
                'local', 'raw', 'ignore_dependencies',
                'ignore_depends_on_past', 'ship_dag', 'pickle', 'job_id'),
        }, {
            'func': initdb,
            'help': "Initialize the metadata database",
            'args': tuple(),
        }, {
            'func': list_dags,
            'help': "List all the DAGs",
            'args': ('subdir', 'report'),
        }, {
            'func': dag_state,
            'help': "Get the status of a dag run",
            'args': ('dag_id', 'execution_date', 'subdir'),
        }, {
            'func': task_state,
            'help': "Get the status of a task instance",
            'args': ('dag_id', 'task_id', 'execution_date', 'subdir'),
        }, {
            'func': serve_logs,
            'help': "Serve logs generate by worker",
            'args': tuple(),
        }, {
            'func': test,
            'help': (
                "Test a task instance. This will run a task without checking for "
                "dependencies or recording it's state in the database."),
            'args': (
                'dag_id', 'task_id', 'execution_date', 'subdir', 'dry_run',
                'task_params'),
        }, {
            'func': webserver,
            'help': "Start a Airflow webserver instance",
            'args': ('port', 'workers', 'workerclass', 'worker_timeout', 'hostname',
                     'pid', 'daemon', 'stdout', 'stderr', 'access_logfile',
                     'error_logfile', 'log_file', 'debug'),
        }, {
            'func': resetdb,
            'help': "Burn down and rebuild the metadata database",
            'args': ('yes',),
        }, {
            'func': upgradedb,
            'help': "Upgrade the metadata database to latest version",
            'args': tuple(),
        }, {
            'func': scheduler,
            'help': "Start a scheduler instance",
            'args': ('dag_id_opt', 'subdir', 'num_runs', 'do_pickle',
                     'pid', 'daemon', 'stdout', 'stderr', 'log_file'),
        }, {
            'func': worker,
            'help': "Start a Celery worker node",
            'args': ('do_pickle', 'queues', 'concurrency',
                     'pid', 'daemon', 'stdout', 'stderr', 'log_file'),
        }, {
            'func': flower,
            'help': "Start a Celery Flower",
            'args': ('flower_hostname', 'flower_port', 'broker_api',
                     'pid', 'daemon', 'stdout', 'stderr', 'log_file'),
        }, {
            'func': version,
            'help': "Show the version",
            'args': tuple(),
        },
    )
    subparsers_dict = {sp['func'].__name__: sp for sp in subparsers}
    # subcommands available in the restricted per-DAG parser
    dag_subparsers = (
        'list_tasks', 'backfill', 'test', 'run', 'pause', 'unpause')

    @classmethod
    def get_parser(cls, dag_parser=False):
        """Build and return the argparse parser from the specs above.

        When ``dag_parser`` is True, only the DAG-scoped subcommands are
        registered and positional 'dag_id' args are omitted.
        """
        parser = argparse.ArgumentParser()
        subparsers = parser.add_subparsers(
            help='sub-command help', dest='subcommand')
        subparsers.required = True
        subparser_list = cls.dag_subparsers if dag_parser else cls.subparsers_dict.keys()
        for sub in subparser_list:
            sub = cls.subparsers_dict[sub]
            sp = subparsers.add_parser(sub['func'].__name__, help=sub['help'])
            for arg in sub['args']:
                if 'dag_id' in arg and dag_parser:
                    continue
                arg = cls.args[arg]
                # NOTE(review): this filter drops every falsy field value
                # (False, 0, ''), not just None — confirm that is intended
                kwargs = {
                    f: getattr(arg, f)
                    for f in arg._fields if f != 'flags' and getattr(arg, f)}
                sp.add_argument(*arg.flags, **kwargs)
            sp.set_defaults(func=sub['func'])
        return parser
def get_parser():
    """Module-level convenience wrapper returning the full CLI parser."""
    parser = CLIFactory.get_parser()
    return parser
| |
import re
from rest_framework import serializers
from pyparsing import ParseException
from lims.permissions.permissions import SerializerPermissionsMixin
from lims.equipment.models import Equipment
from lims.filetemplate.models import FileTemplate
from lims.inventory.models import ItemType, AmountMeasure
from lims.inventory.serializers import ItemTransferPreviewSerializer
from lims.projects.serializers import DetailedProductSerializer, SimpleProductSerializer
from .models import (Workflow,
Run,
TaskTemplate, InputFieldTemplate, VariableFieldTemplate,
OutputFieldTemplate, CalculationFieldTemplate, StepFieldTemplate,
StepFieldProperty)
from .calculation import NumericStringParser
class WorkflowSerializer(SerializerPermissionsMixin, serializers.ModelSerializer):
    """Serialize a Workflow; the creator is exposed by username, read-only."""
    created_by = serializers.SlugRelatedField(
        slug_field='username',
        read_only=True
    )

    class Meta:
        model = Workflow
        fields = '__all__'
class InputFieldTemplateSerializer(serializers.ModelSerializer):
    """Serialize an InputFieldTemplate, resolving related rows by slug."""
    # measures are addressed by their symbol (e.g. a unit abbreviation)
    measure = serializers.SlugRelatedField(
        queryset=AmountMeasure.objects.all(),
        slug_field='symbol'
    )
    # item types are addressed by name
    lookup_type = serializers.SlugRelatedField(
        queryset=ItemType.objects.all(),
        slug_field='name'
    )
    # derived/managed on the model; not writable via the API
    field_name = serializers.CharField(read_only=True)
    store_value_in = serializers.CharField(read_only=True)

    class Meta:
        model = InputFieldTemplate
        fields = '__all__'
class InputFieldValueSerializer(serializers.Serializer):
    """
    Serializes the values from an input field
    """
    label = serializers.CharField()
    amount = serializers.FloatField()
    measure = serializers.CharField()
    inventory_identifier = serializers.CharField(required=False)
    from_input_file = serializers.NullBooleanField()
    # provenance flags: whether the amount came from a calculation, and which
    from_calculation = serializers.BooleanField(required=False, default=False)
    calculation_used = serializers.IntegerField(required=False, allow_null=True)
    auto_find_in_inventory = serializers.BooleanField(required=False, default=False)
    # optional placement of the input in labware
    destination_barcode = serializers.CharField(required=False, allow_null=True)
    destination_coordinates = serializers.CharField(required=False, allow_null=True)
class VariableFieldTemplateSerializer(serializers.ModelSerializer):
    """Serialize a VariableFieldTemplate; the measure is optional."""
    measure = serializers.SlugRelatedField(
        allow_null=True,
        required=False,
        queryset=AmountMeasure.objects.all(),
        slug_field='symbol'
    )
    # derived on the model; not writable via the API
    field_name = serializers.CharField(read_only=True)

    class Meta:
        model = VariableFieldTemplate
        fields = '__all__'
class VariableFieldValueSerializer(serializers.Serializer):
    """
    Serializes the values from a variable field
    """
    label = serializers.CharField()
    amount = serializers.FloatField()
    measure = serializers.CharField(required=False, allow_null=True)
    measure_not_required = serializers.NullBooleanField(required=False)
    calculation_used = serializers.IntegerField(required=False, allow_null=True)
class OutputFieldTemplateSerializer(serializers.ModelSerializer):
    """Serialize an OutputFieldTemplate, resolving related rows by slug."""
    measure = serializers.SlugRelatedField(
        queryset=AmountMeasure.objects.all(),
        slug_field='symbol'
    )
    lookup_type = serializers.SlugRelatedField(
        queryset=ItemType.objects.all(),
        slug_field='name'
    )
    # derived on the model; not writable via the API
    field_name = serializers.CharField(read_only=True)

    class Meta:
        model = OutputFieldTemplate
        # FIX: the original declared fields = '__all__' twice
        fields = '__all__'
class OutputFieldValueSerializer(serializers.Serializer):
    """
    Serializes the values from an output field
    """
    label = serializers.CharField()
    amount = serializers.FloatField()
    measure = serializers.CharField()
    lookup_type = serializers.CharField()
    calculation_used = serializers.IntegerField(required=False, allow_null=True)
class CalculationFieldTemplateSerializer(serializers.ModelSerializer):
    """Serialize a CalculationFieldTemplate."""
    # derived on the model; not writable via the API
    field_name = serializers.CharField(read_only=True)

    class Meta:
        model = CalculationFieldTemplate
        # FIX: the original declared fields = '__all__' twice
        fields = '__all__'
class CalculationFieldIDTemplateSerializer(CalculationFieldTemplateSerializer):
    """
    Used for when an ID is also needed
    """
    # explicit id so clients can round-trip existing calculation fields
    id = serializers.IntegerField()
class CalculationFieldValueSerializer(serializers.Serializer):
    """
    Serializes the values from a calculation field
    """
    id = serializers.IntegerField()
    label = serializers.CharField()
    calculation = serializers.CharField()
class StepFieldPropertySerializer(serializers.ModelSerializer):
    """Serialize a StepFieldProperty; id is optional to allow creation."""
    id = serializers.IntegerField(allow_null=True, required=False)
    measure = serializers.SlugRelatedField(
        queryset=AmountMeasure.objects.all(),
        slug_field='symbol',
        required=False,
        allow_null=True,
    )
    # derived on the model; not writable via the API
    field_name = serializers.CharField(read_only=True)

    class Meta:
        model = StepFieldProperty
        fields = ('id', 'measure', 'amount', 'label',
                  'from_calculation', 'calculation_used', 'measure_not_required', 'field_name',)
class StepFieldPropertyValueSerializer(serializers.Serializer):
    """
    Serializes the values from a step field property
    """
    id = serializers.IntegerField(required=False, allow_null=True)
    label = serializers.CharField()
    amount = serializers.FloatField()
    measure = serializers.CharField(required=False, allow_null=True)
    measure_not_required = serializers.NullBooleanField(required=False)
    calculation_used = serializers.IntegerField(required=False, allow_null=True)
class StepFieldTemplateSerializer(serializers.ModelSerializer):
    """Serialize a StepFieldTemplate with writable nested properties."""
    properties = StepFieldPropertySerializer(many=True, required=False)
    field_name = serializers.CharField(read_only=True)

    class Meta:
        model = StepFieldTemplate
        fields = '__all__'

    def create(self, validated_data):
        """Create the step field plus any nested property rows."""
        # FIX: pop with a default instead of the original bare ``except:``,
        # which could mask unrelated errors
        property_fields = validated_data.pop('properties', [])
        step = StepFieldTemplate.objects.create(**validated_data)
        for field in property_fields:
            StepFieldProperty.objects.create(step=step, **field)
        return step

    def update(self, instance, validated_data):
        """Update the step field and reconcile its nested properties."""
        properties_data = validated_data.pop('properties')
        properties = instance.properties
        # NOTE(review): writes the 'label' value into instance.name — looks
        # like it should be instance.label; confirm against the model before
        # changing, as existing data may rely on this behavior.
        instance.name = validated_data.get('label', instance.label)
        instance.description = validated_data.get('description', instance.description)
        instance.save()
        # delete properties that are absent from the payload, then re-save
        # every supplied property
        property_ids = [item['id'] for item in properties_data if 'id' in item]
        for field in properties.all():
            if field.id not in property_ids:
                field.delete()
        for f in properties_data:
            field = StepFieldProperty(step=instance, **f)
            field.save()
        return instance
class StepFieldValueSerializer(serializers.Serializer):
    """Serializes the values from a step field, including its properties."""
    label = serializers.CharField()
    description = serializers.CharField(required=False, allow_null=True)
    properties = StepFieldPropertyValueSerializer(many=True)
class TaskTemplateSerializer(SerializerPermissionsMixin, serializers.ModelSerializer):
    """
    Serialize a TaskTemplate with all nested field templates; calculation
    fields are evaluated on output (see handle_calculation).
    """
    created_by = serializers.SlugRelatedField(
        slug_field='username',
        read_only=True
    )
    product_input = serializers.SlugRelatedField(
        queryset=ItemType.objects.all(),
        slug_field='name'
    )
    product_input_measure = serializers.SlugRelatedField(
        queryset=AmountMeasure.objects.all(),
        slug_field='symbol'
    )
    labware = serializers.SlugRelatedField(
        required=False,
        allow_null=True,
        queryset=ItemType.objects.all(),
        slug_field='name'
    )
    labware_amount = serializers.IntegerField(required=False)
    capable_equipment = serializers.SlugRelatedField(
        required=False,
        many=True,
        queryset=Equipment.objects.all(),
        slug_field='name'
    )
    input_files = serializers.SlugRelatedField(
        required=False,
        many=True,
        queryset=FileTemplate.objects.all(),
        slug_field='name'
    )
    output_files = serializers.SlugRelatedField(
        required=False,
        many=True,
        queryset=FileTemplate.objects.all(),
        slug_field='name'
    )
    equipment_files = serializers.SlugRelatedField(
        required=False,
        many=True,
        queryset=FileTemplate.objects.all(),
        slug_field='name'
    )
    # nested field templates are read-only on this serializer
    input_fields = InputFieldTemplateSerializer(read_only=True, many=True)
    variable_fields = VariableFieldTemplateSerializer(read_only=True, many=True)
    calculation_fields = CalculationFieldTemplateSerializer(read_only=True, many=True)
    output_fields = OutputFieldTemplateSerializer(read_only=True, many=True)
    step_fields = StepFieldTemplateSerializer(read_only=True, many=True)
    store_labware_as = serializers.CharField(read_only=True)

    class Meta:
        model = TaskTemplate
        fields = '__all__'

    def to_representation(self, obj):
        """Augment the default representation with calculation results."""
        rep = super(TaskTemplateSerializer, self).to_representation(obj)
        self.handle_calculation(rep)
        return rep

    def _replace_fields(self, match):
        """
        Replace field names with their correct values
        """
        # unknown field names substitute 0 rather than failing the calculation
        mtch = match.group(1)
        if mtch in self.flat:
            return str(self.flat[mtch])
        return str(0)

    def _perform_calculation(self, calculation):
        """
        Parse and perform a calculation using a dict of fields
        Using either a dict of values to field names
        Returns a NaN if the calculation cannot be performed, e.g.
        incorrect field names.
        """
        nsp = NumericStringParser()
        # {field_label} placeholders are swapped for values before evaluation
        field_regex = r'\{(.+?)\}'
        interpolated_calculation = re.sub(field_regex, self._replace_fields, calculation)
        try:
            result = nsp.eval(interpolated_calculation)
        except ParseException:
            # NOTE(review): docstring says NaN but None is actually returned
            return None
        return result

    def _flatten_values(self, rep):
        """Map field labels to amounts across input/step/variable fields."""
        flat_values = {}
        for field_type in ['input_fields', 'step_fields', 'variable_fields']:
            if field_type in rep:
                for field in rep[field_type]:
                    if field_type == 'step_fields':
                        # step fields hold their amounts in nested properties
                        for prop in field['properties']:
                            flat_values[prop['label']] = prop['amount']
                    else:
                        flat_values[field['label']] = field['amount']
        if 'product_input_amount' in rep:
            flat_values['product_input_amount'] = rep['product_input_amount']
        return flat_values

    def handle_calculation(self, rep):
        """
        Perform calculations on all calculation fields on the task
        If any data is provided, use that as source for the calculations
        rather than the defaults on the model.
        """
        # Flatten fields into named dict/ordered dict
        # Will need some sort of defer if not completed calculation dependent on other calculation
        if 'calculation_fields' in rep:
            # _replace_fields reads self.flat, so it must be set first
            self.flat = self._flatten_values(rep)
            for calc in rep['calculation_fields']:
                result = self._perform_calculation(calc['calculation'])
                calc['result'] = result
        return rep
class TaskTemplateNoProductInputSerializer(TaskTemplateSerializer):
    """TaskTemplateSerializer variant where product input is optional."""
    product_input = serializers.SlugRelatedField(
        queryset=ItemType.objects.all(),
        slug_field='name',
        required=False,
        allow_null=True,
    )
    product_input_measure = serializers.SlugRelatedField(
        queryset=AmountMeasure.objects.all(),
        slug_field='symbol',
        required=False,
        allow_null=True,
    )
    product_input_amount = serializers.FloatField(required=False, allow_null=True)
class RecalculateTaskTemplateSerializer(TaskTemplateSerializer):
    """
    Same as TaskTemplateSerializer but with ID's + no save
    """
    id = serializers.IntegerField()
    # nested fields are writable here so client edits feed the recalculation
    input_fields = InputFieldTemplateSerializer(many=True)
    variable_fields = VariableFieldTemplateSerializer(many=True)
    calculation_fields = CalculationFieldIDTemplateSerializer(many=True)
    output_fields = OutputFieldTemplateSerializer(many=True)
    step_fields = StepFieldTemplateSerializer(many=True)
    store_labware_as = serializers.CharField()
    created_by = serializers.CharField()  # Prevents modification of read-only User objects

    def save(self):
        """Disabled: this serializer is for recalculation previews only."""
        # NEVER allow this serializer to create a new object
        return False
class SimpleTaskTemplateSerializer(TaskTemplateSerializer):
    """Compact task-template listing with only summary fields."""
    valid_product_input_types = serializers.ListField(read_only=True)

    class Meta:
        model = TaskTemplate
        fields = ('id', 'name', 'description', 'product_input', 'valid_product_input_types',
                  'capable_equipment', 'created_by', 'date_created', 'product_input_not_required')
class TaskValuesSerializer(serializers.Serializer):
    """Serialize the full set of values submitted when running a task."""
    product_input_not_required = serializers.NullBooleanField(required=False)
    product_input = serializers.CharField()
    product_input_amount = serializers.FloatField()
    product_input_measure = serializers.CharField()
    # labware selection is optional depending on the task configuration
    labware_not_required = serializers.NullBooleanField()
    labware_identifier = serializers.CharField(required=False, allow_null=True)
    labware_amount = serializers.IntegerField(required=False)
    labware_barcode = serializers.CharField(required=False, allow_null=True)
    equipment_choice = serializers.CharField(required=False, allow_null=True)
    # per-field values, one nested serializer per field category
    input_fields = InputFieldValueSerializer(many=True)
    variable_fields = VariableFieldValueSerializer(many=True)
    calculation_fields = CalculationFieldValueSerializer(many=True)
    output_fields = OutputFieldValueSerializer(many=True)
    step_fields = StepFieldValueSerializer(many=True)
class TaskValuesNoProductInputSerializer(TaskValuesSerializer):
    """TaskValuesSerializer variant where product input is optional."""
    product_input = serializers.CharField(required=False, allow_null=True)
    product_input_amount = serializers.FloatField(required=False, allow_null=True)
    product_input_measure = serializers.CharField(required=False, allow_null=True)
class RunSerializer(SerializerPermissionsMixin, serializers.ModelSerializer):
    """
    Provides basic serialisation of workflow run
    """
    started_by = serializers.SlugRelatedField(
        slug_field='username',
        read_only=True
    )
    # derived/read-only projections of the run's tasks, inputs and products
    tasks_list = SimpleTaskTemplateSerializer(read_only=True, many=True,
                                              source='get_tasks')
    validate_inputs = serializers.DictField(source='has_valid_inputs', read_only=True)
    products_list = SimpleProductSerializer(read_only=True, many=True, source='products')

    class Meta:
        model = Run
        fields = '__all__'
class DetailedRunSerializer(serializers.ModelSerializer):
    """Expanded run serialization including products, tasks and transfers."""
    validate_inputs = serializers.DictField(source='has_valid_inputs')
    products = DetailedProductSerializer(read_only=True, many=True)
    tasks = SimpleTaskTemplateSerializer(read_only=True, many=True,
                                         source='get_tasks')
    transfers = ItemTransferPreviewSerializer(read_only=True, many=True)

    class Meta:
        model = Run
        fields = '__all__'
| |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import (
core, gradient_checker, rnn_cell, workspace, scope, utils
)
from caffe2.python.attention import AttentionType
from caffe2.python.model_helper import ModelHelper, ExtractPredictorNet
from caffe2.python.rnn.rnn_cell_test_util import sigmoid, tanh, _prepare_rnn
from caffe2.proto import caffe2_pb2
import caffe2.python.hypothesis_test_util as hu
from functools import partial
from hypothesis import given
from hypothesis import settings as ht_settings
import hypothesis.strategies as st
import numpy as np
def lstm_unit(hidden_t_prev, cell_t_prev, gates,
              seq_lengths, timestep, forget_bias=0.0, drop_states=False):
    """NumPy reference for a single LSTM timestep.

    Applies the standard LSTM gate equations to one step's pre-activation
    `gates`, freezing (or dropping) the state of batch entries whose
    sequence has already ended. Returns (hidden_t, cell_t), each reshaped
    to (1, N, D).
    """
    state_dim = cell_t_prev.shape[2]
    batch = gates.shape[1]
    assert gates.shape[2] == 4 * state_dim

    # Broadcast the scalar timestep and per-item lengths to (N, D) masks.
    step = (timestep * np.ones(shape=(batch, state_dim))).astype(np.int32)
    assert step.shape == (batch, state_dim)
    lengths = (np.ones(shape=(batch, state_dim)) *
               seq_lengths.reshape(batch, 1)).astype(np.int32)
    assert lengths.shape == (batch, state_dim)

    # Reshape so each gate slice is a clean (N, D) array; this avoids
    # broadcasting inconsistencies with NumPy.
    gate_stack = gates.reshape(batch, 4, state_dim)
    prev_cell = cell_t_prev.reshape(batch, state_dim)
    in_gate = sigmoid(gate_stack[:, 0, :].reshape(batch, state_dim))
    forget_gate = sigmoid(
        gate_stack[:, 1, :].reshape(batch, state_dim) + forget_bias)
    out_gate = sigmoid(gate_stack[:, 2, :].reshape(batch, state_dim))
    candidate = tanh(gate_stack[:, 3, :].reshape(batch, state_dim))

    # 1 where this timestep is still inside the sequence, else 0.
    active = (step < lengths).astype(np.int32)
    assert active.shape == (batch, state_dim)

    new_cell = ((forget_gate * prev_cell) + (in_gate * candidate)) * active + \
        (1 - active) * prev_cell * (1 - drop_states)
    assert new_cell.shape == (batch, state_dim)
    new_hidden = (out_gate * tanh(new_cell)) * active + hidden_t_prev * (
        1 - active) * (1 - drop_states)

    return (new_hidden.reshape(1, batch, state_dim),
            new_cell.reshape(1, batch, state_dim))
def lstm_reference(input, hidden_input, cell_input,
                   gates_w, gates_b, seq_lengths, forget_bias,
                   drop_states=False):
    """NumPy reference for a full LSTM pass over T timesteps.

    `input` is the already-projected input of shape (T, N, 4*D).
    Returns (all hidden states, last hidden, all cell states, last cell).
    """
    num_steps, batch, gate_dim = input.shape
    state_dim = hidden_input.shape[hidden_input.ndim - 1]
    # Slot 0 of each buffer holds the initial state; 1..T the step outputs.
    hidden = np.zeros(shape=(num_steps + 1, batch, state_dim))
    cell = np.zeros(shape=(num_steps + 1, batch, state_dim))
    hidden[0, :, :] = hidden_input
    cell[0, :, :] = cell_input

    for step in range(num_steps):
        prev_h = hidden[step].reshape(1, batch, state_dim)
        prev_c = cell[step].reshape(1, batch, state_dim)
        # Recurrent contribution plus the precomputed input projection.
        pre_gates = np.dot(prev_h, gates_w.T) + gates_b
        pre_gates = pre_gates + input[step].reshape(1, batch, gate_dim)
        new_h, new_c = lstm_unit(
            hidden_t_prev=prev_h,
            cell_t_prev=prev_c,
            gates=pre_gates,
            seq_lengths=seq_lengths,
            timestep=step,
            forget_bias=forget_bias,
            drop_states=drop_states,
        )
        hidden[step + 1] = new_h
        cell[step + 1] = new_c

    return (
        hidden[1:],
        hidden[-1].reshape(1, batch, state_dim),
        cell[1:],
        cell[-1].reshape(1, batch, state_dim),
    )
def multi_lstm_reference(input, hidden_input_list, cell_input_list,
                         i2h_w_list, i2h_b_list, gates_w_list, gates_b_list,
                         seq_lengths, forget_bias, drop_states=False):
    """Stacked-LSTM NumPy reference.

    Projects each layer's input through its i2h weights, runs
    lstm_reference, and feeds the resulting hidden sequence into the next
    layer. Returns the last layer's (h_all, h_last, c_all, c_last).
    """
    depth = len(hidden_input_list)
    # All per-layer parameter lists must line up with the state lists.
    for per_layer in (cell_input_list, i2h_w_list, i2h_b_list,
                      gates_w_list, gates_b_list):
        assert len(per_layer) == depth

    layer_in = input
    for layer in range(depth):
        projected = np.dot(layer_in, i2h_w_list[layer].T) + i2h_b_list[layer]
        h_all, h_last, c_all, c_last = lstm_reference(
            projected,
            hidden_input_list[layer],
            cell_input_list[layer],
            gates_w_list[layer],
            gates_b_list[layer],
            seq_lengths,
            forget_bias,
            drop_states=drop_states,
        )
        layer_in = h_all

    return h_all, h_last, c_all, c_last
def lstm_with_attention_reference(
    input,
    initial_hidden_state,
    initial_cell_state,
    initial_attention_weighted_encoder_context,
    gates_w,
    gates_b,
    decoder_input_lengths,
    weighted_decoder_hidden_state_t_w,
    weighted_decoder_hidden_state_t_b,
    weighted_encoder_outputs,
    attention_v,
    attention_zeros,
    encoder_outputs_transposed,
):
    """NumPy reference for LSTMWithAttention with AttentionType.Regular.

    Runs a decoder LSTM over `input`; at each step the attention-weighted
    encoder context is recomputed from the new hidden state and stored for
    use at the next step. Returns (all hidden, last hidden, all cell,
    last cell, all contexts, last context). `attention_zeros` is accepted
    to mirror the operator's input list but is unused here.
    """
    # Operator feeds encoder outputs as (batch, dim, length); restore
    # (length, batch, dim) for the reference math.
    encoder_outputs = np.transpose(encoder_outputs_transposed, axes=[2, 0, 1])
    decoder_input_length = input.shape[0]
    batch_size = input.shape[1]
    decoder_input_dim = input.shape[2]
    decoder_state_dim = initial_hidden_state.shape[2]
    encoder_output_dim = weighted_encoder_outputs.shape[2]
    # Slot 0 of each buffer holds the initial state; 1..T the step outputs.
    hidden = np.zeros(
        shape=(decoder_input_length + 1, batch_size, decoder_state_dim))
    cell = np.zeros(
        shape=(decoder_input_length + 1, batch_size, decoder_state_dim))
    attention_weighted_encoder_context = np.zeros(
        shape=(decoder_input_length + 1, batch_size, encoder_output_dim))
    cell[0, :, :] = initial_cell_state
    hidden[0, :, :] = initial_hidden_state
    attention_weighted_encoder_context[0, :, :] = (
        initial_attention_weighted_encoder_context
    )
    for t in range(decoder_input_length):
        input_t = input[t].reshape(1, batch_size, decoder_input_dim)
        hidden_t_prev = hidden[t].reshape(1, batch_size, decoder_state_dim)
        cell_t_prev = cell[t].reshape(1, batch_size, decoder_state_dim)
        attention_weighted_encoder_context_t_prev = (
            attention_weighted_encoder_context[t].reshape(
                1, batch_size, encoder_output_dim)
        )
        # Recurrent input is [hidden, attention context] concatenated.
        gates_input = np.concatenate(
            (hidden_t_prev, attention_weighted_encoder_context_t_prev),
            axis=2,
        )
        gates = np.dot(gates_input, gates_w.T) + gates_b
        gates = gates + input_t
        # forget_bias fixed to 0 (last positional argument).
        hidden_t, cell_t = lstm_unit(hidden_t_prev, cell_t_prev, gates,
                                     decoder_input_lengths, t, 0)
        hidden[t + 1] = hidden_t
        cell[t + 1] = cell_t
        # Additive attention: score each encoder position against the new
        # hidden state.
        weighted_hidden_t = np.dot(
            hidden_t,
            weighted_decoder_hidden_state_t_w.T,
        ) + weighted_decoder_hidden_state_t_b
        attention_v = attention_v.reshape([-1])
        attention_logits_t = np.sum(
            attention_v * np.tanh(weighted_encoder_outputs + weighted_hidden_t),
            axis=2,
        )
        # Softmax over the encoder-length axis (axis 0).
        attention_logits_t_exp = np.exp(attention_logits_t)
        attention_weights_t = (
            attention_logits_t_exp /
            np.sum(attention_logits_t_exp, axis=0).reshape([1, -1])
        )
        attention_weighted_encoder_context[t + 1] = np.sum(
            (
                encoder_outputs *
                attention_weights_t.reshape([-1, batch_size, 1])
            ),
            axis=0,
        )
    return (
        hidden[1:],
        hidden[-1].reshape(1, batch_size, decoder_state_dim),
        cell[1:],
        cell[-1].reshape(1, batch_size, decoder_state_dim),
        attention_weighted_encoder_context[1:],
        attention_weighted_encoder_context[-1].reshape(
            1,
            batch_size,
            encoder_output_dim,
        )
    )
def lstm_with_recurrent_attention_reference(
    input,
    initial_hidden_state,
    initial_cell_state,
    initial_attention_weighted_encoder_context,
    gates_w,
    gates_b,
    decoder_input_lengths,
    weighted_prev_attention_context_w,
    weighted_prev_attention_context_b,
    weighted_decoder_hidden_state_t_w,
    weighted_decoder_hidden_state_t_b,
    weighted_encoder_outputs,
    attention_v,
    attention_zeros,
    encoder_outputs_transposed,
):
    """NumPy reference for LSTMWithAttention with AttentionType.Recurrent.

    Identical to lstm_with_attention_reference, except the attention score
    additionally includes a projection of the previous step's attention
    context (`weighted_prev_attention_context_w/b`). `attention_zeros` is
    accepted to mirror the operator's input list but is unused here.
    """
    # Operator feeds encoder outputs as (batch, dim, length); restore
    # (length, batch, dim) for the reference math.
    encoder_outputs = np.transpose(encoder_outputs_transposed, axes=[2, 0, 1])
    decoder_input_length = input.shape[0]
    batch_size = input.shape[1]
    decoder_input_dim = input.shape[2]
    decoder_state_dim = initial_hidden_state.shape[2]
    encoder_output_dim = weighted_encoder_outputs.shape[2]
    # Slot 0 of each buffer holds the initial state; 1..T the step outputs.
    hidden = np.zeros(
        shape=(decoder_input_length + 1, batch_size, decoder_state_dim))
    cell = np.zeros(
        shape=(decoder_input_length + 1, batch_size, decoder_state_dim))
    attention_weighted_encoder_context = np.zeros(
        shape=(decoder_input_length + 1, batch_size, encoder_output_dim))
    cell[0, :, :] = initial_cell_state
    hidden[0, :, :] = initial_hidden_state
    attention_weighted_encoder_context[0, :, :] = (
        initial_attention_weighted_encoder_context
    )
    for t in range(decoder_input_length):
        input_t = input[t].reshape(1, batch_size, decoder_input_dim)
        hidden_t_prev = hidden[t].reshape(1, batch_size, decoder_state_dim)
        cell_t_prev = cell[t].reshape(1, batch_size, decoder_state_dim)
        attention_weighted_encoder_context_t_prev = (
            attention_weighted_encoder_context[t].reshape(
                1, batch_size, encoder_output_dim)
        )
        # Recurrent input is [hidden, attention context] concatenated.
        gates_input = np.concatenate(
            (hidden_t_prev, attention_weighted_encoder_context_t_prev),
            axis=2,
        )
        gates = np.dot(gates_input, gates_w.T) + gates_b
        gates = gates + input_t
        # forget_bias fixed to 0 (last positional argument).
        hidden_t, cell_t = lstm_unit(hidden_t_prev, cell_t_prev, gates,
                                     decoder_input_lengths, t, 0)
        hidden[t + 1] = hidden_t
        cell[t + 1] = cell_t
        weighted_hidden_t = np.dot(
            hidden_t,
            weighted_decoder_hidden_state_t_w.T,
        ) + weighted_decoder_hidden_state_t_b
        # The "recurrent" part: project the previous attention context into
        # the score as well.
        weighted_prev_attention_context = np.dot(
            attention_weighted_encoder_context_t_prev,
            weighted_prev_attention_context_w.T
        ) + weighted_prev_attention_context_b
        attention_v = attention_v.reshape([-1])
        attention_logits_t = np.sum(
            attention_v * np.tanh(
                weighted_encoder_outputs + weighted_hidden_t +
                weighted_prev_attention_context
            ),
            axis=2,
        )
        # Softmax over the encoder-length axis (axis 0).
        attention_logits_t_exp = np.exp(attention_logits_t)
        attention_weights_t = (
            attention_logits_t_exp /
            np.sum(attention_logits_t_exp, axis=0).reshape([1, -1])
        )
        attention_weighted_encoder_context[t + 1] = np.sum(
            (
                encoder_outputs *
                attention_weights_t.reshape([-1, batch_size, 1])
            ),
            axis=0,
        )
    return (
        hidden[1:],
        hidden[-1].reshape(1, batch_size, decoder_state_dim),
        cell[1:],
        cell[-1].reshape(1, batch_size, decoder_state_dim),
        attention_weighted_encoder_context[1:],
        attention_weighted_encoder_context[-1].reshape(
            1,
            batch_size,
            encoder_output_dim,
        )
    )
def milstm_reference(
    input,
    hidden_input,
    cell_input,
    gates_w,
    gates_b,
    alpha,
    beta1,
    beta2,
    b,
    seq_lengths,
    forget_bias,
    drop_states=False,
):
    """NumPy reference for a multiplicative-integration LSTM (MI-LSTM).

    Like lstm_reference, but the recurrent and input projections are
    combined as alpha*rec*inp + beta1*rec + beta2*inp + b before the LSTM
    nonlinearities. Returns (all hidden, last hidden, all cell, last cell).
    """
    num_steps, batch, gate_dim = input.shape
    state_dim = hidden_input.shape[hidden_input.ndim - 1]
    # Slot 0 of each buffer holds the initial state; 1..T the step outputs.
    hidden = np.zeros(shape=(num_steps + 1, batch, state_dim))
    cell = np.zeros(shape=(num_steps + 1, batch, state_dim))
    hidden[0, :, :] = hidden_input
    cell[0, :, :] = cell_input

    for step in range(num_steps):
        inp_t = input[step].reshape(1, batch, gate_dim)
        prev_h = hidden[step].reshape(1, batch, state_dim)
        prev_c = cell[step].reshape(1, batch, state_dim)
        rec = np.dot(prev_h, gates_w.T) + gates_b
        # Multiplicative integration of recurrent and input projections.
        combined = (alpha * rec * inp_t) + (beta1 * rec) + (beta2 * inp_t) + b
        new_h, new_c = lstm_unit(
            prev_h,
            prev_c,
            combined,
            seq_lengths,
            step,
            forget_bias,
            drop_states=drop_states,
        )
        hidden[step + 1] = new_h
        cell[step + 1] = new_c

    return (
        hidden[1:],
        hidden[-1].reshape(1, batch, state_dim),
        cell[1:],
        cell[-1].reshape(1, batch, state_dim)
    )
def lstm_input():
    '''
    Create input tensor where each dimension is from 1 to 4, ndim=3 and
    last dimension size is a factor of 4
    '''
    dim_strategy = st.tuples(
        st.integers(min_value=1, max_value=4),  # t
        st.integers(min_value=1, max_value=4),  # n
        st.integers(min_value=1, max_value=4),  # d
    )

    def build_array(dims):
        # Gates pack (i, f, o, g), so the innermost size must be 4 * d.
        t_dim, n_dim, d_dim = dims
        return hu.arrays([t_dim, n_dim, d_dim * 4])

    return dim_strategy.flatmap(build_array)
def _prepare_attention(t, n, dim_in, encoder_dim,
                       forward_only=False, T=None,
                       dim_out=None, residual=False):
    """Build a stacked-MILSTM decoder with a recurrent attention cell.

    If `T` is given the attention cell is statically unrolled for T steps,
    otherwise a dynamic recurrent net is built. Feeds random initial
    states and sequence lengths into the workspace, and returns a dict
    with the net, its external input blob names, the final output, and
    the output indices that receive gradients.
    """
    if dim_out is None:
        dim_out = [dim_in]
    print("Dims: t={} n={} dim_in={} dim_out={}".format(t, n, dim_in, dim_out))

    model = ModelHelper(name='external')

    def generate_input_state(shape):
        # Random float32 state tensor of the given shape.
        return np.random.random(shape).astype(np.float32)

    initial_states = []
    for layer_id, d in enumerate(dim_out):
        h, c = model.net.AddExternalInputs(
            "hidden_init_{}".format(layer_id),
            "cell_init_{}".format(layer_id),
        )
        initial_states.extend([h, c])
        workspace.FeedBlob(h, generate_input_state((1, n, d)))
        workspace.FeedBlob(c, generate_input_state((1, n, d)))

    # The attention context is an extra recurrent state, appended last.
    awec_init = model.net.AddExternalInputs([
        'initial_attention_weighted_encoder_context',
    ])
    initial_states.append(awec_init)
    workspace.FeedBlob(
        awec_init,
        generate_input_state((1, n, encoder_dim)),
    )

    # Due to convoluted RNN scoping logic we make sure that things
    # work from a namescope
    with scope.NameScope("test_name_scope"):
        (
            input_blob,
            seq_lengths,
            encoder_outputs,
            weighted_encoder_outputs,
        ) = model.net.AddScopedExternalInputs(
            'input_blob',
            'seq_lengths',
            'encoder_outputs',
            'weighted_encoder_outputs',
        )

        # Stack one MILSTM cell per entry of dim_out, each feeding the next.
        layer_input_dim = dim_in
        cells = []
        for layer_id, d in enumerate(dim_out):
            cell = rnn_cell.MILSTMCell(
                name='decoder_{}'.format(layer_id),
                forward_only=forward_only,
                input_size=layer_input_dim,
                hidden_size=d,
                forget_bias=0.0,
                memory_optimization=False,
            )
            cells.append(cell)
            layer_input_dim = d

        decoder_cell = rnn_cell.MultiRNNCell(
            cells,
            name='decoder',
            residual_output_layers=range(1, len(cells)) if residual else None,
        )

        attention_cell = rnn_cell.AttentionCell(
            encoder_output_dim=encoder_dim,
            encoder_outputs=encoder_outputs,
            decoder_cell=decoder_cell,
            decoder_state_dim=dim_out[-1],
            name='attention_decoder',
            attention_type=AttentionType.Recurrent,
            weighted_encoder_outputs=weighted_encoder_outputs,
            attention_memory_optimization=True,
        )
        # Statically unroll only when a fixed T is requested.
        attention_cell = (
            attention_cell if T is None
            else rnn_cell.UnrolledCell(attention_cell, T)
        )

        output_indices = decoder_cell.output_indices
        output_indices.append(2 * len(cells))
        # Each state contributes (all-steps, last-step) output pairs; take
        # the even (all-steps) slots.
        outputs_with_grads = [2 * i for i in output_indices]

        final_output, state_outputs = attention_cell.apply_over_sequence(
            model=model,
            inputs=input_blob,
            seq_lengths=seq_lengths,
            initial_states=initial_states,
            outputs_with_grads=outputs_with_grads,
        )

    workspace.RunNetOnce(model.param_init_net)

    workspace.FeedBlob(
        seq_lengths,
        np.random.randint(1, t + 1, size=(n,)).astype(np.int32)
    )
    return {
        'final_output': final_output,
        'net': model.net,
        'initial_states': initial_states,
        'input_blob': input_blob,
        'encoder_outputs': encoder_outputs,
        'weighted_encoder_outputs': weighted_encoder_outputs,
        'outputs_with_grads': outputs_with_grads,
    }
class MulCell(rnn_cell.RNNCell):
    """Minimal RNN cell whose transition is elementwise multiplication of
    the input with the (single) previous state."""

    def _apply(self, model, input_t,
               seq_lengths, states, timestep, extra_inputs):
        # Single state blob: new_state = input_t * prev_state.
        assert len(states) == 1
        result = model.net.Mul([input_t, states[0]])
        model.net.AddExternalOutput(result)
        return [result]

    def get_state_names(self):
        # One scoped state blob name.
        return [self.scope("state")]
def prepare_mul_rnn(model, input_blob, shape, T, outputs_with_grad, num_layers):
    """Build a stacked MulCell RNN (unrolled when `T` is given, dynamic
    otherwise) and return its last state's (all-steps, last-step) outputs."""
    print("Shape: ", shape)
    t, n, d = shape
    cells = [MulCell(name="layer_{}".format(i)) for i in range(num_layers)]
    cell = rnn_cell.MultiRNNCell(name="multi_mul_rnn", cells=cells)
    if T is not None:
        cell = rnn_cell.UnrolledCell(cell, T=T)
    # One all-ones initial state per layer.
    states = [
        model.param_init_net.ConstantFill(
            [], "initial_state_{}".format(i), value=1.0, shape=[1, n, d])
        for i in range(num_layers)]
    _, results = cell.apply_over_sequence(
        model=model,
        inputs=input_blob,
        initial_states=states,
        # Shift the requested indices to the last layer's output slots.
        outputs_with_grads=[
            x + 2 * (num_layers - 1) for x in outputs_with_grad
        ],
        seq_lengths=None,
    )
    # Last two entries are the final layer's (all-steps, last-step) pair.
    return results[-2:]
class RNNCellTest(hu.HypothesisTestCase):
    """Hypothesis-driven tests for caffe2 RNN cells: unrolled vs. dynamic
    net comparisons, LSTM/MILSTM checks against the NumPy references
    above, gradient checks, and predictor-net extraction."""

    @given(
        input_tensor=hu.tensor(min_dim=3, max_dim=3, max_value=3),
        num_layers=st.integers(1, 4),
        outputs_with_grad=st.sampled_from(
            [[0], [1], [0, 1]]
        ),
    )
    @ht_settings(max_examples=10)
    def test_unroll_mul(self, input_tensor, num_layers, outputs_with_grad):
        """Unrolled and dynamic multi-layer MulCell nets must agree."""
        outputs = []
        nets = []
        input_blob = None
        for T in [input_tensor.shape[0], None]:
            model = ModelHelper("rnn_mul_{}".format(
                "unroll" if T else "dynamic"))
            input_blob = model.net.AddExternalInputs("input_blob")
            outputs.append(
                prepare_mul_rnn(model, input_blob, input_tensor.shape, T,
                                outputs_with_grad, num_layers))
            workspace.RunNetOnce(model.param_init_net)
            nets.append(model.net)

        workspace.blobs[input_blob] = input_tensor
        gradient_checker.NetGradientChecker.CompareNets(
            nets, outputs, outputs_with_grad_ids=outputs_with_grad,
            inputs_with_grads=[input_blob],
        )

    @given(
        input_tensor=hu.tensor(min_dim=3, max_dim=3, max_value=3),
        forget_bias=st.floats(-10.0, 10.0),
        drop_states=st.booleans(),
        dim_out=st.lists(
            elements=st.integers(min_value=1, max_value=3),
            min_size=1, max_size=3,
        ),
        outputs_with_grads=st.sampled_from(
            [[0], [1], [0, 1], [0, 2], [0, 1, 2, 3]]
        )
    )
    @ht_settings(max_examples=10)
    @utils.debug
    def test_unroll_lstm(self, input_tensor, dim_out, outputs_with_grads,
                         **kwargs):
        """Unrolled and dynamic LSTM nets must agree on value and grads."""
        lstms = [
            _prepare_rnn(
                *input_tensor.shape,
                create_rnn=rnn_cell.LSTM,
                outputs_with_grads=outputs_with_grads,
                T=T,
                two_d_initial_states=False,
                dim_out=dim_out,
                **kwargs
            ) for T in [input_tensor.shape[0], None]
        ]
        outputs, nets, inputs = zip(*lstms)
        workspace.FeedBlob(inputs[0][-1], input_tensor)
        assert inputs[0] == inputs[1]
        gradient_checker.NetGradientChecker.CompareNets(
            nets, outputs, outputs_with_grads,
            inputs_with_grads=inputs[0],
        )

    @given(
        input_tensor=hu.tensor(min_dim=3, max_dim=3, max_value=3),
        encoder_length=st.integers(min_value=1, max_value=3),
        encoder_dim=st.integers(min_value=1, max_value=3),
        hidden_units=st.integers(min_value=1, max_value=3),
        num_layers=st.integers(min_value=1, max_value=3),
        residual=st.booleans(),
    )
    @ht_settings(max_examples=10)
    @utils.debug
    def test_unroll_attention(self, input_tensor, encoder_length,
                              encoder_dim, hidden_units,
                              num_layers, residual):
        """Unrolled and dynamic attention-decoder nets must agree."""
        dim_out = [hidden_units] * num_layers
        encoder_tensor = np.random.random(
            (encoder_length, input_tensor.shape[1], encoder_dim),
        ).astype('float32')

        print('Decoder input shape: {}'.format(input_tensor.shape))
        print('Encoder output shape: {}'.format(encoder_tensor.shape))

        # Necessary because otherwise test fails for networks with fewer
        # layers than previous test. TODO: investigate why.
        workspace.ResetWorkspace()

        net, unrolled = [
            _prepare_attention(
                t=input_tensor.shape[0],
                n=input_tensor.shape[1],
                dim_in=input_tensor.shape[2],
                encoder_dim=encoder_dim,
                T=T,
                dim_out=dim_out,
                residual=residual) for T in [input_tensor.shape[0], None]
        ]

        workspace.FeedBlob(net['input_blob'], input_tensor)
        workspace.FeedBlob(net['encoder_outputs'], encoder_tensor)
        workspace.FeedBlob(
            net['weighted_encoder_outputs'],
            np.random.random(encoder_tensor.shape).astype('float32'),
        )

        # Both nets must share the same external input blobs so a single
        # feed covers them.
        for input_name in [
            'input_blob',
            'encoder_outputs',
            'weighted_encoder_outputs',
        ]:
            assert net[input_name] == unrolled[input_name]

        for state_name, unrolled_state_name in zip(
            net['initial_states'],
            unrolled['initial_states'],
        ):
            assert state_name == unrolled_state_name

        inputs_with_grads = net['initial_states'] + [
            net['input_blob'],
            net['encoder_outputs'],
            net['weighted_encoder_outputs'],
        ]

        gradient_checker.NetGradientChecker.CompareNets(
            [net['net'], unrolled['net']],
            [[net['final_output']], [unrolled['final_output']]],
            [0],
            inputs_with_grads=inputs_with_grads,
            threshold=0.000001,
        )

    @given(
        input_tensor=hu.tensor(min_dim=3, max_dim=3),
        forget_bias=st.floats(-10.0, 10.0),
        forward_only=st.booleans(),
        drop_states=st.booleans(),
    )
    @ht_settings(max_examples=10)
    def test_layered_lstm(self, input_tensor, **kwargs):
        """Smoke test: layered LSTM nets build and run for every grad /
        memory-optimization configuration."""
        for outputs_with_grads in [[0], [1], [0, 1, 2, 3]]:
            for memory_optim in [False, True]:
                _, net, inputs = _prepare_rnn(
                    *input_tensor.shape,
                    create_rnn=rnn_cell.LSTM,
                    outputs_with_grads=outputs_with_grads,
                    memory_optim=memory_optim,
                    **kwargs
                )
                workspace.FeedBlob(inputs[-1], input_tensor)
                workspace.RunNetOnce(net)
                workspace.ResetWorkspace()

    @given(
        input_tensor=lstm_input(),
        forget_bias=st.floats(-10.0, 10.0),
        fwd_only=st.booleans(),
        drop_states=st.booleans(),
    )
    @ht_settings(max_examples=3, timeout=100)
    @utils.debug
    def test_lstm_main(self, **kwargs):
        """Check LSTM and MILSTM against their NumPy references across
        gradient-output and memory-optimization configurations."""
        for lstm_type in [(rnn_cell.LSTM, lstm_reference),
                          (rnn_cell.MILSTM, milstm_reference)]:
            for outputs_with_grads in [[0], [1], [0, 1, 2, 3]]:
                for memory_optim in [False, True]:
                    self.lstm_base(lstm_type,
                                   outputs_with_grads=outputs_with_grads,
                                   memory_optim=memory_optim,
                                   **kwargs)

    def lstm_base(self, lstm_type, outputs_with_grads, memory_optim,
                  input_tensor, forget_bias, fwd_only, drop_states):
        """Shared driver: build one LSTM variant, compare its outputs with
        the NumPy reference, and (unless forward-only) gradient-check its
        first five inputs."""
        print("LSTM test parameters: ", locals())
        create_lstm, ref = lstm_type
        t, n, d = input_tensor.shape
        assert d % 4 == 0
        d = d // 4
        # Bind the reference's configuration once. (A previous intermediate
        # partial(ref, forget_bias=...) was immediately superseded by this
        # call and has been removed.)
        ref = partial(ref, forget_bias=forget_bias, drop_states=drop_states)

        net = _prepare_rnn(t, n, d, create_lstm,
                           outputs_with_grads=outputs_with_grads,
                           memory_optim=memory_optim,
                           forget_bias=forget_bias,
                           forward_only=fwd_only,
                           drop_states=drop_states)[1]
        # here we don't provide a real input for the net but just for one of
        # its ops (RecurrentNetworkOp). So have to hardcode this name
        workspace.FeedBlob("test_name_scope/external/recurrent/i2h",
                           input_tensor)
        op = net._net.op[-1]
        inputs = [workspace.FetchBlob(name) for name in op.input]

        self.assertReferenceChecks(
            hu.cpu_do,
            op,
            inputs,
            ref,
            outputs_to_check=list(range(4)),
        )

        # Checking for input, gates_t_w and gates_t_b gradients
        if not fwd_only:
            for param in range(5):
                self.assertGradientChecks(
                    device_option=hu.cpu_do,
                    op=op,
                    inputs=inputs,
                    outputs_to_check=param,
                    outputs_with_grads=outputs_with_grads,
                    threshold=0.01,
                    stepsize=0.005,
                )

    def test_lstm_extract_predictor_net(self):
        """ExtractPredictorNet must yield a runnable predictor net with the
        expected external inputs and per-op device options."""
        model = ModelHelper(name="lstm_extract_test")

        with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU, 0)):
            output, _, _, _ = rnn_cell.LSTM(
                model=model,
                input_blob="input",
                seq_lengths="seqlengths",
                initial_states=("hidden_init", "cell_init"),
                dim_in=20,
                dim_out=40,
                scope="test",
                drop_states=True,
                return_last_layer_only=True,
            )
        # Run param init net to get the shapes for all inputs
        shapes = {}
        workspace.RunNetOnce(model.param_init_net)
        for b in workspace.Blobs():
            shapes[b] = workspace.FetchBlob(b).shape

        # But export in CPU
        (predict_net, export_blobs) = ExtractPredictorNet(
            net_proto=model.net.Proto(),
            input_blobs=["input"],
            output_blobs=[output],
            device=core.DeviceOption(caffe2_pb2.CPU, 1),
        )

        # Create the net and run once to see it is valid
        # Populate external inputs with correctly shaped random input
        # and also ensure that the export_blobs was constructed correctly.
        workspace.ResetWorkspace()
        shapes['input'] = [10, 4, 20]
        shapes['cell_init'] = [1, 4, 40]
        shapes['hidden_init'] = [1, 4, 40]

        print(predict_net.Proto().external_input)
        self.assertTrue('seqlengths' in predict_net.Proto().external_input)
        for einp in predict_net.Proto().external_input:
            if einp == 'seqlengths':
                workspace.FeedBlob(
                    "seqlengths",
                    np.array([10] * 4, dtype=np.int32)
                )
            else:
                workspace.FeedBlob(
                    einp,
                    np.zeros(shapes[einp]).astype(np.float32),
                )
                if einp != 'input':
                    self.assertTrue(einp in export_blobs)
        print(str(predict_net.Proto()))
        self.assertTrue(workspace.CreateNet(predict_net.Proto()))
        self.assertTrue(workspace.RunNet(predict_net.Proto().name))

        # Validate device options set correctly for the RNNs
        import google.protobuf.text_format as protobuftx
        for op in predict_net.Proto().op:
            if op.type == 'RecurrentNetwork':
                for arg in op.arg:
                    if arg.name == "step_net":
                        step_proto = caffe2_pb2.NetDef()
                        protobuftx.Merge(arg.s.decode("ascii"), step_proto)
                        for step_op in step_proto.op:
                            self.assertEqual(0, step_op.device_option.device_type)
                            self.assertEqual(1, step_op.device_option.cuda_gpu_id)
                    elif arg.name == 'backward_step_net':
                        self.assertEqual(b"", arg.s)

    @given(encoder_output_length=st.integers(1, 3),
           encoder_output_dim=st.integers(1, 3),
           decoder_input_length=st.integers(1, 3),
           decoder_state_dim=st.integers(1, 3),
           batch_size=st.integers(1, 3),
           **hu.gcs)
    def test_lstm_with_attention(
        self,
        encoder_output_length,
        encoder_output_dim,
        decoder_input_length,
        decoder_state_dim,
        batch_size,
        gc,
        dc,
    ):
        """Regular attention against lstm_with_attention_reference."""
        self.lstm_with_attention(
            partial(
                rnn_cell.LSTMWithAttention,
                attention_type=AttentionType.Regular,
            ),
            encoder_output_length,
            encoder_output_dim,
            decoder_input_length,
            decoder_state_dim,
            batch_size,
            lstm_with_attention_reference,
            gc,
        )

    @given(encoder_output_length=st.integers(1, 3),
           encoder_output_dim=st.integers(1, 3),
           decoder_input_length=st.integers(1, 3),
           decoder_state_dim=st.integers(1, 3),
           batch_size=st.integers(1, 3),
           **hu.gcs)
    def test_lstm_with_recurrent_attention(
        self,
        encoder_output_length,
        encoder_output_dim,
        decoder_input_length,
        decoder_state_dim,
        batch_size,
        gc,
        dc,
    ):
        """Recurrent attention against its NumPy reference."""
        self.lstm_with_attention(
            partial(
                rnn_cell.LSTMWithAttention,
                attention_type=AttentionType.Recurrent,
            ),
            encoder_output_length,
            encoder_output_dim,
            decoder_input_length,
            decoder_state_dim,
            batch_size,
            lstm_with_recurrent_attention_reference,
            gc,
        )

    def lstm_with_attention(
        self,
        create_lstm_with_attention,
        encoder_output_length,
        encoder_output_dim,
        decoder_input_length,
        decoder_state_dim,
        batch_size,
        ref,
        gc,
    ):
        """Shared driver: build an LSTMWithAttention op, check all six
        outputs against `ref`, then gradient-check every differentiable
        input."""
        model = ModelHelper(name='external')
        with core.DeviceScope(gc):
            (
                encoder_outputs,
                decoder_inputs,
                decoder_input_lengths,
                initial_decoder_hidden_state,
                initial_decoder_cell_state,
                initial_attention_weighted_encoder_context,
            ) = model.net.AddExternalInputs(
                'encoder_outputs',
                'decoder_inputs',
                'decoder_input_lengths',
                'initial_decoder_hidden_state',
                'initial_decoder_cell_state',
                'initial_attention_weighted_encoder_context',
            )
            create_lstm_with_attention(
                model=model,
                decoder_inputs=decoder_inputs,
                decoder_input_lengths=decoder_input_lengths,
                initial_decoder_hidden_state=initial_decoder_hidden_state,
                initial_decoder_cell_state=initial_decoder_cell_state,
                initial_attention_weighted_encoder_context=(
                    initial_attention_weighted_encoder_context
                ),
                encoder_output_dim=encoder_output_dim,
                encoder_outputs=encoder_outputs,
                decoder_input_dim=decoder_state_dim,
                decoder_state_dim=decoder_state_dim,
                scope='external/LSTMWithAttention',
            )
            # Second-to-last op is the RecurrentNetwork op under test.
            op = model.net._net.op[-2]
        workspace.RunNetOnce(model.param_init_net)

        # This is original decoder_inputs after linear layer
        decoder_input_blob = op.input[0]

        workspace.FeedBlob(
            decoder_input_blob,
            np.random.randn(
                decoder_input_length,
                batch_size,
                decoder_state_dim * 4,
            ).astype(np.float32))
        workspace.FeedBlob(
            'external/LSTMWithAttention/encoder_outputs_transposed',
            np.random.randn(
                batch_size,
                encoder_output_dim,
                encoder_output_length,
            ).astype(np.float32),
        )
        workspace.FeedBlob(
            'external/LSTMWithAttention/weighted_encoder_outputs',
            np.random.randn(
                encoder_output_length,
                batch_size,
                encoder_output_dim,
            ).astype(np.float32),
        )
        workspace.FeedBlob(
            decoder_input_lengths,
            np.random.randint(
                0,
                decoder_input_length + 1,
                size=(batch_size,)
            ).astype(np.int32))
        workspace.FeedBlob(
            initial_decoder_hidden_state,
            np.random.randn(1, batch_size, decoder_state_dim).astype(np.float32)
        )
        workspace.FeedBlob(
            initial_decoder_cell_state,
            np.random.randn(1, batch_size, decoder_state_dim).astype(np.float32)
        )
        workspace.FeedBlob(
            initial_attention_weighted_encoder_context,
            np.random.randn(
                1, batch_size, encoder_output_dim).astype(np.float32)
        )
        inputs = [workspace.FetchBlob(name) for name in op.input]
        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=inputs,
            reference=ref,
            grad_reference=None,
            output_to_grad=None,
            outputs_to_check=list(range(6)),
        )
        # Sequence lengths are integer inputs and carry no gradient.
        gradients_to_check = [
            index for (index, input_name) in enumerate(op.input)
            if input_name != 'decoder_input_lengths'
        ]
        for param in gradients_to_check:
            self.assertGradientChecks(
                device_option=gc,
                op=op,
                inputs=inputs,
                outputs_to_check=param,
                outputs_with_grads=[0, 4],
                threshold=0.01,
                stepsize=0.001,
            )

    @given(n=st.integers(1, 10),
           d=st.integers(1, 10),
           t=st.integers(1, 10),
           **hu.gcs)
    def test_lstm_unit_recurrent_network(self, n, d, t, dc, gc):
        """Standalone LSTMUnit op against the lstm_unit NumPy reference,
        including device and gradient checks."""
        op = core.CreateOperator(
            'LSTMUnit',
            [
                'hidden_t_prev',
                'cell_t_prev',
                'gates_t',
                'seq_lengths',
                'timestep',
            ],
            ['hidden_t', 'cell_t'])
        cell_t_prev = np.random.randn(1, n, d).astype(np.float32)
        hidden_t_prev = np.random.randn(1, n, d).astype(np.float32)
        gates = np.random.randn(1, n, 4 * d).astype(np.float32)
        seq_lengths = np.random.randint(1, t + 1, size=(n,)).astype(np.int32)
        timestep = np.random.randint(0, t, size=(1,)).astype(np.int32)
        inputs = [hidden_t_prev, cell_t_prev, gates, seq_lengths, timestep]
        # The timestep counter must live on CPU regardless of device.
        input_device_options = {'timestep': hu.cpu_do}
        self.assertDeviceChecks(
            dc, op, inputs, [0],
            input_device_options=input_device_options)
        self.assertReferenceChecks(
            gc, op, inputs, lstm_unit,
            input_device_options=input_device_options)
        for i in range(2):
            self.assertGradientChecks(
                gc, op, inputs, i, [0, 1],
                input_device_options=input_device_options)

    @given(input_length=st.integers(2, 5),
           dim_in=st.integers(1, 3),
           max_num_units=st.integers(1, 3),
           num_layers=st.integers(2, 3),
           batch_size=st.integers(1, 3))
    def test_multi_lstm(
        self,
        input_length,
        dim_in,
        max_num_units,
        num_layers,
        batch_size,
    ):
        """Stacked LSTM output must match multi_lstm_reference, and every
        parameter must pass a numeric gradient check on a fake loss."""
        model = ModelHelper(name='external')
        (
            input_sequence,
            seq_lengths,
        ) = model.net.AddExternalInputs(
            'input_sequence',
            'seq_lengths',
        )
        dim_out = [
            np.random.randint(1, max_num_units + 1)
            for _ in range(num_layers)
        ]
        h_all, h_last, c_all, c_last = rnn_cell.LSTM(
            model=model,
            input_blob=input_sequence,
            seq_lengths=seq_lengths,
            initial_states=None,
            dim_in=dim_in,
            dim_out=dim_out,
            scope='test',
            outputs_with_grads=(0,),
            return_params=False,
            memory_optimization=False,
            forget_bias=0.0,
            forward_only=False,
            return_last_layer_only=True,
        )

        workspace.RunNetOnce(model.param_init_net)

        seq_lengths_val = np.random.randint(
            1,
            input_length + 1,
            size=(batch_size),
        ).astype(np.int32)
        input_sequence_val = np.random.randn(
            input_length,
            batch_size,
            dim_in,
        ).astype(np.float32)
        workspace.FeedBlob(seq_lengths, seq_lengths_val)
        workspace.FeedBlob(input_sequence, input_sequence_val)

        # Pull out the initial states and per-layer parameters that the
        # cell registered, to feed the NumPy reference.
        hidden_input_list = []
        cell_input_list = []
        i2h_w_list = []
        i2h_b_list = []
        gates_w_list = []
        gates_b_list = []

        for i in range(num_layers):
            hidden_input_list.append(
                workspace.FetchBlob('test/initial_hidden_state_{}'.format(i)),
            )
            cell_input_list.append(
                workspace.FetchBlob('test/initial_cell_state_{}'.format(i)),
            )
            i2h_w_list.append(
                workspace.FetchBlob('test/layer_{}/i2h_w'.format(i)),
            )
            i2h_b_list.append(
                workspace.FetchBlob('test/layer_{}/i2h_b'.format(i)),
            )
            gates_w_list.append(
                workspace.FetchBlob('test/layer_{}/gates_t_w'.format(i)),
            )
            gates_b_list.append(
                workspace.FetchBlob('test/layer_{}/gates_t_b'.format(i)),
            )

        workspace.RunNetOnce(model.net)
        h_all_calc = workspace.FetchBlob(h_all)
        h_last_calc = workspace.FetchBlob(h_last)
        c_all_calc = workspace.FetchBlob(c_all)
        c_last_calc = workspace.FetchBlob(c_last)

        h_all_ref, h_last_ref, c_all_ref, c_last_ref = multi_lstm_reference(
            input_sequence_val,
            hidden_input_list,
            cell_input_list,
            i2h_w_list,
            i2h_b_list,
            gates_w_list,
            gates_b_list,
            seq_lengths_val,
            forget_bias=0.0,
        )

        h_all_delta = np.abs(h_all_ref - h_all_calc).sum()
        h_last_delta = np.abs(h_last_ref - h_last_calc).sum()
        c_all_delta = np.abs(c_all_ref - c_all_calc).sum()
        c_last_delta = np.abs(c_last_ref - c_last_calc).sum()

        self.assertAlmostEqual(h_all_delta, 0.0, places=5)
        self.assertAlmostEqual(h_last_delta, 0.0, places=5)
        self.assertAlmostEqual(c_all_delta, 0.0, places=5)
        self.assertAlmostEqual(c_last_delta, 0.0, places=5)

        input_values = {
            'input_sequence': input_sequence_val,
            'seq_lengths': seq_lengths_val,
        }
        for param in model.GetParams():
            value = workspace.FetchBlob(param)
            input_values[str(param)] = value

        # Reduce the output to a scalar so the whole net has one loss to
        # differentiate against.
        output_sum = model.net.SumElements(
            [h_all],
            'output_sum',
            average=True,
        )
        fake_loss = model.net.Tanh(
            output_sum,
        )
        for param in model.GetParams():
            gradient_checker.NetGradientChecker.Check(
                model.net,
                outputs_with_grad=[fake_loss],
                input_values=input_values,
                input_to_check=str(param),
                print_net=False,
                step_size=0.0001,
                threshold=0.05,
            )
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from classytags.arguments import Argument
from classytags.core import Options, Tag
from classytags.helpers import InclusionTag
from cms.constants import PUBLISHER_STATE_PENDING
from django import template
from django.conf import settings
from django.contrib.admin.views.main import ERROR_FLAG
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
# Template tag/filter registry for this module.
register = template.Library()

# Base URL of the icon images bundled with Django's admin static files.
CMS_ADMIN_ICON_BASE = "%sadmin/img/" % settings.STATIC_URL
@register.simple_tag(takes_context=True)
def show_admin_menu_for_pages(context, descendants, depth=1):
    """Render the admin tree rows for the given pages as one safe HTML
    string."""
    admin = context['admin']
    request = context['request']
    # Only follow descendants when the tree view is not filtered.
    if 'tree' in context:
        is_filtered = context['tree']['is_filtered']
    else:
        is_filtered = False
    rendered_rows = admin.get_tree_rows(
        request,
        pages=descendants,
        language=context['preview_language'],
        depth=depth,
        follow_descendants=not bool(is_filtered),
    )
    return mark_safe(''.join(rendered_rows))
class TreePublishRow(Tag):
    """Template tag rendering the publish-state indicator (CSS class +
    tooltip) for one page translation in the admin page tree."""
    name = "tree_publish_row"
    options = Options(
        Argument('page'),
        Argument('language')
    )

    def render_tag(self, context, page, language):
        # A pending page is displayed as unpublished even if is_published()
        # reports True.
        page_pending_publication = page.get_publisher_state(language) == PUBLISHER_STATE_PENDING

        if page.is_published(language) and not page_pending_publication:
            if page.is_dirty(language):
                # Published, but with draft changes not yet published.
                cls = "cms-pagetree-node-state cms-pagetree-node-state-dirty dirty"
                text = _("unpublished changes")
            else:
                cls = "cms-pagetree-node-state cms-pagetree-node-state-published published"
                text = _("published")
        else:
            page_languages = page.get_languages()

            if language in page_languages:
                if page_pending_publication:
                    # Translation exists but waits on an unpublished parent.
                    cls = "cms-pagetree-node-state cms-pagetree-node-state-unpublished-parent unpublishedparent"
                    text = _("unpublished parent")
                else:
                    cls = "cms-pagetree-node-state cms-pagetree-node-state-unpublished unpublished"
                    text = _("unpublished")
            else:
                # No translation in this language at all.
                cls = "cms-pagetree-node-state cms-pagetree-node-state-empty empty"
                text = _("no content")
        return mark_safe(
            '<span class="cms-hover-tooltip cms-hover-tooltip-left cms-hover-tooltip-delay %s" '
            'data-cms-tooltip="%s"></span>' % (cls, force_text(text)))
register.tag(TreePublishRow)
@register.filter
def is_published(page, language):
    """True if the page is published in `language`, or has a translation
    that is pending publication."""
    if page.is_published(language):
        return True
    # An existing translation waiting on an unpublished ancestor still
    # counts as published for display purposes.
    if language in page.get_languages():
        return page.get_publisher_state(language) == PUBLISHER_STATE_PENDING
    return False
@register.filter
def is_dirty(page, language):
    # Thin filter wrapper exposing Page.is_dirty to templates.
    return page.is_dirty(language)
@register.filter
def items_are_published(items, language):
    """
    Returns False if any of the ancestors of page (and language) are
    unpublished, otherwise True.
    """
    for item in items:
        if not item.is_published(language):
            return False
    return True
@register.inclusion_tag('admin/cms/page/tree/filter.html')
def render_filter_field(request, field):
    """Render one page-tree changelist filter field as a list of choice links.

    Each choice preserves every other GET parameter so filters can be
    combined; the choice matching the current lookup value is marked selected.
    """
    params = request.GET.copy()
    # Drop the admin error flag so it is not propagated into the filter links.
    # FIX: the original deleted the literal key 'ERROR_FLAG' instead of the
    # value of the ERROR_FLAG constant, raising a KeyError whenever the flag
    # was actually present in the query string.
    if ERROR_FLAG in params:
        del params[ERROR_FLAG]
    # QueryDict.pop returns a list of values; keep only the last one.
    lookup_value = params.pop(field.html_name, [''])[-1]

    def choices():
        # Lazily build one link descriptor per form-field choice.
        for value, label in field.field.choices:
            queries = params.copy()
            if value:
                queries[field.html_name] = value
            yield {
                'query_string': '?%s' % queries.urlencode(),
                'selected': lookup_value == value,
                'display': label,
            }
    return {'field': field, 'choices': choices()}
@register.filter
def boolean_icon(value):
    """Map a boolean-ish value to the matching admin yes/no/unknown icon."""
    icon_names = {True: 'yes', False: 'no', None: 'unknown'}
    icon = icon_names.get(value, 'unknown')
    return mark_safe(
        '<img src="%sicon-%s.gif" alt="%s" />' % (CMS_ADMIN_ICON_BASE, icon, value))
@register.filter
def preview_link(page, language):
    """Return the preview URL for *page* in *language*.

    Returns '' when no localized path/slug exists for that language, so the
    caller stays on the current page.
    """
    if settings.USE_I18N:
        # Which one of page.get_slug() and page.get_path() is the right
        # one to use in this block? They both seem to return the same thing.
        try:
            # attempt to retrieve the localized path/slug and return
            return page.get_absolute_url(language, fallback=False)
        # FIX: a bare `except:` also swallowed SystemExit/KeyboardInterrupt;
        # catch Exception instead so those still propagate.
        except Exception:
            # no localized path/slug. therefore nothing to preview. stay on the same page.
            # perhaps the user should be somehow notified for this.
            return ''
    return page.get_absolute_url(language)
class PageSubmitRow(InclusionTag):
    """{% page_submit_row %} -- renders the submit-button row of the page
    change form, honouring popup mode, permissions and the active tab.
    """
    name = 'page_submit_row'
    template = 'admin/cms/page/submit_row.html'

    def get_context(self, context):
        opts = context['opts']
        change = context['change']
        is_popup = context['is_popup']
        save_as = context['save_as']
        basic_info = context.get('basic_info', False)
        advanced_settings = context.get('advanced_settings', False)
        can_change_advanced = context.get('can_change_advanced_settings', False)
        language = context.get('language', '')
        filled_languages = context.get('filled_languages', [])
        language_is_filled = language in filled_languages
        # Buttons only show for a filled language on the basic/advanced tabs,
        # and only when the user may change advanced settings.
        show_buttons = (language_is_filled
                        and (basic_info or advanced_settings)
                        and can_change_advanced)
        # TODO check this (old code: opts.get_ordered_objects() )
        onclick = 'onclick="submitOrderForm();"' if opts and change else ''
        return {
            'onclick_attrib': onclick,
            'show_delete_link': False,
            'show_save_as_new': not is_popup and change and save_as,
            'show_save_and_add_another': False,
            'show_save_and_continue': not is_popup and context['has_change_permission'],
            'is_popup': is_popup,
            'basic_info_active': basic_info,
            'advanced_settings_active': advanced_settings,
            'show_buttons': show_buttons,
            'show_save': True,
            'language': language,
            'language_is_filled': language_is_filled,
            'object_id': context.get('object_id', None),
            'opts': opts,
        }
register.tag(PageSubmitRow)
def in_filtered(seq1, seq2):
    """Return the items of seq1 that also appear in seq2, preserving order."""
    return list(filter(lambda item: item in seq2, seq1))
# Register in_filtered as a template filter under the same name.
in_filtered = register.filter('in_filtered', in_filtered)
@register.simple_tag
def admin_static_url():
    """Return ADMIN_MEDIA_PREFIX when that setting is defined and non-empty,
    falling back to STATIC_URL + 'admin/'.
    """
    prefix = getattr(settings, 'ADMIN_MEDIA_PREFIX', None)
    if prefix:
        return prefix
    return settings.STATIC_URL + 'admin/'
class CMSAdminIconBase(Tag):
    """{% cms_admin_icon_base %} -- outputs the admin icon base URL."""
    name = 'cms_admin_icon_base'

    def render_tag(self, context):
        return CMS_ADMIN_ICON_BASE
register.tag(CMSAdminIconBase)
@register.inclusion_tag('admin/cms/page/plugin/submit_line.html', takes_context=True)
def submit_row_plugin(context):
    """
    Displays the row of buttons for delete and save.
    """
    opts = context['opts']
    is_change = context['change']
    popup = context['is_popup']
    save_as = context['save_as']
    can_delete = context.get('has_delete_permission', False)
    ctx = {
        'opts': opts,
        'show_delete_link': can_delete and is_change and context.get('show_delete', True),
        'show_save_as_new': not popup and is_change and save_as,
        'show_save_and_add_another': context['has_add_permission'] and not popup and (not save_as or context['add']),
        'show_save_and_continue': not popup and context['has_change_permission'],
        'is_popup': popup,
        'show_save': True,
        'preserved_filters': context.get('preserved_filters'),
    }
    # Only pass the original object through when one exists.
    original = context.get('original')
    if original is not None:
        ctx['original'] = original
    return ctx
| |
import os
import platform
import textwrap
import unittest
import pytest
from parameterized import parameterized_class
from conans.test.assets.genconanfile import GenConanfile
from conans.test.assets.pkg_cmake import pkg_cmake
from conans.test.assets.sources import gen_function_cpp, gen_function_h
from conans.test.assets.visual_project_files import get_vs_project_files
from conans.test.conftest import tools_locations
from conans.test.utils.tools import TestClient
# Visual Studio solution fixture with two projects (MyProject, MyApp) and
# Debug/Release x86/x64 configurations, consumed by the MSBuild tests below.
# NOTE(review): leading indentation inside this literal appears to have been
# stripped; MSBuild tolerates it, but confirm against the original fixture.
sln_file = r"""
Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio 15
VisualStudioVersion = 15.0.28307.757
MinimumVisualStudioVersion = 10.0.40219.1
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "MyProject", "MyProject\MyProject.vcxproj", "{6F392A05-B151-490C-9505-B2A49720C4D9}"
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "MyApp", "MyApp\MyApp.vcxproj", "{B58316C0-C78A-4E9B-AE8F-5D6368CE3840}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|x64 = Debug|x64
Debug|x86 = Debug|x86
Release|x64 = Release|x64
Release|x86 = Release|x86
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{6F392A05-B151-490C-9505-B2A49720C4D9}.Debug|x64.ActiveCfg = Debug|x64
{6F392A05-B151-490C-9505-B2A49720C4D9}.Debug|x64.Build.0 = Debug|x64
{6F392A05-B151-490C-9505-B2A49720C4D9}.Debug|x86.ActiveCfg = Debug|Win32
{6F392A05-B151-490C-9505-B2A49720C4D9}.Debug|x86.Build.0 = Debug|Win32
{6F392A05-B151-490C-9505-B2A49720C4D9}.Release|x64.ActiveCfg = Release|x64
{6F392A05-B151-490C-9505-B2A49720C4D9}.Release|x64.Build.0 = Release|x64
{6F392A05-B151-490C-9505-B2A49720C4D9}.Release|x86.ActiveCfg = Release|Win32
{6F392A05-B151-490C-9505-B2A49720C4D9}.Release|x86.Build.0 = Release|Win32
{B58316C0-C78A-4E9B-AE8F-5D6368CE3840}.Debug|x64.ActiveCfg = Debug|x64
{B58316C0-C78A-4E9B-AE8F-5D6368CE3840}.Debug|x64.Build.0 = Debug|x64
{B58316C0-C78A-4E9B-AE8F-5D6368CE3840}.Debug|x86.ActiveCfg = Debug|Win32
{B58316C0-C78A-4E9B-AE8F-5D6368CE3840}.Debug|x86.Build.0 = Debug|Win32
{B58316C0-C78A-4E9B-AE8F-5D6368CE3840}.Release|x64.ActiveCfg = Release|x64
{B58316C0-C78A-4E9B-AE8F-5D6368CE3840}.Release|x64.Build.0 = Release|x64
{B58316C0-C78A-4E9B-AE8F-5D6368CE3840}.Release|x86.ActiveCfg = Release|Win32
{B58316C0-C78A-4E9B-AE8F-5D6368CE3840}.Release|x86.Build.0 = Release|Win32
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
GlobalSection(ExtensibilityGlobals) = postSolution
SolutionGuid = {DE6E462F-E299-4F9C-951A-F9404EB51521}
EndGlobalSection
EndGlobal
"""
# vcxproj fixture for the MyProject executable. It imports the Conan-generated
# conan_Hello3.props sheet (replaced with conandeps.props by some tests).
# NOTE(review): leading indentation inside this literal appears to have been
# stripped; XML parsing is unaffected, but confirm against the original fixture.
myproject_vcxproj = r"""<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="15.0"
xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<VCProjectVersion>15.0</VCProjectVersion>
<ProjectGuid>{6F392A05-B151-490C-9505-B2A49720C4D9}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
<RootNamespace>MyProject</RootNamespace>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="Shared">
</ImportGroup>
<ImportGroup Label="PropertySheets">
<Import Project="..\conan_Hello3.props" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props"
Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')"
Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props"
Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')"
Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props"
Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')"
Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props"
Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')"
Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<LinkIncremental>true</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<PrecompiledHeader>NotUsing</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<SDLCheck>true</SDLCheck>
<PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)
</PreprocessorDefinitions>
<ConformanceMode>true</ConformanceMode>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>NotUsing</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<SDLCheck>true</SDLCheck>
<PreprocessorDefinitions>_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<ConformanceMode>true</ConformanceMode>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<PrecompiledHeader>NotUsing</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<SDLCheck>true</SDLCheck>
<PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)
</PreprocessorDefinitions>
<ConformanceMode>true</ConformanceMode>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<PrecompiledHeader>NotUsing</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<SDLCheck>true</SDLCheck>
<PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<ConformanceMode>true</ConformanceMode>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="MyProject.cpp" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>
"""
# vcxproj fixture for the MyApp executable. Identical layout to
# myproject_vcxproj except for the GUID, namespace, source file and the
# conan_Hello1.props import (replaced with conandeps.props by some tests).
# NOTE(review): leading indentation inside this literal appears to have been
# stripped; XML parsing is unaffected, but confirm against the original fixture.
myapp_vcxproj = r"""<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="15.0"
xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|Win32">
<Configuration>Debug</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|Win32">
<Configuration>Release</Configuration>
<Platform>Win32</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<VCProjectVersion>15.0</VCProjectVersion>
<ProjectGuid>{B58316C0-C78A-4E9B-AE8F-5D6368CE3840}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
<RootNamespace>MyApp</RootNamespace>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<PlatformToolset>v141</PlatformToolset>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>Unicode</CharacterSet>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="Shared">
</ImportGroup>
<ImportGroup Label="PropertySheets">
<Import Project="..\conan_Hello1.props" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props"
Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')"
Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props"
Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')"
Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props"
Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')"
Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props"
Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')"
Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<LinkIncremental>true</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>
<PrecompiledHeader>NotUsing</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<SDLCheck>true</SDLCheck>
<PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)
</PreprocessorDefinitions>
<ConformanceMode>true</ConformanceMode>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<PrecompiledHeader>NotUsing</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<SDLCheck>true</SDLCheck>
<PreprocessorDefinitions>_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<ConformanceMode>true</ConformanceMode>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<ClCompile>
<PrecompiledHeader>NotUsing</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<SDLCheck>true</SDLCheck>
<PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)
</PreprocessorDefinitions>
<ConformanceMode>true</ConformanceMode>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<PrecompiledHeader>NotUsing</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<SDLCheck>true</SDLCheck>
<PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<ConformanceMode>true</ConformanceMode>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<GenerateDebugInformation>true</GenerateDebugInformation>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="MyApp.cpp" />
</ItemGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>
"""
# Always test against VS 2017 (toolset v141); add VS 2022 only when it is
# present (and not disabled) in the local test tools configuration.
vs_versions = [{"vs_version": "15", "msvc_version": "191", "ide_year": "2017", "toolset": "v141"}]
if "17" in tools_locations['visual_studio'] and not tools_locations['visual_studio']['17'].get('disabled', False):
    vs_versions.append({"vs_version": "17", "msvc_version": "19.3", "ide_year": "2022", "toolset": "v143"})
@parameterized_class(vs_versions)
@pytest.mark.tool_visual_studio
@pytest.mark.skipif(platform.system() != "Windows", reason="Requires MSBuild")
class MSBuildGeneratorTest(unittest.TestCase):
    """Functional tests for the MSBuild/MSBuildDeps generators.

    Parameterized over the locally available Visual Studio versions
    (``self.vs_version`` etc. are injected by ``@parameterized_class``).
    These tests drive real MSBuild/CMake invocations, so they only run on
    Windows with the corresponding tools installed.
    """

    @pytest.mark.slow
    @pytest.mark.tool_cmake
    def test_msbuild_generator(self):
        # Build a two-project solution where each project consumes a
        # different Conan package through its generated .props sheet.
        client = TestClient()
        client.save(pkg_cmake("Hello0", "1.0"))
        client.run("create . ")
        client.save(pkg_cmake("Hello3", "1.0"), clean_first=True)
        client.run("create . ")
        client.save(pkg_cmake("Hello1", "1.0", ["Hello0/1.0"]), clean_first=True)
        client.run("create . ")
        conanfile = textwrap.dedent("""
            from conans import ConanFile, MSBuild
            class HelloConan(ConanFile):
                settings = "os", "build_type", "compiler", "arch"
                requires = "Hello1/1.0", "Hello3/1.0"
                generators = "MSBuildDeps"
                def build(self):
                    msbuild = MSBuild(self)
                    msbuild.build("MyProject.sln")
            """)
        myapp_cpp = gen_function_cpp(name="main", msg="MyApp",
                                     includes=["Hello1"], calls=["Hello1"])
        myproject_cpp = gen_function_cpp(name="main", msg="MyProject", includes=["Hello3"],
                                         calls=["Hello3"])
        files = {"MyProject.sln": sln_file,
                 "MyProject/MyProject.vcxproj": myproject_vcxproj,
                 "MyProject/MyProject.cpp": myproject_cpp,
                 "MyApp/MyApp.vcxproj": myapp_vcxproj,
                 "MyApp/MyApp.cpp": myapp_cpp,
                 "conanfile.py": conanfile}
        client.save(files, clean_first=True)
        client.run("install .")
        client.run("build .")
        # MSB4011 would mean a .props sheet was imported twice.
        self.assertNotIn("warning MSB4011", client.out)
        client.run_command(r"x64\Release\MyProject.exe")
        self.assertIn("MyProject: Release!", client.out)
        self.assertIn("Hello3: Release!", client.out)
        client.run_command(r"x64\Release\MyApp.exe")
        self.assertIn("MyApp: Release!", client.out)
        self.assertIn("Hello0: Release!", client.out)
        self.assertIn("Hello1: Release!", client.out)

    def test_install_reference(self):
        # Installing a reference directly (without a consumer recipe) must
        # still generate props files pointing at the package folder.
        client = TestClient()
        client.save({"conanfile.py": GenConanfile()})
        client.run("create . mypkg/0.1@")
        client.run("install mypkg/0.1@ -g MSBuildDeps")
        self.assertIn("Generator 'MSBuildDeps' calling 'generate()'", client.out)
        # https://github.com/conan-io/conan/issues/8163
        props = client.load("conan_mypkg_vars_release_x64.props")  # default Release/x64
        folder = props[props.find("<ConanmypkgRootFolder>")+len("<ConanmypkgRootFolder>")
                       :props.find("</ConanmypkgRootFolder>")]
        self.assertTrue(os.path.isfile(os.path.join(folder, "conaninfo.txt")))

    def test_install_reference_gcc(self):
        # A later install for a different OS/compiler but the same
        # configuration/arch overwrites the conditional props file.
        client = TestClient()
        client.save({"conanfile.py": GenConanfile()})
        client.run("create . pkg/1.0@")
        conanfile = textwrap.dedent("""
            from conans import ConanFile
            class Pkg(ConanFile):
                settings = "os", "compiler", "arch", "build_type"
                generators = "MSBuildDeps"
                requires = "pkg/1.0"
            """)
        client.save({"conanfile.py": conanfile})
        client.run('install . -s os=Windows -s compiler="Visual Studio" '
                   '-s compiler.version={vs_version}'
                   ' -s compiler.runtime=MD'.format(vs_version=self.vs_version))
        self.assertIn("conanfile.py: Generator 'MSBuildDeps' calling 'generate()'", client.out)
        props = client.load("conan_pkg_release_x64.props")
        self.assertIn('<?xml version="1.0" encoding="utf-8"?>', props)
        # This will overwrite the existing one, because configuration and arch are the same.
        # FIX: the original command string had misplaced quote characters
        # ("...5.2 '" + "'-s ..."), injecting a spurious '' token into the
        # command line.
        client.run("install . -s os=Linux -s compiler=gcc -s compiler.version=5.2 "
                   "-s compiler.libcxx=libstdc++")
        self.assertIn("conanfile.py: Generator 'MSBuildDeps' calling 'generate()'", client.out)
        pkg_props = client.load("conan_pkg.props")
        self.assertIn('Project="conan_pkg_release_x64.props"', pkg_props)

    def test_no_build_type_error(self):
        # The legacy 'msbuild' generator requires build_type to be defined.
        client = TestClient()
        client.save({"conanfile.py": GenConanfile()})
        client.run("create . mypkg/0.1@")
        client.run("install mypkg/0.1@ -g msbuild -s build_type=None", assert_error=True)
        self.assertIn("The 'msbuild' generator requires a 'build_type' setting value", client.out)

    def test_custom_configuration(self):
        # Custom MSBuildDeps.configuration/platform values must be reflected
        # in the generated props filenames and aggregated imports.
        client = TestClient()
        client.save({"conanfile.py": GenConanfile()})
        client.run("create . pkg/1.0@")
        conanfile = textwrap.dedent("""
            from conans import ConanFile
            from conan.tools.microsoft import MSBuildDeps
            class Pkg(ConanFile):
                settings = "os", "compiler", "arch", "build_type"
                requires = "pkg/1.0"
                def generate(self):
                    ms = MSBuildDeps(self)
                    ms.configuration = "My"+str(self.settings.build_type)
                    ms.platform = "My"+str(self.settings.arch)
                    ms.generate()
            """)
        client.save({"conanfile.py": conanfile})
        client.run('install . -s os=Windows -s compiler="Visual Studio" '
                   '-s compiler.version={vs_version}'
                   ' -s compiler.runtime=MD'.format(vs_version=self.vs_version))
        props = client.load("conan_pkg_myrelease_myx86_64.props")
        self.assertIn('<?xml version="1.0" encoding="utf-8"?>', props)
        client.run('install . -s os=Windows -s compiler="Visual Studio" '
                   '-s compiler.version={vs_version}'
                   ' -s compiler.runtime=MD -s arch=x86 '
                   '-s build_type=Debug'.format(vs_version=self.vs_version))
        props = client.load("conan_pkg_mydebug_myx86.props")
        self.assertIn('<?xml version="1.0" encoding="utf-8"?>', props)
        # The aggregating props must import both generated configurations.
        props = client.load("conan_pkg.props")
        self.assertIn("conan_pkg_myrelease_myx86_64.props", props)
        self.assertIn("conan_pkg_mydebug_myx86.props", props)

    def test_custom_configuration_errors(self):
        # Setting configuration/platform to None must produce a clear error.
        client = TestClient()
        client.save({"conanfile.py": GenConanfile()})
        client.run("create . pkg/1.0@")
        conanfile = textwrap.dedent("""
            from conans import ConanFile
            from conan.tools.microsoft import MSBuildDeps
            class Pkg(ConanFile):
                settings = "os", "compiler", "arch", "build_type"
                requires = "pkg/1.0"
                def generate(self):
                    ms = MSBuildDeps(self)
                    ms.configuration = None
                    ms.generate()
            """)
        client.save({"conanfile.py": conanfile})
        client.run('install . -s os=Windows -s compiler="Visual Studio"'
                   ' -s compiler.version={vs_version}'
                   ' -s compiler.runtime=MD'.format(vs_version=self.vs_version), assert_error=True)
        self.assertIn("MSBuildDeps.configuration is None, it should have a value", client.out)
        client.save({"conanfile.py": conanfile.replace("configuration", "platform")})
        client.run('install . -s os=Windows -s compiler="Visual Studio"'
                   ' -s compiler.version={vs_version}'
                   ' -s compiler.runtime=MD'.format(vs_version=self.vs_version), assert_error=True)
        self.assertIn("MSBuildDeps.platform is None, it should have a value", client.out)

    def test_install_transitive(self):
        # https://github.com/conan-io/conan/issues/8065
        client = TestClient()
        client.save({"conanfile.py": GenConanfile()})
        client.run("create . pkga/1.0@")
        client.save({"conanfile.py": GenConanfile().with_requires("pkga/1.0")})
        client.run("create . pkgb/1.0@")
        conanfile = textwrap.dedent("""
            from conans import ConanFile, MSBuild
            class HelloConan(ConanFile):
                settings = "os", "build_type", "compiler", "arch"
                requires = "pkgb/1.0@", "pkga/1.0"
                generators = "msbuild"
                def build(self):
                    msbuild = MSBuild(self)
                    msbuild.build("MyProject.sln")
            """)
        myapp_cpp = gen_function_cpp(name="main", msg="MyApp")
        myproject_cpp = gen_function_cpp(name="main", msg="MyProject")
        files = {"MyProject.sln": sln_file,
                 "MyProject/MyProject.vcxproj": myproject_vcxproj.replace("conan_Hello3.props",
                                                                          "conandeps.props"),
                 "MyProject/MyProject.cpp": myproject_cpp,
                 "MyApp/MyApp.vcxproj": myapp_vcxproj.replace("conan_Hello1.props",
                                                              "conandeps.props"),
                 "MyApp/MyApp.cpp": myapp_cpp,
                 "conanfile.py": conanfile}
        client.save(files, clean_first=True)
        client.run("install .")
        self.assertIn("'msbuild' has been deprecated and moved.", client.out)
        client.run("build .")
        self.assertNotIn("warning MSB4011", client.out)

    def test_install_build_requires(self):
        # https://github.com/conan-io/conan/issues/8170
        # Build requirements must not leak into the consumer's conandeps.props.
        client = TestClient()
        client.save({"conanfile.py": GenConanfile()})
        client.run("create . tool/1.0@")
        conanfile = textwrap.dedent("""
            from conans import ConanFile, load
            class HelloConan(ConanFile):
                settings = "os", "build_type", "compiler", "arch"
                build_requires = "tool/1.0"
                generators = "MSBuildDeps"
                def build(self):
                    deps = load("conandeps.props")
                    assert "conan_tool.props" not in deps
                    self.output.info("Conan_tools.props not in deps")
            """)
        client.save({"conanfile.py": conanfile})
        client.run("install .")
        deps = client.load("conandeps.props")
        self.assertNotIn("conan_tool.props", deps)
        client.run("create . pkg/0.1@")
        self.assertIn("Conan_tools.props not in deps", client.out)

    def test_install_transitive_build_requires(self):
        # https://github.com/conan-io/conan/issues/8170
        # Transitive build/test requirements of dependencies must not appear
        # in the generated props of the consumer.
        client = TestClient()
        client.save({"conanfile.py": GenConanfile()})
        client.run("export . dep/1.0@")
        client.run("export . tool_build/1.0@")
        client.run("export . tool_test/1.0@")
        conanfile = GenConanfile().with_requires("dep/1.0").with_build_requires("tool_build/1.0").\
            with_build_requirement("tool_test/1.0", force_host_context=True)
        client.save({"conanfile.py": conanfile})
        client.run("export . pkg/1.0@")
        client.save({"conanfile.py": GenConanfile().
                    with_settings("os", "compiler", "arch", "build_type").
                    with_requires("pkg/1.0")}, clean_first=True)
        client.run("install . -g MSBuildDeps -pr:b=default -pr:h=default --build=missing")
        pkg = client.load("conan_pkg_release_x64.props")
        assert "conan_dep.props" in pkg
        assert "tool_test" not in pkg  # test requires are not there
        assert "tool_build" not in pkg
@pytest.mark.parametrize("pattern,exclude_a,exclude_b",
                         [("['*']", True, True),
                          ("['pkga']", True, False),
                          ("['pkgb']", False, True),
                          ("['pkg*']", True, True),
                          ("['pkga', 'pkgb']", True, True),
                          ("['*a', '*b']", True, True),
                          ("['nonexist']", False, False),
                          ])
def test_exclude_code_analysis(pattern, exclude_a, exclude_b):
    # Verify that the tools.microsoft.msbuilddeps:exclude_code_analysis conf
    # adds a <CAExcludePath> entry only for the dependencies whose names match
    # the given fnmatch-style patterns.
    client = TestClient()
    client.save({"conanfile.py": GenConanfile()})
    client.run("create . pkga/1.0@")
    client.run("create . pkgb/1.0@")
    conanfile = textwrap.dedent("""
        from conans import ConanFile
        from conan.tools.microsoft import MSBuild
        class HelloConan(ConanFile):
            settings = "os", "build_type", "compiler", "arch"
            requires = "pkgb/1.0@", "pkga/1.0"
            generators = "msbuild"
            def build(self):
                msbuild = MSBuild(self)
                msbuild.build("MyProject.sln")
        """)
    profile = textwrap.dedent("""
        include(default)
        build_type=Release
        arch=x86_64
        [conf]
        tools.microsoft.msbuilddeps:exclude_code_analysis = %s
        """ % pattern)
    client.save({"conanfile.py": conanfile,
                 "profile": profile})
    client.run("install . --profile profile")
    depa = client.load("conan_pkga_release_x64.props")
    depb = client.load("conan_pkgb_release_x64.props")
    if exclude_a:
        inc = "$(ConanpkgaIncludeDirectories)"
        ca_exclude = "<CAExcludePath>%s;$(CAExcludePath)</CAExcludePath>" % inc
        assert ca_exclude in depa
    else:
        assert "CAExcludePath" not in depa
    if exclude_b:
        inc = "$(ConanpkgbIncludeDirectories)"
        ca_exclude = "<CAExcludePath>%s;$(CAExcludePath)</CAExcludePath>" % inc
        assert ca_exclude in depb
    else:
        assert "CAExcludePath" not in depb
@pytest.mark.tool_visual_studio(version="15")
@pytest.mark.tool_cmake
@pytest.mark.skipif(platform.system() != "Windows", reason="Requires MSBuild")
def test_build_vs_project_with_a_vs2017():
    # VS 2017 variant of the shared check defined below.
    check_build_vs_project_with_a("15")
@pytest.mark.tool_visual_studio(version="17")
@pytest.mark.tool_cmake
@pytest.mark.skipif(platform.system() != "Windows", reason="Requires MSBuild")
def test_build_vs_project_with_a_vs2022():
    # VS 2022 variant of the shared check defined below.
    check_build_vs_project_with_a("17")
def check_build_vs_project_with_a(vs_version):
    # Shared body for the VS2017/VS2022 tests above: packages a CMake-built
    # static library (with a forced ".a" suffix) using the "Visual Studio"
    # compiler, then consumes it from an MSBuild solution through the
    # MSBuildDeps/MSBuildToolchain generators.
    client = TestClient()
    client.save({"conanfile.py": GenConanfile()})
    client.run("create . updep.pkg.team/0.1@")
    # Recipe for the library dependency: built with CMake, packaged as hello.a
    conanfile = textwrap.dedent("""
        from conans import ConanFile, CMake
        class HelloConan(ConanFile):
            settings = "os", "build_type", "compiler", "arch"
            exports = '*'
            requires = "updep.pkg.team/0.1@"
            def build(self):
                cmake = CMake(self)
                cmake.configure()
                cmake.build()
            def package(self):
                self.copy("*.h", dst="include")
                self.copy("*.a", dst="lib", keep_path=False)
            def package_info(self):
                self.cpp_info.libs = ["hello.a"]
        """)
    hello_cpp = gen_function_cpp(name="hello")
    hello_h = gen_function_h(name="hello")
    # Force the ".a" suffix so the MSVC build produces exactly the "hello.a"
    # filename declared in package_info above.
    cmake = textwrap.dedent("""
        set(CMAKE_CXX_COMPILER_WORKS 1)
        set(CMAKE_CXX_ABI_COMPILED 1)
        cmake_minimum_required(VERSION 3.15)
        project(MyLib CXX)
        set(CMAKE_STATIC_LIBRARY_SUFFIX ".a")
        add_library(hello hello.cpp)
        """)
    client.save({"conanfile.py": conanfile,
                 "CMakeLists.txt": cmake,
                 "hello.cpp": hello_cpp,
                 "hello.h": hello_h})
    client.run('create . mydep.pkg.team/0.1@ -s compiler="Visual Studio"'
               ' -s compiler.version={vs_version}'.format(vs_version=vs_version))
    # Consumer builds an MSBuild solution against the packaged library.
    consumer = textwrap.dedent("""
        from conans import ConanFile
        from conan.tools.microsoft import MSBuild
        class HelloConan(ConanFile):
            settings = "os", "build_type", "compiler", "arch"
            requires = "mydep.pkg.team/0.1@"
            generators = "MSBuildDeps", "MSBuildToolchain"
            def build(self):
                msbuild = MSBuild(self)
                msbuild.build("MyProject.sln")
        """)
    files = get_vs_project_files()
    main_cpp = gen_function_cpp(name="main", includes=["hello"], calls=["hello"])
    files["MyProject/main.cpp"] = main_cpp
    files["conanfile.py"] = consumer
    # Inject the generated conandeps.props into the template .vcxproj so the
    # MSBuildDeps property sheets are actually imported by the project.
    props = os.path.join(client.current_folder, "conandeps.props")
    old = r'<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />'
    new = old + '<Import Project="{props}" />'.format(props=props)
    files["MyProject/MyProject.vcxproj"] = files["MyProject/MyProject.vcxproj"].replace(old, new)
    client.save(files, clean_first=True)
    client.run('install . -s compiler="Visual Studio"'
               ' -s compiler.version={vs_version}'.format(vs_version=vs_version))
    client.run("build .")
    client.run_command(r"x64\Release\MyProject.exe")
    assert "hello: Release!" in client.out
    # TODO: This doesnt' work because get_vs_project_files() don't define NDEBUG correctly
    # assert "main: Release!" in client.out
@pytest.mark.tool_visual_studio(version="15")
@pytest.mark.tool_cmake
@pytest.mark.skipif(platform.system() != "Windows", reason="Requires MSBuild")
def test_build_vs_project_with_test_requires_vs2017():
    # VS2017 (compiler.version=15) variant of the shared check below.
    check_build_vs_project_with_test_requires("15")
@pytest.mark.tool_visual_studio(version="17")
@pytest.mark.tool_cmake
@pytest.mark.skipif(platform.system() != "Windows", reason="Requires MSBuild")
def test_build_vs_project_with_test_requires_vs2022():
    # VS2022 (compiler.version=17) variant of the shared check below.
    check_build_vs_project_with_test_requires("17")
def check_build_vs_project_with_test_requires(vs_version):
    # Shared body for the VS2017/VS2022 variants above: a package declared as
    # a build-require in the *host* context (force_host_context=True) and its
    # transitive dependency must both be usable from an MSBuild solution.
    client = TestClient()
    client.save(pkg_cmake("updep.pkg.team", "0.1"))
    client.run("create . -s compiler.version={vs_version}".format(vs_version=vs_version))
    client.save(pkg_cmake("mydep.pkg.team", "0.1", requires=["updep.pkg.team/0.1"]),
                clean_first=True)
    client.run("create . -s compiler.version={vs_version}".format(vs_version=vs_version))
    consumer = textwrap.dedent("""
        from conans import ConanFile
        from conan.tools.microsoft import MSBuild
        class HelloConan(ConanFile):
            settings = "os", "build_type", "compiler", "arch"
            generators = "MSBuildDeps", "MSBuildToolchain"
            def build_requirements(self):
                self.build_requires("mydep.pkg.team/0.1", force_host_context=True)
            def build(self):
                msbuild = MSBuild(self)
                msbuild.build("MyProject.sln")
        """)
    files = get_vs_project_files()
    main_cpp = gen_function_cpp(name="main", includes=["mydep_pkg_team"], calls=["mydep_pkg_team"])
    files["MyProject/main.cpp"] = main_cpp
    files["conanfile.py"] = consumer
    # Inject the generated conandeps.props into the template .vcxproj so the
    # MSBuildDeps property sheets are actually imported by the project.
    props = os.path.join(client.current_folder, "conandeps.props")
    old = r'<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />'
    new = old + '<Import Project="{props}" />'.format(props=props)
    files["MyProject/MyProject.vcxproj"] = files["MyProject/MyProject.vcxproj"].replace(old, new)
    client.save(files, clean_first=True)
    client.run('install . -s compiler.version={vs_version}'.format(vs_version=vs_version))
    client.run("build .")
    client.run_command(r"x64\Release\MyProject.exe")
    # Both the direct test-require and its transitive dep must be linked in.
    assert "mydep_pkg_team: Release!" in client.out
    assert "updep_pkg_team: Release!" in client.out
@pytest.mark.skipif(platform.system() != "Windows", reason="Requires MSBuild")
def test_private_transitive():
    # https://github.com/conan-io/conan/issues/9514
    # A dependency that is private to `pkg` must be skipped for the consumer
    # and must not leak into any generated MSBuildDeps property sheet.
    client = TestClient()
    client.save({"dep/conanfile.py": GenConanfile(),
                 "pkg/conanfile.py": GenConanfile().with_require("dep/0.1", private=True),
                 "consumer/conanfile.py": GenConanfile().with_requires("pkg/0.1")
                                                        .with_settings("os", "build_type", "arch")})
    client.run("create dep dep/0.1@")
    client.run("create pkg pkg/0.1@")
    client.run("install consumer -g MSBuildDeps -s arch=x86_64 -s build_type=Release")
    # The private dep is not needed by the consumer and is skipped entirely.
    assert "dep/0.1:5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9 - Skip" in client.out
    deps_props = client.load("conandeps.props")
    assert "conan_pkg.props" in deps_props
    assert "dep" not in deps_props
    pkg_data_props = client.load("conan_pkg_release_x64.props")
    assert "conan_dep.props" not in pkg_data_props
@pytest.mark.skipif(platform.system() != "Windows", reason="Requires MSBuild")
def test_build_requires():
    # https://github.com/conan-io/conan/issues/9545
    # A build-require packaging per-arch .bat "tools": the generated
    # conandeps.props must wire conan_dep_build.props, and the tool matching
    # the *build* architecture must be invoked from a CustomBuild step.
    client = TestClient()
    # The dep recipe copies the folder named after the arch setting into bin/.
    package = "self.copy('*', src=str(self.settings.arch), dst='bin')"
    dep = GenConanfile().with_exports("*").with_settings("arch").with_package(package)
    consumer = textwrap.dedent("""
        from conans import ConanFile
        from conan.tools.microsoft import MSBuild
        class Pkg(ConanFile):
            settings = "os", "compiler", "build_type", "arch"
            build_requires = "dep/0.1"
            generators = "MSBuildDeps", "MSBuildToolchain"
            def build(self):
                msbuild = MSBuild(self)
                msbuild.build("hello.sln")
        """)
    # Minimal .vcxproj: imports ..\conandeps.props and runs "dep1tool" from a
    # CustomBuild step, so the build succeeds only if the tool is on PATH.
    hello_vcxproj = textwrap.dedent(r"""<?xml version="1.0" encoding="utf-8"?>
        <Project DefaultTargets="Build" ToolsVersion="15.0"
        xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
        <ItemGroup Label="ProjectConfigurations">
        <ProjectConfiguration Include="Release|Win32">
        <Configuration>Release</Configuration>
        <Platform>Win32</Platform>
        </ProjectConfiguration>
        <ProjectConfiguration Include="Release|x64">
        <Configuration>Release</Configuration>
        <Platform>x64</Platform>
        </ProjectConfiguration>
        </ItemGroup>
        <PropertyGroup Label="Globals">
        <VCProjectVersion>15.0</VCProjectVersion>
        <ProjectGuid>{6F392A05-B151-490C-9505-B2A49720C4D9}</ProjectGuid>
        <Keyword>Win32Proj</Keyword>
        <RootNamespace>MyProject</RootNamespace>
        </PropertyGroup>
        <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
        <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
        <ConfigurationType>Application</ConfigurationType>
        <UseDebugLibraries>false</UseDebugLibraries>
        <PlatformToolset>v141</PlatformToolset>
        <WholeProgramOptimization>true</WholeProgramOptimization>
        <CharacterSet>Unicode</CharacterSet>
        </PropertyGroup>
        <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
        <ConfigurationType>Application</ConfigurationType>
        <UseDebugLibraries>false</UseDebugLibraries>
        <PlatformToolset>v141</PlatformToolset>
        <WholeProgramOptimization>true</WholeProgramOptimization>
        <CharacterSet>Unicode</CharacterSet>
        </PropertyGroup>
        <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
        <ImportGroup Label="PropertySheets">
        <Import Project="..\conandeps.props" />
        </ImportGroup>
        <PropertyGroup Label="UserMacros" />
        <ItemGroup>
        <CustomBuild Include="data.proto">
        <FileType>Document</FileType>
        <Outputs>data.proto.h</Outputs>
        <Command>dep1tool</Command>
        </CustomBuild>
        </ItemGroup>
        <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
        <ImportGroup Label="ExtensionTargets">
        </ImportGroup>
        </Project>""")
    hello_sln = textwrap.dedent(r"""
        Microsoft Visual Studio Solution File, Format Version 12.00
        # Visual Studio 15
        VisualStudioVersion = 15.0.28307.757
        MinimumVisualStudioVersion = 10.0.40219.1
        Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "MyProject", "MyProject\MyProject.vcxproj", "{6F392A05-B151-490C-9505-B2A49720C4D9}"
        EndProject
        Global
        GlobalSection(SolutionConfigurationPlatforms) = preSolution
        Release|x64 = Release|x64
        Release|x86 = Release|x86
        EndGlobalSection
        GlobalSection(ProjectConfigurationPlatforms) = postSolution
        {6F392A05-B151-490C-9505-B2A49720C4D9}.Release|x64.ActiveCfg = Release|x64
        {6F392A05-B151-490C-9505-B2A49720C4D9}.Release|x64.Build.0 = Release|x64
        {6F392A05-B151-490C-9505-B2A49720C4D9}.Release|x86.ActiveCfg = Release|Win32
        {6F392A05-B151-490C-9505-B2A49720C4D9}.Release|x86.Build.0 = Release|Win32
        EndGlobalSection
        GlobalSection(SolutionProperties) = preSolution
        HideSolutionNode = FALSE
        EndGlobalSection
        GlobalSection(ExtensibilityGlobals) = postSolution
        SolutionGuid = {DE6E462F-E299-4F9C-951A-F9404EB51521}
        EndGlobalSection
        EndGlobal
        """)
    client.save({"dep/conanfile.py": dep,
                 "dep/x86/dep1tool.bat": "@echo Invoking 32bit dep_1 build tool",
                 "dep/x86_64/dep1tool.bat": "@echo Invoking 64bit dep_1 build tool",
                 "consumer/conanfile.py": consumer,
                 "consumer/hello.sln": hello_sln,
                 "consumer/MyProject/MyProject.vcxproj": hello_vcxproj,
                 "consumer/MyProject/data.proto": "dataproto"})
    # One dep package per architecture, each echoing a distinct message.
    client.run("create dep dep/0.1@ -s arch=x86")
    client.run("create dep dep/0.1@ -s arch=x86_64")
    with client.chdir("consumer"):
        client.run('install . -s compiler="Visual Studio" -s compiler.version=15 '
                   " -s arch=x86_64 -s build_type=Release")
        assert "dep/0.1:c0519e2d9702ec12d057bb15adb7a02baaf18107 - Cache" in client.out
        deps_props = client.load("conandeps.props")
        # Build-requires get a "_build" suffixed property sheet.
        assert "conan_dep_build.props" in deps_props
        client.run("build .")
        assert "Invoking 64bit dep_1 build tool" in client.out
        client.run('install . -s compiler="Visual Studio" -s compiler.version=15 '
                   " -s arch=x86 -s build_type=Release")
        client.run("build .")
        assert "Invoking 32bit dep_1 build tool" in client.out
        # Make sure it works with 2 profiles too
        client.run('install . -s compiler="Visual Studio" -s compiler.version=15 '
                   " -s arch=x86_64 -s build_type=Release -s:b os=Windows -s:h os=Windows")
        client.run("build .")
        assert "Invoking 64bit dep_1 build tool" in client.out
@pytest.mark.skipif(platform.system() != "Windows", reason="Requires MSBuild")
def test_build_requires_transitives():
    """ The tool-requires should not bring transitive dependencies, they will conflict and
    are useless for linking
    """
    # https://github.com/conan-io/conan/issues/10222
    c = TestClient()
    c.save({"dep/conanfile.py": GenConanfile("dep", "0.1"),
            "tool/conanfile.py": GenConanfile("tool", "0.1").with_requires("dep/0.1"),
            "consumer/conanfile.py":
                GenConanfile().with_settings("os", "compiler", "build_type", "arch")
                              .with_build_requires("tool/0.1")})
    c.run("create dep")
    c.run("create tool")
    c.run("install consumer -g MSBuildDeps")
    # The tool's property sheet must not import its transitive dep's sheet...
    tool = c.load("conan_tool_build_release_x64.props")
    assert "conan_dep" not in tool
    # ...and its dependency list must be empty.
    tool_vars = c.load("conan_tool_build_vars_release_x64.props")
    assert "<Conantool_buildDependencies></Conantool_buildDependencies>" in tool_vars
| |
"""
Grants are the heart of OAuth 2.0. Each Grant defines one way for a client to
retrieve an authorization. They are defined in
`Section 4 <http://tools.ietf.org/html/rfc6749#section-4>`_ of the OAuth 2.0
spec.
OAuth 2.0 comes in two flavours of issuing an access token:
two-legged and three-legged auth. To avoid confusion, both are briefly
explained here.
Three-legged OAuth
------------------
The "three" symbolizes the parties that are involved:
* The client that wants to access a resource on behalf of the user.
* The user who grants access to her resources.
* The server that issues the access token if the user allows it.
Two-legged OAuth
----------------
The two-legged OAuth process differs from the three-legged process by one
missing participant. The user cannot allow or deny access.
So there are two remaining parties:
* The client that wants to access a resource.
* The server that issues the access.
"""
from oauth2.error import OAuthInvalidError, UserNotAuthenticated, \
AccessTokenNotFound, UserIdentifierMissingError, RedirectUriUnknown, \
AuthCodeNotFound, InvalidSiteAdapter
from oauth2.compatibility import urlencode, quote
import json
import time
from oauth2.datatype import AuthorizationCode, AccessToken
from oauth2.web import Response, AuthorizationCodeGrantSiteAdapter, \
ImplicitGrantSiteAdapter, ResourceOwnerGrantSiteAdapter
def encode_scopes(scopes, use_quote=False):
    """
    Serialize a list of scopes into a single string.

    :param scopes: A list of scopes
    :param use_quote: Boolean flag indicating whether the string should be quoted
    :return: Scopes as a string
    """
    joined = Scope.separator.join(scopes)
    return quote(joined) if use_quote else joined
def json_error_response(error, response, status_code=400):
    """
    Formats an error as a response containing a JSON body.

    :param error: Object exposing ``error`` and ``explanation`` attributes.
    :param response: The response object to fill in.
    :param status_code: HTTP status code to set (default 400).
    :return: The filled-in response.
    """
    response.status_code = status_code
    response.add_header("Content-Type", "application/json")
    response.body = json.dumps({"error": error.error,
                                "error_description": error.explanation})
    return response
def json_success_response(data, response):
    """
    Formats the response of a successful token request as JSON.

    Also adds default headers (no-store/no-cache, JSON content type) and
    status code 200.

    :param data: Dict serialized into the JSON body.
    :param response: The response object to fill in.
    :return: The filled-in response, for consistency with
             :func:`json_error_response` (existing callers that ignore the
             return value are unaffected).
    """
    response.body = json.dumps(data)
    response.status_code = 200
    response.add_header("Content-Type", "application/json")
    # Token responses must not be cached by clients or proxies.
    response.add_header("Cache-Control", "no-store")
    response.add_header("Pragma", "no-cache")
    return response
class ResponseTypeGrant(object):
    """
    Base class for grants selected via the "response_type" parameter.
    """
    def error_response(self, response):
        # No-op hook; presumably overridden by concrete grants — confirm
        # against callers elsewhere in the package.
        return None
class Scope(object):
    """
    Handling of the "scope" parameter in a request.

    If ``available`` and ``default`` are both ``None``, the "scope" parameter
    is ignored (the default).

    :param available: A list of strings each defining one supported scope.
    :param default: Value to fall back to in case no scope is present in a
                    request.
    """
    separator = " "

    def __init__(self, available=None, default=None):
        self.scopes = []
        self.send_back = False
        # Anything that is not a list counts as "no scopes configured".
        self.available_scopes = available if isinstance(available, list) else []
        self.default = default

    def compare(self, previous_scopes):
        """
        Compares the scopes read from request with previously issued scopes.

        :param previous_scopes: A list of scopes.
        :return: ``True``
        :raises OAuthInvalidError: If a parsed scope was not issued before.
        """
        if any(scope not in previous_scopes for scope in self.scopes):
            raise OAuthInvalidError(
                error="invalid_scope",
                explanation="Invalid scope parameter in request")
        return True

    def parse(self, request, source):
        """
        Parses the scope value in the given request.

        Expects the value of the "scope" parameter in request to be a string
        where each requested scope is separated by a white space::

            # One scope requested
            "profile_read"

            # Multiple scopes
            "profile_read profile_write"

        :param request: An instance of :class:`oauth2.web.Request`.
        :param source: Where to read the scope from. Pass "body" in case of a
                       application/x-www-form-urlencoded body and "query" in
                       case the scope is supplied as a query parameter in the
                       URL of a request.
        :raises ValueError: On an unknown ``source``.
        :raises OAuthInvalidError: If scopes are configured but the request
                                   carries none and no default exists.
        """
        if source == "body":
            requested = request.post_param("scope")
        elif source == "query":
            requested = request.get_param("scope")
        else:
            raise ValueError("Unknown scope source '" + source + "'")

        if requested is None:
            if self.default is not None:
                # Fall back to the default and tell the caller to echo it.
                self.scopes = [self.default]
                self.send_back = True
            elif len(self.available_scopes) != 0:
                raise OAuthInvalidError(
                    error="invalid_scope",
                    explanation="Missing scope parameter in request")
            return

        # Keep only the scopes this provider actually supports.
        self.scopes = [scope for scope in requested.split(self.separator)
                       if scope in self.available_scopes]
        if not self.scopes and self.default is not None:
            self.scopes = [self.default]
            self.send_back = True
class ScopeGrant(object):
    """
    Handling of scopes in the OAuth 2.0 flow.

    Inherited by all grants that need to support scopes.

    :param default_scope: The scope identifier that is returned by default.
                          (optional)
    :param scopes: A list of strings identifying the scopes that the
                   grant supports.
    :param scope_class: The class that does the actual handling in a request.
                        Default: :class:`oauth2.grant.Scope`.
    """
    def __init__(self, default_scope=None, scopes=None, scope_class=Scope,
                 **kwargs):
        self.default_scope = default_scope
        self.scopes = scopes
        self.scope_class = scope_class
        super(ScopeGrant, self).__init__(**kwargs)

    def _create_scope_handler(self):
        # Fresh handler per request, configured with this grant's scopes.
        return self.scope_class(available=self.scopes,
                                default=self.default_scope)
class GrantHandler(object):
    """
    Base class every oauth2 handler can extend.

    Subclasses implement the three phases of serving a grant request.
    """
    def process(self, request, response, environ):
        """
        Handles the logic of how a user gets an access token.

        This includes steps like calling the implementation of a
        `SiteAdapter` if the user is authorized or generating a new access
        token. This method uses data read in `read_validate_params`.
        """
        raise NotImplementedError

    def read_validate_params(self, request):
        """
        Reads and validates the incoming data.
        """
        raise NotImplementedError

    def handle_error(self, error, response):
        """
        Takes all the actions necessary to return an error response in the
        format defined for a specific grant handler.
        """
        raise NotImplementedError
class GrantHandlerFactory(object):
    """
    Base class every handler factory can extend.

    This class defines the basic interface of each Grant: a factory is
    called with the request and server and returns a handler (or ``None``).
    """
    def __call__(self, request, server):
        raise NotImplementedError
class AuthRequestMixin(object):
    """
    Generalization of reading and validating an incoming request used by
    `oauth2.grant.AuthorizationCodeAuthHandler` and
    `oauth2.grant.ImplicitGrantHandler`.
    """
    def __init__(self, client_authenticator, scope_handler, token_generator,
                 **kwargs):
        # Populated by read_validate_params.
        self.client = None
        self.state = None
        self.client_authenticator = client_authenticator
        self.scope_handler = scope_handler
        self.token_generator = token_generator
        super(AuthRequestMixin, self).__init__(**kwargs)

    def read_validate_params(self, request):
        """
        Reads and validates data in an incoming request as required by
        the Authorization Request of the Authorization Code Grant and the
        Implicit Grant.
        """
        self.client = self.client_authenticator.by_identifier(request)
        requested_type = request.get_param("response_type")
        # Keep the explicit `is False` check: a non-bool return value must
        # not be treated as a rejection, matching historical behavior.
        if self.client.response_type_supported(requested_type) is False:
            raise OAuthInvalidError(error="unauthorized_client")
        self.state = request.get_param("state")
        self.scope_handler.parse(request, "query")
        return True
class AuthorizeMixin(object):
    """
    Used by all grants that involve user interaction.
    """
    def __init__(self, site_adapter, **kwargs):
        self.site_adapter = site_adapter
        super(AuthorizeMixin, self).__init__(**kwargs)

    def authorize(self, request, response, environ, scopes):
        """
        Controls all steps to authorize a request by a user.

        :param request: The incoming :class:`oauth2.web.Request`
        :param response: The :class:`oauth2.web.Response` that will be
                         returned eventually
        :param environ: The environment variables of this request
        :param scopes: The scopes requested by an application
        :return: A tuple containing (`dict`, user_id) or the response.
        :raises OAuthInvalidError: If the user denied access.
        """
        if self.site_adapter.user_has_denied_access(request) is True:
            raise OAuthInvalidError(error="access_denied",
                                    explanation="Authorization denied by user")
        try:
            result = self.site_adapter.authenticate(request, environ, scopes,
                                                    self.client)
            return self.sanitize_return_value(result)
        except UserNotAuthenticated:
            # User is not logged in yet: let the site adapter render e.g.
            # a login/consent page instead of issuing anything.
            return self.site_adapter.render_auth_page(request, response,
                                                      environ, scopes,
                                                      self.client)

    @staticmethod
    def sanitize_return_value(value):
        """
        Normalizes a site adapter's return value to a (data, user_id) pair.
        """
        # Fixed: was `len(value) is 2` — identity comparison against an int
        # literal is a CPython implementation detail (SyntaxWarning on
        # modern Python); use equality.
        if isinstance(value, tuple) and len(value) == 2:
            return value
        return value, None
class AccessTokenMixin(object):
    """
    Used by grants that handle refresh token and unique token.
    """
    def __init__(self, access_token_store, token_generator,
                 unique_token=False, **kwargs):
        # Persistence backend for issued access tokens.
        self.access_token_store = access_token_store
        # Creates token strings / token payload dicts.
        self.token_generator = token_generator
        # If True, reuse an existing still-valid token of the same
        # (client, grant type, user) instead of always issuing a new one.
        self.unique_token = unique_token
        super(AccessTokenMixin, self).__init__(**kwargs)

    def create_token(self, client_id, data, grant_type, scopes, user_id):
        """
        Returns the payload dict of an access token for the given client.

        If ``unique_token`` is enabled and a non-expired token with the exact
        same scopes exists for this (client, grant type, user), that token is
        returned instead of creating a new one.

        :param client_id: Identifier of the requesting client.
        :param data: Arbitrary data stored alongside the token.
        :param grant_type: Grant type the token is issued for.
        :param scopes: List of scopes granted to the token.
        :param user_id: Identifier of the resource owner; required when
                        ``unique_token`` is True.
        :raises UserIdentifierMissingError: unique_token without a user id.
        """
        if self.unique_token:
            # Token reuse is keyed on the user, so a user id is mandatory.
            if user_id is None:
                raise UserIdentifierMissingError
            try:
                access_token = self.access_token_store. \
                    fetch_existing_token_of_user(
                        client_id,
                        grant_type,
                        user_id)
                # Reuse only if the stored token covers the exact same
                # scopes and has not expired yet.
                if (access_token.scopes == scopes
                        and access_token.is_expired() is False):
                    token_data = {"access_token": access_token.token,
                                  "token_type": "Bearer"}
                    if access_token.refresh_token is not None:
                        token_data["refresh_token"] = access_token.refresh_token
                    token_data["expires_in"] = access_token.expires_in
                    return token_data
            except AccessTokenNotFound:
                # No previous token — fall through and create a fresh one.
                pass
        token_data = self.token_generator.create_access_token_data(grant_type)
        access_token = AccessToken(client_id=client_id, data=data,
                                   grant_type=grant_type,
                                   token=token_data["access_token"],
                                   scopes=scopes,
                                   user_id=user_id)
        if "refresh_token" in token_data:
            # NOTE(review): assumes create_access_token_data always includes
            # "expires_in" whenever it includes "refresh_token" — confirm in
            # the token generator implementation.
            expires_at = int(time.time()) + token_data["expires_in"]
            access_token.expires_at = expires_at
            access_token.refresh_token = token_data["refresh_token"]
            refresh_expires_in = self.token_generator.refresh_expires_in
            refresh_expires_at = int(time.time()) + refresh_expires_in
            access_token.refresh_expires_at = refresh_expires_at
        self.access_token_store.save_token(access_token)
        return token_data
class SiteAdapterMixin(object):
    """
    Mixed in by Grant classes that require a site adapter.

    A concrete class must set the class attribute ``site_adapter_class`` that
    contains a reference to the site adapter class that this class expects.
    """
    # Must be overridden by concrete subclasses.
    site_adapter_class = None

    def __init__(self, site_adapter, **kwargs):
        # isinstance always returns a bool, so `not` is equivalent to the
        # historical `is False` check.
        if not isinstance(site_adapter, self.site_adapter_class):
            raise InvalidSiteAdapter(
                "Site adapter must inherit from class '{0}'"
                .format(self.site_adapter_class.__name__)
            )
        self.site_adapter = site_adapter
        super(SiteAdapterMixin, self).__init__(**kwargs)
class AuthorizationCodeAuthHandler(AuthorizeMixin, AuthRequestMixin,
                                   GrantHandler):
    """
    Implementation of the first step of the Authorization Code Grant
    (three-legged).
    """
    # Lifetime of an authorization code in seconds.
    token_expiration = 600

    def __init__(self, auth_token_store, **kwargs):
        self.auth_code_store = auth_token_store
        super(AuthorizationCodeAuthHandler, self).__init__(**kwargs)

    def process(self, request, response, environ):
        """
        Generates a new authorization token.

        A form to authorize the access of the application can be displayed
        with the help of `oauth2.web.SiteAdapter`.
        """
        outcome = self.authorize(request, response, environ,
                                 self.scope_handler.scopes)
        if isinstance(outcome, Response):
            # The site adapter rendered its own page (e.g. a login form).
            return outcome
        code = self.token_generator.generate()
        auth_code = AuthorizationCode(
            client_id=self.client.identifier,
            code=code,
            expires_at=int(time.time()) + self.token_expiration,
            redirect_uri=self.client.redirect_uri,
            scopes=self.scope_handler.scopes,
            data=outcome[0],
            user_id=outcome[1])
        # Persist before redirecting so the token endpoint can find it.
        self.auth_code_store.save_code(auth_code)
        response.add_header("Location", self._generate_location(code))
        response.body = ""
        response.status_code = 302
        return response

    def handle_error(self, error, response):
        """
        Redirects the client in case an error in the auth process occurred.
        """
        location = "%s?%s" % (self.client.redirect_uri,
                              urlencode({"error": error.error}))
        response.status_code = 302
        response.body = ""
        response.add_header("Location", location)
        return response

    def _generate_location(self, code):
        # Redirect target carrying the code (and optional client state).
        query = "code=" + code
        if self.state is not None:
            query += "&state=" + quote(self.state)
        return "%s?%s" % (self.client.redirect_uri, query)
class AuthorizationCodeTokenHandler(AccessTokenMixin, GrantHandler):
    """
    Implementation of the second step of the Authorization Code Grant
    (three-legged).
    """
    def __init__(self, auth_token_store, client_authenticator, **kwargs):
        # The fields below are populated by read_validate_params.
        self.client = None
        self.code = None
        self.data = {}
        self.redirect_uri = None
        self.scopes = []
        self.user_id = None
        self.auth_code_store = auth_token_store
        self.client_authenticator = client_authenticator
        super(AuthorizationCodeTokenHandler, self).__init__(**kwargs)

    def read_validate_params(self, request):
        """
        Reads and validates the data from the incoming request.

        A valid request is issued via POST and consists of the following
        form-encoded body:

        client_id - Identifier of the requesting client (required)
        client_secret - Secret phrase generated by the auth system (required)
        code - Authorization code acquired in the Authorization Request (required)
        redirect_uri - URI that the OAuth2 server should redirect to (optional)
        """
        self._read_params(request)
        self._validate_code()
        return True

    def process(self, request, response, environ):
        """
        Generates a new access token and returns it.

        Returns the access token and the type of the token as JSON.

        Calls `oauth2.store.AccessTokenStore` to persist the token.
        """
        token_data = self.create_token(
            client_id=self.client.identifier,
            data=self.data,
            grant_type=AuthorizationCodeGrant.grant_type,
            scopes=self.scopes,
            user_id=self.user_id)
        # Authorization codes are single-use: discard once exchanged.
        self.auth_code_store.delete_code(self.code)
        if self.scopes:
            token_data["scope"] = encode_scopes(self.scopes)
        json_success_response(data=token_data, response=response)
        return response

    def handle_error(self, error, response):
        # Token endpoint errors are returned as a JSON body.
        return json_error_response(error, response)

    def _read_params(self, request):
        # The client must authenticate with id + secret at this endpoint.
        self.client = self.client_authenticator.by_identifier_secret(request)
        self.code = request.post_param("code")
        self.redirect_uri = request.post_param("redirect_uri")
        if self.code is None or self.redirect_uri is None:
            raise OAuthInvalidError(
                error="invalid_request",
                explanation="Missing required parameter in request")
        try:
            # Assigning redirect_uri presumably validates it against the
            # client's registered URIs (raises RedirectUriUnknown) —
            # confirm in the client datatype.
            self.client.redirect_uri = self.redirect_uri
        except RedirectUriUnknown:
            raise OAuthInvalidError(
                error="invalid_request",
                explanation="Invalid redirect_uri parameter")

    def _validate_code(self):
        try:
            stored_code = self.auth_code_store.fetch_by_code(self.code)
        except AuthCodeNotFound:
            raise OAuthInvalidError(
                error="invalid_request",
                explanation="Invalid authorization code parameter")
        # Defensive check against stores that match codes loosely.
        if stored_code.code != self.code:
            raise OAuthInvalidError(
                error="invalid_grant",
                explanation="Invalid code parameter in request")
        # The redirect_uri must match the one used in the auth request.
        if stored_code.redirect_uri != self.redirect_uri:
            raise OAuthInvalidError(
                error="invalid_request",
                explanation="Invalid redirect_uri parameter")
        if stored_code.is_expired():
            raise OAuthInvalidError(
                error="invalid_grant",
                explanation="Authorization code has expired")
        # Carry the data captured at authorization time into the token.
        self.data = stored_code.data
        self.scopes = stored_code.scopes
        self.user_id = stored_code.user_id
class AuthorizationCodeGrant(GrantHandlerFactory, ScopeGrant,
                             SiteAdapterMixin):
    """
    Implementation of the Authorization Code Grant auth flow.

    This is a three-legged OAuth process.

    Register an instance of this class with
    :class:`oauth2.AuthorizationController` like this::

        auth_controller = AuthorizationController()
        auth_controller.add_grant_type(AuthorizationCodeGrant())

    .. versionchanged:: 1.0.0
        Require parameter ``site_adapter``.
    """
    grant_type = "authorization_code"
    site_adapter_class = AuthorizationCodeGrantSiteAdapter

    def __init__(self, unique_token=False, expires_in=0, **kwargs):
        self.unique_token = unique_token
        self.expires_in = expires_in
        super(AuthorizationCodeGrant, self).__init__(**kwargs)

    def __call__(self, request, server):
        """
        Returns the handler matching the request, or ``None``.

        :param request: Incoming request
        :type request: oauth2.web.Request
        :param server: The OAuth2 provider instance

        .. versionchanged:: 1.0.0
            Check the HTTP method of a request
        """
        wants_token = (request.method == "POST"
                       and request.post_param("grant_type") == "authorization_code"
                       and request.path == server.token_path)
        if wants_token:
            return AuthorizationCodeTokenHandler(
                access_token_store=server.access_token_store,
                auth_token_store=server.auth_code_store,
                client_authenticator=server.client_authenticator,
                token_generator=server.token_generator,
                unique_token=self.unique_token)

        wants_auth = (request.method == "GET"
                      and request.get_param("response_type") == "code"
                      and request.path == server.authorize_path)
        if wants_auth:
            return AuthorizationCodeAuthHandler(
                auth_token_store=server.auth_code_store,
                client_authenticator=server.client_authenticator,
                scope_handler=self._create_scope_handler(),
                site_adapter=self.site_adapter,
                token_generator=server.token_generator)
        return None
class ImplicitGrant(GrantHandlerFactory, ScopeGrant, SiteAdapterMixin):
    """
    Implementation of the Implicit Grant auth flow.

    This is a three-legged OAuth process.

    Register an instance of this class with
    :class:`oauth2.AuthorizationController` like this::

        auth_controller = AuthorizationController()
        auth_controller.add_grant_type(ImplicitGrant())

    .. versionchanged:: 1.0.0
        Require parameter ``site_adapter``.
    """
    grant_type = "implicit"
    site_adapter_class = ImplicitGrantSiteAdapter

    def __call__(self, request, server):
        # Only handle authorize-path requests asking for a token directly.
        if request.get_param("response_type") != "token":
            return None
        if request.path != server.authorize_path:
            return None
        return ImplicitGrantHandler(
            access_token_store=server.access_token_store,
            client_authenticator=server.client_authenticator,
            scope_handler=self._create_scope_handler(),
            site_adapter=self.site_adapter,
            token_generator=server.token_generator)
class ImplicitGrantHandler(AuthorizeMixin, AuthRequestMixin, GrantHandler):
    """
    Handles the one-step Implicit Grant: issues an access token directly
    from the authorization request and returns it in the URI fragment.
    """
    def __init__(self, access_token_store, **kwargs):
        self.access_token_store = access_token_store
        super(ImplicitGrantHandler, self).__init__(**kwargs)

    def process(self, request, response, environ):
        """
        Authorizes the request and redirects back to the client with a
        freshly generated access token in the URI fragment.
        """
        data = self.authorize(request, response, environ,
                              self.scope_handler.scopes)
        if isinstance(data, Response):
            # The site adapter rendered its own page (e.g. a login form).
            return data
        token = self.token_generator.generate()
        access_token = AccessToken(client_id=self.client.identifier,
                                   grant_type=ImplicitGrant.grant_type,
                                   token=token, data=data[0],
                                   scopes=self.scope_handler.scopes)
        self.access_token_store.save_token(access_token)
        return self._redirect_access_token(response, token)

    def handle_error(self, error, response):
        """
        Redirects to the client with the error code in the URI fragment.
        """
        redirect_location = "%s#error=%s" % (self.client.redirect_uri,
                                             error.error)
        response.add_header("Location", redirect_location)
        response.body = ""
        response.status_code = 302
        return response

    def _redirect_access_token(self, response, token):
        # The token travels in the URI fragment, never in the body.
        uri_with_fragment = "{0}#access_token={1}&token_type=bearer". \
            format(self.client.redirect_uri, token)
        if self.state is not None:
            uri_with_fragment += "&state=" + quote(self.state)
        if self.scope_handler.send_back is True:
            scopes_as_string = encode_scopes(self.scope_handler.scopes,
                                             use_quote=True)
            uri_with_fragment += "&scope=" + scopes_as_string
        response.status_code = 302
        response.add_header("Location", uri_with_fragment)
        # Fixed: was `response.content = ""` — an attribute no other handler
        # uses; the empty body must be set on `response.body`, consistent
        # with handle_error and the other grant handlers in this module.
        response.body = ""
        return response
class ResourceOwnerGrant(GrantHandlerFactory, ScopeGrant, SiteAdapterMixin):
    """
    Implementation of the Resource Owner Password Credentials Grant auth flow.

    In this Grant a user provides a user name and a password.
    An access token is issued if the auth server was able to verify the user
    by her credentials.

    Register an instance of this class with
    :class:`oauth2.AuthorizationController` like this::

        auth_controller = AuthorizationController()
        auth_controller.add_grant_type(ResourceOwnerGrant())

    .. versionchanged:: 1.0.0
        Require parameter ``site_adapter``.
    """
    grant_type = "password"
    site_adapter_class = ResourceOwnerGrantSiteAdapter

    def __init__(self, unique_token=False, expires_in=0, **kwargs):
        self.unique_token = unique_token
        self.expires_in = expires_in
        super(ResourceOwnerGrant, self).__init__(**kwargs)

    def __call__(self, request, server):
        """
        Checks if the incoming request can be handled by the
        ResourceOwnerGrantHandler and returns an instance of it.
        """
        # This grant only answers requests declaring grant_type=password.
        if request.post_param("grant_type") != self.grant_type:
            return None
        return ResourceOwnerGrantHandler(
            access_token_store=server.access_token_store,
            client_authenticator=server.client_authenticator,
            scope_handler=self._create_scope_handler(),
            site_adapter=self.site_adapter,
            token_generator=server.token_generator,
            unique_token=self.unique_token)
class ResourceOwnerGrantHandler(GrantHandler, AccessTokenMixin):
    """
    Class for handling Resource Owner authorization requests.
    See http://tools.ietf.org/html/rfc6749#section-4.3
    """
    # Error text used by handle_error() to distinguish authentication
    # failures (401) from other errors (400).
    OWNER_NOT_AUTHENTICATED = "Unable to authenticate resource owner"
    def __init__(self, client_authenticator, scope_handler, site_adapter,
                 **kwargs):
        """
        :param client_authenticator: Client authenticator
        :type client_authenticator: oauth2.client_authenticator.ClientAuthenticator
        :param scope_handler: Scope handler
        :type scope_handler: oauth2.grant.Scope
        :param site_adapter: Site adapter
        :type site_adapter: oauth2.web.SiteAdapter
        """
        self.client_authenticator = client_authenticator
        self.scope_handler = scope_handler
        self.site_adapter = site_adapter
        # Populated by read_validate_params() before process() runs.
        self.client = None
        self.password = None
        self.username = None
        super(ResourceOwnerGrantHandler, self).__init__(**kwargs)
    def process(self, request, response, environ):
        """
        Takes the incoming request, asks the concrete SiteAdapter to validate
        it and issues a new access token that is returned to the client on
        successful validation.
        """
        try:
            data = self.site_adapter.authenticate(request, environ,
                                                  self.scope_handler.scopes,
                                                  self.client)
            # Normalize the adapter's return value into (data, user_id).
            data = AuthorizeMixin.sanitize_return_value(data)
        except UserNotAuthenticated:
            raise OAuthInvalidError(error="invalid_client",
                                    explanation=self.OWNER_NOT_AUTHENTICATED)
        # The site adapter may short-circuit with a ready-made Response,
        # which is returned to the client verbatim.
        if isinstance(data, Response):
            return data
        token_data = self.create_token(
            client_id=self.client.identifier,
            data=data[0],
            grant_type=ResourceOwnerGrant.grant_type,
            scopes=self.scope_handler.scopes,
            user_id=data[1])
        if self.scope_handler.send_back:
            token_data["scope"] = encode_scopes(self.scope_handler.scopes)
        json_success_response(data=token_data, response=response)
        return response
    def read_validate_params(self, request):
        """
        Checks if all incoming parameters meet the expected values.

        :return: ``True`` if the client could be authenticated and the
                 request parameters were read.
        """
        # Client authentication happens first; raises on failure.
        self.client = self.client_authenticator.by_identifier_secret(request)
        self.password = request.post_param("password")
        self.username = request.post_param("username")
        self.scope_handler.parse(request=request, source="body")
        return True
    def handle_error(self, error, response):
        # Authentication failures map to 401, everything else to 400.
        status_code = 400
        if error.explanation == self.OWNER_NOT_AUTHENTICATED:
            status_code = 401
        return json_error_response(error, response, status_code=status_code)
class RefreshToken(GrantHandlerFactory, ScopeGrant):
    """
    Handles requests for refresh tokens as defined in
    http://tools.ietf.org/html/rfc6749#section-6.

    Adding a Refresh Token to the :class:`oauth2.AuthorizationController`
    like this::

        auth_controller = AuthorizationController()

        auth_controller.add_grant_type(ResourceOwnerGrant(tokens_expire=600))
        auth_controller.add_grant_type(RefreshToken(tokens_expire=1200))

    will cause :class:`oauth2.grant.AuthorizationCodeGrant` and
    :class:`oauth2.grant.ResourceOwnerGrant` to include a refresh token
    and expiration in the response.

    If tokens_expire == 0, the tokens will never expire.
    """
    grant_type = "refresh_token"

    def __init__(self, expires_in, reissue_refresh_tokens=False, **kwargs):
        self.refresh_expires_in = expires_in
        self.reissue_refresh_tokens = reissue_refresh_tokens

        super(RefreshToken, self).__init__(**kwargs)

    def __call__(self, request, server):
        """
        Determines if the current request requests a refresh token.

        :return: An instance of :class:`RefreshTokenHandler`.
        """
        # `and` keeps the original short-circuit: the POST body is only
        # inspected for requests that hit the token endpoint.
        if (request.path == server.token_path
                and request.post_param("grant_type") == "refresh_token"):
            return RefreshTokenHandler(
                access_token_store=server.access_token_store,
                client_authenticator=server.client_authenticator,
                scope_handler=self._create_scope_handler(),
                token_generator=server.token_generator,
                reissue_refresh_tokens=self.reissue_refresh_tokens
            )
        return None
class RefreshTokenHandler(GrantHandler):
    """
    Validates an incoming request and issues a new access token.
    """
    def __init__(self, access_token_store, client_authenticator,
                 scope_handler, token_generator,
                 reissue_refresh_tokens=False):
        # Collaborators injected by the RefreshToken factory.
        self.access_token_store = access_token_store
        self.client_authenticator = client_authenticator
        self.scope_handler = scope_handler
        self.token_generator = token_generator
        # State populated by read_validate_params() before process() runs.
        self.client = None
        self.data = {}
        self.refresh_grant_type = None
        self.refresh_token = None
        self.user_id = None
        # When True, each refresh invalidates the used refresh token and
        # hands out a fresh one together with the new access token.
        self.reissue_refresh_tokens = reissue_refresh_tokens
    def process(self, request, response, environ):
        """
        Create a new access token.
        :param request: The incoming :class:`oauth2.web.Request`.
        :param response: The :class:`oauth2.web.Response` that will be returned
                         to the client.
        :param environ: A ``dict`` containing data of the environment.
        :return: :class:`oauth2.web.Response`
        """
        # The new token keeps the grant type of the token being refreshed.
        token_data = self.token_generator.create_access_token_data(self.refresh_grant_type)
        expires_at = int(time.time()) + token_data["expires_in"]
        access_token = AccessToken(client_id=self.client.identifier,
                                   token=token_data["access_token"],
                                   grant_type=self.refresh_grant_type,
                                   data=self.data, expires_at=expires_at,
                                   scopes=self.scope_handler.scopes,
                                   user_id=self.user_id)
        if self.reissue_refresh_tokens:
            # Rotation: drop the used refresh token, attach a new one.
            self.access_token_store.delete_refresh_token(self.refresh_token)
            access_token.refresh_token = token_data["refresh_token"]
            refresh_expires_in = self.token_generator.refresh_expires_in
            refresh_expires_at = int(time.time()) + refresh_expires_in
            access_token.refresh_expires_at = refresh_expires_at
        else:
            # Without rotation the client keeps its old refresh token, so
            # do not emit a new one in the response body.
            del token_data["refresh_token"]
        self.access_token_store.save_token(access_token)
        json_success_response(data=token_data, response=response)
        return response
    def read_validate_params(self, request):
        """
        Validate the incoming request.
        :param request: The incoming :class:`oauth2.web.Request`.
        :return: Returns ``True`` if data is valid.
        :raises: :class:`oauth2.error.OAuthInvalidError`
        """
        self.refresh_token = request.post_param("refresh_token")
        if self.refresh_token is None:
            raise OAuthInvalidError(
                error="invalid_request",
                explanation="Missing refresh_token in request body")
        self.client = self.client_authenticator.by_identifier_secret(request)
        try:
            access_token = self.access_token_store.fetch_by_refresh_token(
                self.refresh_token
            )
        except AccessTokenNotFound:
            raise OAuthInvalidError(error="invalid_request",
                                    explanation="Invalid refresh token")
        refresh_token_expires_at = access_token.refresh_expires_at
        self.refresh_grant_type = access_token.grant_type
        # An expiry timestamp of 0 means the refresh token never expires
        # (see the RefreshToken factory docstring).
        if refresh_token_expires_at != 0 and \
                refresh_token_expires_at < int(time.time()):
            raise OAuthInvalidError(error="invalid_request",
                                    explanation="Invalid refresh token")
        self.data = access_token.data
        self.user_id = access_token.user_id
        # The requested scopes must not exceed the originally granted ones.
        self.scope_handler.parse(request, "body")
        self.scope_handler.compare(access_token.scopes)
        return True
    def handle_error(self, error, response):
        # Refresh failures are rendered as plain JSON error responses.
        return json_error_response(error, response)
class ClientCredentialsGrant(GrantHandlerFactory, ScopeGrant):
    """Factory for the Client Credentials Grant (RFC 6749, section 4.4)."""
    grant_type = "client_credentials"

    def __call__(self, request, server):
        """
        Returns a :class:`ClientCredentialsHandler` when the request hits
        the token endpoint with the matching grant type, ``None`` otherwise.
        """
        # `and` keeps the original short-circuit: the POST body is only
        # read for requests against the token endpoint.
        if (request.path == server.token_path
                and request.post_param("grant_type") == self.grant_type):
            return ClientCredentialsHandler(
                access_token_store=server.access_token_store,
                client_authenticator=server.client_authenticator,
                scope_handler=self._create_scope_handler(),
                token_generator=server.token_generator)
        return None
class ClientCredentialsHandler(GrantHandler):
    """
    Issues access tokens for the Client Credentials Grant.

    See http://tools.ietf.org/html/rfc6749#section-4.4
    """
    def __init__(self, access_token_store, client_authenticator,
                 scope_handler, token_generator):
        """
        :param access_token_store: Persists issued access tokens.
        :param client_authenticator: Authenticates the requesting client.
        :param scope_handler: Parses and validates requested scopes.
        :param token_generator: Creates token values and expiration data.
        """
        self.access_token_store = access_token_store
        self.client_authenticator = client_authenticator
        self.scope_handler = scope_handler
        self.token_generator = token_generator
        # Set by read_validate_params() before process() runs.
        self.client = None
    def process(self, request, response, environ):
        """
        Creates a new access token for the authenticated client, stores it
        and writes it to the response as JSON.
        """
        body = {"token_type": "Bearer"}
        token = self.token_generator.generate()
        # A missing per-grant-type entry means the token never expires.
        expires_in = self.token_generator.expires_in.get(
            ClientCredentialsGrant.grant_type, None)
        if expires_in is None:
            expires_at = None
        else:
            expires_at = int(time.time()) + expires_in
        access_token = AccessToken(
            client_id=self.client.identifier,
            grant_type=ClientCredentialsGrant.grant_type,
            token=token,
            expires_at=expires_at,
            scopes=self.scope_handler.scopes)
        self.access_token_store.save_token(access_token)
        body["access_token"] = token
        if expires_in is not None:
            body["expires_in"] = expires_in
        if self.scope_handler.send_back:
            body["scope"] = encode_scopes(self.scope_handler.scopes)
        json_success_response(data=body, response=response)
        return response
    def read_validate_params(self, request):
        """
        Authenticates the client and parses the requested scopes.

        :return: ``True`` on successful validation.
        """
        self.client = self.client_authenticator.by_identifier_secret(request)
        self.scope_handler.parse(request=request, source="body")
        # Fix: the sibling handlers (ResourceOwnerGrantHandler,
        # RefreshTokenHandler) signal successful validation explicitly;
        # previously this method implicitly returned None.
        return True
    def handle_error(self, error, response):
        """Renders *error* as a JSON error response."""
        return json_error_response(error, response)
| |
#!/usr/bin/env python3
"""Generate an updated requirements_all.txt."""
import difflib
import importlib
import os
from pathlib import Path
import pkgutil
import re
import sys
from script.hassfest.model import Integration
from homeassistant.util.yaml.loader import load_yaml
# Requirements that do not build/install everywhere (mostly hardware or
# platform bound packages); they are written commented-out into the
# generated requirement files.
COMMENT_REQUIREMENTS = (
    "Adafruit_BBIO",
    "Adafruit-DHT",
    "avion",
    "beacontools",
    "blinkt",
    "bluepy",
    "bme680",
    "credstash",
    "decora",
    "envirophat",
    "evdev",
    "face_recognition",
    "i2csense",
    "opencv-python-headless",
    "py_noaa",
    "pybluez",
    "pycups",
    "PySwitchbot",
    "pySwitchmate",
    "python-eq3bt",
    "python-gammu",
    "python-lirc",
    "pyuserinput",
    "raspihats",
    "rpi-rf",
    "RPi.GPIO",
    "smbus-cffi",
    "tensorflow",
    "VL53L1X2",
)
# Requirements that are allowed to stay without an exact `==` pin.
IGNORE_PIN = ("colorlog>2.1,<3", "keyring>=9.3,<10.0", "urllib3")
# Documentation link included in the error message for unpinned requirements.
URL_PIN = (
    "https://developers.home-assistant.io/docs/"
    "creating_platform_code_review.html#1-requirements"
)
# Location of the generated constraint file, relative to this script.
CONSTRAINT_PATH = os.path.join(
    os.path.dirname(__file__), "../homeassistant/package_constraints.txt"
)
# Static tail appended verbatim to the generated constraints output.
CONSTRAINT_BASE = """
pycryptodome>=3.6.6
# Not needed for our supported Python versions
enum34==1000000000.0.0
# This is a old unmaintained library and is replaced with pycryptodome
pycrypto==1000000000.0.0
"""
# pre-commit hook ids that must not become Python requirements.
IGNORE_PRE_COMMIT_HOOK_ID = ("check-json",)
def has_tests(module: str):
    """Test if a module has tests.

    Module format: homeassistant.components.hue
    Test if exists: tests/components/hue
    """
    test_path = Path(module.replace(".", "/").replace("homeassistant", "tests"))

    if not test_path.exists():
        return False

    if not test_path.is_dir():
        # A plain test file counts as having tests.
        return True

    # Dev environments might have stale directories around from removed
    # tests. A directory holding only `__pycache__` does not exist in Git
    # and is therefore invisible to CI.
    entries = [child.name for child in test_path.glob("*")]
    return entries != ["__pycache__"]
def explore_module(package, explore_children):
    """Return the importable sub-modules of *package*.

    When *explore_children* is true, each direct child is explored one
    level deeper as well.
    """
    module = importlib.import_module(package)

    if not hasattr(module, "__path__"):
        # Plain modules cannot contain sub-modules.
        return []

    found = []
    for _, child_name, _ in pkgutil.iter_modules(module.__path__, package + "."):
        found.append(child_name)
        if explore_children:
            found.extend(explore_module(child_name, False))
    return found
def core_requirements():
    """Gather core requirements out of setup.py."""
    setup_contents = Path("setup.py").read_text()
    match = re.search(r"REQUIRES = \[(.*?)\]", setup_contents, re.S)
    reqs_raw = match.group(1)
    # Extract the quoted strings from the list literal; the backreference
    # accepts either single or double quotes.
    return [quoted for _quote, quoted in re.findall(r"(['\"])(.*?)\1", reqs_raw)]
def gather_recursive_requirements(domain, seen=None):
    """Recursively gather requirements from a module.

    :param domain: integration domain, e.g. ``hue``.
    :param seen: set of domains already processed. Previously this set was
        populated but never consulted, so shared dependencies were
        reprocessed and a circular dependency chain recursed forever; it
        is now used to skip already-visited domains.
    """
    if seen is None:
        seen = set()
    seen.add(domain)
    integration = Integration(Path(f"homeassistant/components/{domain}"))
    integration.load_manifest()
    reqs = set(integration.manifest["requirements"])
    for dep_domain in integration.manifest["dependencies"]:
        # Skip domains already gathered elsewhere in the recursion; their
        # requirements are already part of the overall union.
        if dep_domain not in seen:
            reqs.update(gather_recursive_requirements(dep_domain, seen))
    return reqs
def comment_requirement(req):
    """Comment out requirement. Some don't install on all systems."""
    for ignored in COMMENT_REQUIREMENTS:
        if ignored in req:
            return True
    return False
def gather_modules():
    """Collect the information."""
    reqs = {}
    errors = []

    gather_requirements_from_manifests(errors, reqs)
    gather_requirements_from_modules(errors, reqs)

    # Sort each requirement's module list by dotted depth, then name.
    for requirement in reqs:
        reqs[requirement] = sorted(
            reqs[requirement], key=lambda name: (len(name.split(".")), name)
        )

    if not errors:
        return reqs

    print("******* ERROR")
    print("Errors while importing: ", ", ".join(errors))
    return None
def gather_requirements_from_manifests(errors, reqs):
    """Gather all of the requirements from manifests."""
    integrations = Integration.load_dir(Path("homeassistant/components"))
    # sorted() over items() orders by domain, as before.
    for domain, integration in sorted(integrations.items()):
        if not integration.manifest:
            errors.append(f"The manifest for integration {domain} is invalid.")
            continue

        process_requirements(
            errors,
            integration.manifest["requirements"],
            f"homeassistant.components.{domain}",
            reqs,
        )
def gather_requirements_from_modules(errors, reqs):
    """Collect the requirements from the modules directly."""
    packages = explore_module("homeassistant.scripts", True)
    packages += explore_module("homeassistant.auth", True)

    for package in sorted(packages):
        try:
            module = importlib.import_module(package)
        except ImportError as err:
            print("{}.py: {}".format(package.replace(".", "/"), err))
            errors.append(package)
            continue

        requirements = getattr(module, "REQUIREMENTS", None)
        if requirements:
            process_requirements(errors, requirements, package, reqs)
def process_requirements(errors, module_requirements, package, reqs):
    """Process all of the requirements."""
    for req in module_requirements:
        if "://" in req:
            errors.append(f"{package}[Only pypi dependencies are allowed: {req}]")
        # Requirements must carry an exact `==` pin unless explicitly exempt.
        unpinned = req.partition("==")[1] == ""
        if unpinned and req not in IGNORE_PIN:
            errors.append(f"{package}[Please pin requirement {req}, see {URL_PIN}]")
        reqs.setdefault(req, []).append(package)
def generate_requirements_list(reqs):
    """Generate a pip file based on requirements."""
    parts = []
    for pkg, modules in sorted(reqs.items(), key=lambda item: item[0]):
        # One comment line per module that needs the package.
        parts.extend(f"\n# {module}" for module in sorted(modules))
        if comment_requirement(pkg):
            parts.append(f"\n# {pkg}\n")
        else:
            parts.append(f"\n{pkg}\n")
    return "".join(parts)
def requirements_all_output(reqs):
    """Generate output for requirements_all."""
    return "".join(
        [
            "# Home Assistant core",
            "\n",
            "\n".join(core_requirements()),
            "\n",
            generate_requirements_list(reqs),
        ]
    )
def requirements_test_output(reqs):
    """Generate output for test_requirements."""
    header = [
        "# Home Assistant tests, full dependency set\n",
        f"# Automatically generated by {Path(__file__).name}, do not edit\n\n",
        "-r requirements_test.txt\n",
    ]

    def _wanted(modules):
        # Always install requirements that are not part of integrations;
        # install integration requirements only when tests exist for them.
        return any(
            not mdl.startswith("homeassistant.components.") or has_tests(mdl)
            for mdl in modules
        )

    filtered = {
        requirement: modules
        for requirement, modules in reqs.items()
        if _wanted(modules)
    }
    return "".join(header) + generate_requirements_list(filtered)
def requirements_pre_commit_output():
    """Generate output for pre-commit dependencies."""
    source = ".pre-commit-config.yaml"
    pre_commit_conf = load_yaml(source)

    reqs = []
    for repo in pre_commit_conf["repos"]:
        # Repos without a pinned revision are skipped.
        if not repo.get("rev"):
            continue
        for hook in repo["hooks"]:
            if hook["id"] in IGNORE_PRE_COMMIT_HOOK_ID:
                continue
            reqs.append(f"{hook['id']}=={repo['rev']}")
            reqs.extend(hook.get("additional_dependencies", ()))

    lines = [
        f"# Automatically generated "
        f"from {source} by {Path(__file__).name}, do not edit",
        "",
    ]
    lines.extend(sorted(reqs))
    return "\n".join(lines) + "\n"
def gather_constraints():
    """Construct output for constraint file."""
    constraints = sorted(
        core_requirements() + list(gather_recursive_requirements("default_config"))
    )
    # Empty trailing element yields a newline before the static base.
    constraints.append("")
    return "\n".join(constraints) + CONSTRAINT_BASE
def diff_file(filename, content):
    """Return the context diff between the file on disk and *content*."""
    on_disk = [f"{line}\n" for line in Path(filename).read_text().split("\n")]
    generated = [f"{line}\n" for line in content.split("\n")]
    return list(difflib.context_diff(on_disk, generated, filename, "generated"))
def main(validate):
    """Run the script.

    When *validate* is true, compare the generated content against the
    files on disk and return 1 on any difference; otherwise rewrite the
    files in place. Returns a process exit code (0 on success).
    """
    # The script relies on repository-relative paths; bail out early when
    # not run from the Home Assistant root directory.
    if not os.path.isfile("requirements_all.txt"):
        print("Run this from HA root dir")
        return 1
    data = gather_modules()
    if data is None:
        # Errors were already printed by gather_modules().
        return 1
    reqs_file = requirements_all_output(data)
    reqs_test_file = requirements_test_output(data)
    reqs_pre_commit_file = requirements_pre_commit_output()
    constraints = gather_constraints()
    # (target filename, generated content) pairs.
    files = (
        ("requirements_all.txt", reqs_file),
        ("requirements_test_pre_commit.txt", reqs_pre_commit_file),
        ("requirements_test_all.txt", reqs_test_file),
        ("homeassistant/package_constraints.txt", constraints),
    )
    if validate:
        errors = []
        for filename, content in files:
            diff = diff_file(filename, content)
            if diff:
                errors.append("".join(diff))
        if errors:
            print("ERROR - FOUND THE FOLLOWING DIFFERENCES")
            print()
            print()
            print("\n\n".join(errors))
            print()
            print("Please run python3 -m script.gen_requirements_all")
            return 1
        return 0
    for filename, content in files:
        Path(filename).write_text(content)
    return 0
if __name__ == "__main__":
    # Passing "validate" as the last CLI argument checks the generated
    # files against what is on disk instead of rewriting them.
    _VAL = sys.argv[-1] == "validate"
    sys.exit(main(_VAL))
| |
# -*- coding: utf-8 -*-
# Copyright 2015 Spanish National Research Council
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from ooi.tests import fakes
from ooi.tests.middleware import test_middleware
from ooi import utils
def build_occi_server(server):
    """Build the expected OCCI rendering for a fake nova server dict.

    Returns a list of (header name, header value) tuples covering the
    compute kind/mixin categories, the action links and the OCCI
    attributes, in the order the middleware emits them.

    Fix: the original ``cats.append(...)`` statements carried stray
    trailing commas, turning each statement into a discarded one-element
    tuple — harmless at runtime but misleading; removed.
    """
    name = server["name"]
    server_id = server["id"]
    flavor_id = fakes.flavors[server["flavor"]["id"]]["id"]
    flavor_name = fakes.flavors[server["flavor"]["id"]]["name"]
    ram = fakes.flavors[server["flavor"]["id"]]["ram"]
    cores = fakes.flavors[server["flavor"]["id"]]["vcpus"]
    image_id = server["image"]["id"]
    # Map the nova status to the OCCI compute state.
    status = server["status"].upper()
    if status in ("ACTIVE",):
        status = "active"
    elif status in ("PAUSED", "SUSPENDED", "STOPPED"):
        status = "suspended"
    else:
        status = "inactive"
    cats = []
    cats.append('compute; '
                'scheme="http://schemas.ogf.org/occi/infrastructure#"; '
                'class="kind"; title="compute resource"; '
                'rel="http://schemas.ogf.org/occi/core#resource"')
    cats.append('%s; '
                'scheme="http://schemas.openstack.org/template/os#"; '
                'class="mixin"; title="%s"; '
                'rel="http://schemas.ogf.org/occi/infrastructure#os_tpl"'
                % (image_id, image_id))
    cats.append('%s; '
                'scheme="http://schemas.openstack.org/template/resource#"; '
                'class="mixin"; title="Flavor: %s"; '
                'rel="http://schemas.ogf.org/occi/infrastructure#resource_tpl"'
                % (flavor_id, flavor_name))
    attrs = [
        'occi.core.title="%s"' % name,
        'occi.compute.state="%s"' % status,
        'occi.compute.memory=%s' % ram,
        'occi.compute.cores=%s' % cores,
        'occi.compute.hostname="%s"' % name,
        'occi.core.id="%s"' % server_id,
    ]
    links = []
    links.append('<%s/compute/%s?action=restart>; '
                 'rel="http://schemas.ogf.org/occi/'
                 'infrastructure/compute/action#restart"' %
                 (fakes.application_url, server_id))
    links.append('<%s/compute/%s?action=start>; '
                 'rel="http://schemas.ogf.org/occi/'
                 'infrastructure/compute/action#start"' %
                 (fakes.application_url, server_id))
    links.append('<%s/compute/%s?action=stop>; '
                 'rel="http://schemas.ogf.org/occi/'
                 'infrastructure/compute/action#stop"' %
                 (fakes.application_url, server_id))
    links.append('<%s/compute/%s?action=suspend>; '
                 'rel="http://schemas.ogf.org/occi/'
                 'infrastructure/compute/action#suspend"' %
                 (fakes.application_url, server_id))
    result = []
    for c in cats:
        result.append(("Category", c))
    for l in links:
        result.append(("Link", l))
    for a in attrs:
        result.append(("X-OCCI-Attribute", a))
    return result
class TestComputeController(test_middleware.TestMiddleware):
    """Test OCCI compute controller."""
    def test_list_vms_empty(self):
        """Listing computes for a tenant without servers yields 204/empty."""
        tenant = fakes.tenants["bar"]
        app = self.get_app()
        for url in ("/compute/", "/compute"):
            req = self._build_req(url, tenant["id"], method="GET")
            m = mock.MagicMock()
            m.user.project_id = tenant["id"]
            req.environ["keystone.token_auth"] = m
            resp = req.get_response(app)
            expected_result = ""
            self.assertDefaults(resp)
            self.assertExpectedResult(expected_result, resp)
            self.assertEqual(204, resp.status_code)
    def test_list_vms(self):
        """Each server shows up as an X-OCCI-Location entry."""
        tenant = fakes.tenants["foo"]
        app = self.get_app()
        for url in ("/compute/", "/compute"):
            req = self._build_req(url, tenant["id"], method="GET")
            resp = req.get_response(app)
            self.assertEqual(200, resp.status_code)
            expected = []
            for s in fakes.servers[tenant["id"]]:
                expected.append(
                    ("X-OCCI-Location",
                     utils.join_url(self.application_url + "/",
                                    "compute/%s" % s["id"]))
                )
            self.assertDefaults(resp)
            self.assertExpectedResult(expected, resp)
    def test_show_vm(self):
        """Showing a compute renders the full OCCI representation."""
        tenant = fakes.tenants["foo"]
        app = self.get_app()
        for server in fakes.servers[tenant["id"]]:
            req = self._build_req("/compute/%s" % server["id"],
                                  tenant["id"], method="GET")
            resp = req.get_response(app)
            expected = build_occi_server(server)
            self.assertDefaults(resp)
            self.assertExpectedResult(expected, resp)
            self.assertEqual(200, resp.status_code)
    def test_vm_not_found(self):
        """Requesting an unknown server id results in 404."""
        tenant = fakes.tenants["foo"]
        app = self.get_app()
        req = self._build_req("/compute/%s" % uuid.uuid4().hex,
                              tenant["id"], method="GET")
        resp = req.get_response(app)
        self.assertEqual(404, resp.status_code)
    def test_action_vm(self):
        """Valid actions (stop/start/restart) on a compute return 204."""
        tenant = fakes.tenants["foo"]
        app = self.get_app()
        for action in ("stop", "start", "restart"):
            headers = {
                'Category': (
                    '%s;'
                    'scheme="http://schemas.ogf.org/occi/infrastructure/'
                    'compute/action#";'
                    'class="action"' % action)
            }
            for server in fakes.servers[tenant["id"]]:
                req = self._build_req("/compute/%s?action=%s" % (server["id"],
                                                                 action),
                                      tenant["id"], method="POST",
                                      headers=headers)
                resp = req.get_response(app)
                self.assertDefaults(resp)
                self.assertEqual(204, resp.status_code)
    def test_invalid_action(self):
        """An unknown action name results in 400."""
        tenant = fakes.tenants["foo"]
        app = self.get_app()
        action = "foo"
        for server in fakes.servers[tenant["id"]]:
            req = self._build_req("/compute/%s?action=%s" % (server["id"],
                                                             action),
                                  tenant["id"], method="POST")
            resp = req.get_response(app)
            self.assertDefaults(resp)
            self.assertEqual(400, resp.status_code)
    def test_action_body_mismatch(self):
        """Query action and Category body action must match (else 400)."""
        tenant = fakes.tenants["foo"]
        app = self.get_app()
        action = "stop"
        headers = {
            'Category': (
                'start;'
                'scheme="http://schemas.ogf.org/occi/infrastructure/'
                'compute/action#";'
                'class="action"')
        }
        for server in fakes.servers[tenant["id"]]:
            req = self._build_req("/compute/%s?action=%s" % (server["id"],
                                                             action),
                                  tenant["id"], method="POST",
                                  headers=headers)
            resp = req.get_response(app)
            self.assertDefaults(resp)
            self.assertEqual(400, resp.status_code)
    def test_create_vm(self):
        """Creating a compute with os + resource template mixins succeeds."""
        tenant = fakes.tenants["foo"]
        app = self.get_app()
        headers = {
            'Category': (
                'compute;'
                'scheme="http://schemas.ogf.org/occi/infrastructure#";'
                'class="kind",'
                'foo;'
                'scheme="http://schemas.openstack.org/template/resource#";'
                'class="mixin",'
                'bar;'
                'scheme="http://schemas.openstack.org/template/os#";'
                'class="mixin"')
        }
        req = self._build_req("/compute", tenant["id"], method="POST",
                              headers=headers)
        resp = req.get_response(app)
        expected = [("X-OCCI-Location",
                     utils.join_url(self.application_url + "/",
                                    "compute/%s" % "foo"))]
        self.assertEqual(200, resp.status_code)
        self.assertExpectedResult(expected, resp)
        self.assertDefaults(resp)
    def test_create_vm_incomplete(self):
        """Creating a compute without a resource template fails with 400."""
        tenant = fakes.tenants["foo"]
        app = self.get_app()
        headers = {
            'Category': (
                'compute;'
                'scheme="http://schemas.ogf.org/occi/infrastructure#";'
                'class="kind",'
                'bar;'
                'scheme="http://schemas.openstack.org/template/os#";'
                'class="mixin"')
        }
        req = self._build_req("/compute", tenant["id"], method="POST",
                              headers=headers)
        resp = req.get_response(app)
        self.assertEqual(400, resp.status_code)
        self.assertDefaults(resp)
    def test_create_with_context(self):
        """Creating a compute with user_data contextualization succeeds."""
        tenant = fakes.tenants["foo"]
        app = self.get_app()
        headers = {
            'Category': (
                'compute;'
                'scheme="http://schemas.ogf.org/occi/infrastructure#";'
                'class="kind",'
                'foo;'
                'scheme="http://schemas.openstack.org/template/resource#";'
                'class="mixin",'
                'bar;'
                'scheme="http://schemas.openstack.org/template/os#";'
                'class="mixin",'
                'user_data;'
                'scheme="http://schemas.openstack.org/compute/instance#";'
                'class="mixin"'
            ),
            'X-OCCI-Attribute': (
                'org.openstack.compute.user_data="foo"'
            )
        }
        req = self._build_req("/compute", tenant["id"], method="POST",
                              headers=headers)
        resp = req.get_response(app)
        expected = [("X-OCCI-Location",
                     utils.join_url(self.application_url + "/",
                                    "compute/%s" % "foo"))]
        self.assertEqual(200, resp.status_code)
        self.assertExpectedResult(expected, resp)
        self.assertDefaults(resp)
    def test_vm_links(self):
        """Volume and network attachments are rendered as OCCI links."""
        tenant = fakes.tenants["baz"]
        app = self.get_app()
        for server in fakes.servers[tenant["id"]]:
            req = self._build_req("/compute/%s" % server["id"],
                                  tenant["id"], method="GET")
            resp = req.get_response(app)
            self.assertDefaults(resp)
            self.assertContentType(resp)
            self.assertEqual(200, resp.status_code)
            source = utils.join_url(self.application_url + "/",
                                    "compute/%s" % server["id"])
            # volumes
            vols = server.get("os-extended-volumes:volumes_attached", [])
            for v in vols:
                vol_id = v["id"]
                link_id = '_'.join([server["id"], vol_id])
                target = utils.join_url(self.application_url + "/",
                                        "storage/%s" % vol_id)
                self.assertResultIncludesLink(link_id, source, target, resp)
            # network
            addresses = server.get("addresses", {})
            for addr_set in addresses.values():
                for addr in addr_set:
                    ip = addr["addr"]
                    link_id = '_'.join([server["id"], ip])
                    if addr["OS-EXT-IPS:type"] == "fixed":
                        net_id = "fixed"
                    else:
                        net_id = "floating"
                    target = utils.join_url(self.application_url + "/",
                                            "network/%s" % net_id)
                    self.assertResultIncludesLink(link_id, source, target,
                                                  resp)
    def test_delete_vm(self):
        """Deleting a single compute returns 204."""
        tenant = fakes.tenants["foo"]
        app = self.get_app()
        for s in fakes.servers[tenant["id"]]:
            req = self._build_req("/compute/%s" % s["id"],
                                  tenant["id"], method="DELETE")
            resp = req.get_response(app)
            self.assertContentType(resp)
            self.assertEqual(204, resp.status_code)
    # TODO(enolfc): find a way to be sure that all servers
    # are in fact deleted.
    def test_delete_all_vms(self):
        """Deleting the compute collection returns 204."""
        tenant = fakes.tenants["foo"]
        app = self.get_app()
        req = self._build_req("/compute/", tenant["id"], method="DELETE")
        resp = req.get_response(app)
        self.assertContentType(resp)
        self.assertEqual(204, resp.status_code)
class ComputeControllerTextPlain(test_middleware.TestMiddlewareTextPlain,
                                 TestComputeController):
    """Test OCCI compute controller with Accept: text/plain."""
    # Re-runs every inherited TestComputeController case; the
    # TestMiddlewareTextPlain base presumably selects the text/plain
    # rendering — confirm against test_middleware.
class ComputeControllerTextOcci(test_middleware.TestMiddlewareTextOcci,
                                TestComputeController):
    """Test OCCI compute controller with Accept: text/occi."""
    # Re-runs every inherited TestComputeController case; the
    # TestMiddlewareTextOcci base presumably selects the text/occi
    # rendering — confirm against test_middleware.
| |
#
#
# Copyright (C) 2012, 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module implementing the iallocator code."""
from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import outils
from ganeti import opcodes
import ganeti.rpc.node as rpc
from ganeti import serializer
from ganeti import utils
import ganeti.masterd.instance as gmi
# Validator for a plain list of strings.
_STRING_LIST = ht.TListOf(ht.TString)
# Validator for a list of job definitions: each job is a list of opcodes
# restricted to failover/migrate/replace-disks.
_JOB_LIST = ht.TListOf(ht.TListOf(ht.TStrictDict(True, False, {
    # pylint: disable=E1101
    # Class '...' has no 'OP_ID' member
    "OP_ID": ht.TElemOf([opcodes.OpInstanceFailover.OP_ID,
                         opcodes.OpInstanceMigrate.OP_ID,
                         opcodes.OpInstanceReplaceDisks.OP_ID]),
    })))
# Validator for a list of length-3 tuples; presumably (instance, target,
# node list) for successfully moved instances — confirm with consumers.
_NEVAC_MOVED = \
    ht.TListOf(ht.TAnd(ht.TIsLength(3),
                       ht.TItems([ht.TNonEmptyString,
                                  ht.TNonEmptyString,
                                  ht.TListOf(ht.TNonEmptyString),
                                  ])))
# Validator for a list of length-2 tuples; presumably (instance, reason)
# for failed evacuations — confirm with consumers.
_NEVAC_FAILED = \
    ht.TListOf(ht.TAnd(ht.TIsLength(2),
                       ht.TItems([ht.TNonEmptyString,
                                  ht.TMaybeString,
                                  ])))
# Overall node-evacuate result: (moved, failed, jobs).
_NEVAC_RESULT = ht.TAnd(ht.TIsLength(3),
                        ht.TItems([_NEVAC_MOVED, _NEVAC_FAILED, _JOB_LIST]))
# Common REQ_PARAMS entries shared by the request classes below.
_INST_NAME = ("name", ht.TNonEmptyString)
_INST_UUID = ("inst_uuid", ht.TNonEmptyString)
class _AutoReqParam(outils.AutoSlots):
    """Meta class for request definitions.

    Derives the slot names from the first element of each
    (name, validator) pair in REQ_PARAMS.
    """
    @classmethod
    def _GetSlots(mcs, attrs):
        """Extract the slots out of REQ_PARAMS.
        """
        # setdefault also guarantees REQ_PARAMS exists on every class.
        params = attrs.setdefault("REQ_PARAMS", [])
        return [name for (name, _validator) in params]
class IARequestBase(outils.ValidatedSlots):
    """A generic IAllocator request object.
    """
    # NOTE(review): Py2-style metaclass declaration; on Python 3 this
    # attribute has no effect — confirm the intended interpreter version.
    __metaclass__ = _AutoReqParam
    # Iallocator mode; must be overridden by subclasses.
    MODE = NotImplemented
    # List of (name, validator) pairs; slots are derived from it.
    REQ_PARAMS = []
    # Validator applied to the iallocator's result for this request.
    REQ_RESULT = NotImplemented
    def __init__(self, **kwargs):
        """Constructor for IARequestBase.
        The constructor takes only keyword arguments and will set
        attributes on this object based on the passed arguments. As such,
        it means that you should not pass arguments which are not in the
        REQ_PARAMS attribute for this class.
        """
        outils.ValidatedSlots.__init__(self, **kwargs)
        # Validate immediately so an invalid request never escapes
        # construction.
        self.Validate()
    def Validate(self):
        """Validates all parameters of the request.
        This method returns L{None} if the validation succeeds, or raises
        an exception otherwise.
        @rtype: NoneType
        @return: L{None}, if the validation succeeds
        @raise Exception: validation fails
        """
        assert self.MODE in constants.VALID_IALLOCATOR_MODES
        # Every declared parameter must be present and accepted by its
        # validator.
        for (param, validator) in self.REQ_PARAMS:
            if not hasattr(self, param):
                raise errors.OpPrereqError("Request is missing '%s' parameter" % param,
                                           errors.ECODE_INVAL)
            value = getattr(self, param)
            if not validator(value):
                raise errors.OpPrereqError(("Request parameter '%s' has invalid"
                                            " type %s/value %s") %
                                           (param, type(value), value),
                                           errors.ECODE_INVAL)
    def GetRequest(self, cfg):
        """Gets the request data dict.
        @param cfg: The configuration instance
        """
        raise NotImplementedError
    def GetExtraParams(self):  # pylint: disable=R0201
        """Gets extra parameters to the IAllocator call.
        """
        return {}
    def ValidateResult(self, ia, result):
        """Validates the result of an request.
        @param ia: The IAllocator instance
        @param result: The IAllocator run result
        @raises ResultValidationError: If validation fails
        """
        # Only successful runs are checked against REQ_RESULT.
        if ia.success and not self.REQ_RESULT(result):
            raise errors.ResultValidationError("iallocator returned invalid result,"
                                               " expected %s, got %s" %
                                               (self.REQ_RESULT, result))
class IAReqInstanceAlloc(IARequestBase):
    """An instance allocation request.
    """
    # pylint: disable=E1101
    MODE = constants.IALLOCATOR_MODE_ALLOC
    REQ_PARAMS = [
        _INST_NAME,
        ("memory", ht.TNonNegativeInt),
        ("spindle_use", ht.TNonNegativeInt),
        ("disks", ht.TListOf(ht.TDict)),
        ("disk_template", ht.TString),
        ("group_name", ht.TMaybe(ht.TNonEmptyString)),
        ("os", ht.TString),
        ("tags", _STRING_LIST),
        ("nics", ht.TListOf(ht.TDict)),
        ("vcpus", ht.TInt),
        ("hypervisor", ht.TString),
        ("node_whitelist", ht.TMaybeListOf(ht.TNonEmptyString)),
    ]
    REQ_RESULT = ht.TList

    def RequiredNodes(self):
        """Calculates the required nodes based on the disk_template.
        """
        # Internally mirrored templates need a secondary node.
        return 2 if self.disk_template in constants.DTS_INT_MIRROR else 1

    def GetRequest(self, cfg):
        """Requests a new instance.
        The checks for the completeness of the opcode must have already been
        done.
        """
        # Tag each disk dict with the instance's disk template before
        # computing the total disk space.
        for disk in self.disks:
            disk[constants.IDISK_TYPE] = self.disk_template

        return {
            "name": self.name,
            "disk_template": self.disk_template,
            "group_name": self.group_name,
            "tags": self.tags,
            "os": self.os,
            "vcpus": self.vcpus,
            "memory": self.memory,
            "spindle_use": self.spindle_use,
            "disks": self.disks,
            "disk_space_total": gmi.ComputeDiskSize(self.disks),
            "nics": self.nics,
            "required_nodes": self.RequiredNodes(),
            "hypervisor": self.hypervisor,
        }

    def ValidateResult(self, ia, result):
        """Validates an single instance allocation request.
        """
        IARequestBase.ValidateResult(self, ia, result)
        required = self.RequiredNodes()
        if ia.success and len(result) != required:
            raise errors.ResultValidationError("iallocator returned invalid number"
                                               " of nodes (%s), required %s" %
                                               (len(result), required))
class IAReqMultiInstanceAlloc(IARequestBase):
  """An multi instance allocation request.

  """
  # pylint: disable=E1101
  MODE = constants.IALLOCATOR_MODE_MULTI_ALLOC
  REQ_PARAMS = [
    ("instances", ht.TListOf(ht.TInstanceOf(IAReqInstanceAlloc))),
    ]
  # Successful part of the result: list of (instance name, node name list)
  _MASUCCESS = \
    ht.TListOf(ht.TAnd(ht.TIsLength(2),
                       ht.TItems([ht.TNonEmptyString,
                                  ht.TListOf(ht.TNonEmptyString),
                                  ])))
  # Failed part of the result: list of instance names
  _MAFAILED = ht.TListOf(ht.TNonEmptyString)
  REQ_RESULT = ht.TAnd(ht.TList, ht.TIsLength(2),
                       ht.TItems([_MASUCCESS, _MAFAILED]))

  def GetRequest(self, cfg):
    """Builds the request by delegating to each contained allocation.

    """
    subrequests = [iareq.GetRequest(cfg) for iareq in self.instances]
    return {"instances": subrequests}
class IAReqRelocate(IARequestBase):
  """A relocation request.

  """
  # pylint: disable=E1101
  MODE = constants.IALLOCATOR_MODE_RELOC
  REQ_PARAMS = [
    _INST_UUID,
    ("relocate_from_node_uuids", _STRING_LIST),
    ]
  REQ_RESULT = ht.TList

  def GetRequest(self, cfg):
    """Request an relocation of an instance

    The checks for the completeness of the opcode must have already been
    done.

    @param cfg: configuration object used to look up the instance, its
        disks and its secondary nodes
    @raises ProgrammerError: if the instance UUID is unknown
    @raises OpPrereqError: if the instance is not mirrored, or an
        internally-mirrored instance does not have exactly one secondary

    """
    instance = cfg.GetInstanceInfo(self.inst_uuid)
    disks = cfg.GetInstanceDisks(self.inst_uuid)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.inst_uuid)

    # Relocation only makes sense when the instance's data exists on more
    # than one node
    if not utils.AllDiskOfType(disks, constants.DTS_MIRRORED):
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
                                 errors.ECODE_INVAL)

    secondary_nodes = cfg.GetInstanceSecondaryNodes(instance.uuid)
    if (utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR) and
        len(secondary_nodes) != 1):
      raise errors.OpPrereqError("Instance has not exactly one secondary node",
                                 errors.ECODE_STATE)

    # Only size and type matter for computing the required disk space
    disk_sizes = [{constants.IDISK_SIZE: disk.size,
                   constants.IDISK_TYPE: disk.dev_type} for disk in disks]
    disk_space = gmi.ComputeDiskSize(disk_sizes)

    return {
      "name": instance.name,
      "disk_space_total": disk_space,
      "required_nodes": 1,
      "relocate_from": cfg.GetNodeNames(self.relocate_from_node_uuids),
      }

  def ValidateResult(self, ia, result):
    """Validates the result of an relocation request.

    Beyond the base validation, checks that the node groups of the
    returned placement are a subset of the groups of the original
    placement (relocation must not cross group boundaries).

    """
    IARequestBase.ValidateResult(self, ia, result)

    # Map node name -> group UUID, taken from the input handed to the script
    node2group = dict((name, ndata["group"])
                      for (name, ndata) in ia.in_data["nodes"].items())

    fn = compat.partial(self._NodesToGroups, node2group,
                        ia.in_data["nodegroups"])

    instance = ia.cfg.GetInstanceInfo(self.inst_uuid)
    # Groups of the nodes the instance came from (plus its primary node)
    request_groups = fn(ia.cfg.GetNodeNames(self.relocate_from_node_uuids) +
                        ia.cfg.GetNodeNames([instance.primary_node]))
    # Groups of the nodes the allocator proposed (plus the primary node)
    result_groups = fn(result + ia.cfg.GetNodeNames([instance.primary_node]))

    if ia.success and not set(result_groups).issubset(request_groups):
      raise errors.ResultValidationError("Groups of nodes returned by"
                                         " iallocator (%s) differ from original"
                                         " groups (%s)" %
                                         (utils.CommaJoin(result_groups),
                                          utils.CommaJoin(request_groups)))

  @staticmethod
  def _NodesToGroups(node2group, groups, nodes):
    """Returns a list of unique group names for a list of nodes.

    @type node2group: dict
    @param node2group: Map from node name to group UUID
    @type groups: dict
    @param groups: Group information
    @type nodes: list
    @param nodes: Node names
    @rtype: list
    @return: sorted list of unique group names (UUIDs stand in for
        groups whose name cannot be resolved)

    """
    result = set()

    for node in nodes:
      try:
        group_uuid = node2group[node]
      except KeyError:
        # Ignore unknown node
        pass
      else:
        try:
          group = groups[group_uuid]
        except KeyError:
          # Can't find group, let's use UUID
          group_name = group_uuid
        else:
          group_name = group["name"]

        result.add(group_name)

    return sorted(result)
class IAReqNodeEvac(IARequestBase):
  """A node evacuation request.

  """
  # pylint: disable=E1101
  MODE = constants.IALLOCATOR_MODE_NODE_EVAC
  REQ_PARAMS = [
    ("instances", _STRING_LIST),
    ("evac_mode", ht.TEvacMode),
    ("ignore_soft_errors", ht.TBool),
    ]
  REQ_RESULT = _NEVAC_RESULT

  def GetRequest(self, cfg):
    """Get data for node-evacuate requests.

    """
    request = {}
    request["instances"] = self.instances
    request["evac_mode"] = self.evac_mode
    return request

  def GetExtraParams(self):
    """Get extra iallocator command line options for
    node-evacuate requests.

    """
    if not self.ignore_soft_errors:
      return {}
    # Flag option without a value
    return {"ignore-soft-errors": None}
class IAReqGroupChange(IARequestBase):
  """A group change request.

  """
  # pylint: disable=E1101
  MODE = constants.IALLOCATOR_MODE_CHG_GROUP
  REQ_PARAMS = [
    ("instances", _STRING_LIST),
    ("target_groups", _STRING_LIST),
    ]
  REQ_RESULT = _NEVAC_RESULT

  def GetRequest(self, cfg):
    """Get data for group-change requests.

    """
    return {
      "instances": self.instances,
      "target_groups": self.target_groups,
      }
class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has three sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage

  """
  # pylint: disable=R0902
  # lots of instance attributes

  def __init__(self, cfg, rpc_runner, req):
    self.cfg = cfg
    self.rpc = rpc_runner
    self.req = req
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init result fields
    self.success = self.info = self.result = None

    self._BuildInputData(req)

  def _ComputeClusterDataNodeInfo(self, disk_templates, node_list,
                                  cluster_info, hypervisor_name):
    """Prepare and execute node info call.

    @type disk_templates: list of string
    @param disk_templates: the disk templates of the instances to be allocated
    @type node_list: list of strings
    @param node_list: list of nodes' UUIDs
    @type cluster_info: L{objects.Cluster}
    @param cluster_info: the cluster's information from the config
    @type hypervisor_name: string
    @param hypervisor_name: the hypervisor name
    @rtype: same as the result of the node info RPC call
    @return: the result of the node info RPC call

    """
    storage_units_raw = utils.storage.GetStorageUnits(self.cfg, disk_templates)
    storage_units = rpc.PrepareStorageUnitsForNodes(self.cfg, storage_units_raw,
                                                    node_list)
    hvspecs = [(hypervisor_name, cluster_info.hvparams[hypervisor_name])]
    return self.rpc.call_node_info(node_list, storage_units, hvspecs)

  def _ComputeClusterData(self, disk_template=None):
    """Compute the generic allocator input data.

    @type disk_template: list of string
    @param disk_template: the disk templates of the instances to be allocated

    """
    cfg = self.cfg.GetDetachedConfig()
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cluster_info.cluster_name,
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      "ipolicy": cluster_info.ipolicy,
      }
    ginfo = cfg.GetAllNodeGroupsInfo()
    ninfo = cfg.GetAllNodesInfo()
    iinfo = cfg.GetAllInstancesInfo()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo.values()]

    # node data
    node_list = [n.uuid for n in ninfo.values() if n.vm_capable]

    # Which hypervisor/whitelist to query depends on the request type
    if isinstance(self.req, IAReqInstanceAlloc):
      hypervisor_name = self.req.hypervisor
      node_whitelist = self.req.node_whitelist
    elif isinstance(self.req, IAReqRelocate):
      hypervisor_name = iinfo[self.req.inst_uuid].hypervisor
      node_whitelist = None
    else:
      hypervisor_name = cluster_info.primary_hypervisor
      node_whitelist = None

    if not disk_template:
      disk_template = cluster_info.enabled_disk_templates[0]

    node_data = self._ComputeClusterDataNodeInfo([disk_template], node_list,
                                                 cluster_info, hypervisor_name)

    node_iinfo = \
      self.rpc.call_all_instances_info(node_list,
                                       cluster_info.enabled_hypervisors,
                                       cluster_info.hvparams)

    data["nodegroups"] = self._ComputeNodeGroupData(cluster_info, ginfo)

    config_ndata = self._ComputeBasicNodeData(cfg, ninfo, node_whitelist)
    data["nodes"] = self._ComputeDynamicNodeData(
      ninfo, node_data, node_iinfo, i_list, config_ndata, disk_template)
    assert len(data["nodes"]) == len(ninfo), \
        "Incomplete node data computed"

    data["instances"] = self._ComputeInstanceData(cfg, cluster_info, i_list,
                                                  disk_template)

    self.in_data = data

  @staticmethod
  def _ComputeNodeGroupData(cluster, ginfo):
    """Compute node groups data.

    """
    ng = dict((guuid, {
      "name": gdata.name,
      "alloc_policy": gdata.alloc_policy,
      "networks": [net_uuid for net_uuid, _ in gdata.networks.items()],
      "ipolicy": gmi.CalculateGroupIPolicy(cluster, gdata),
      "tags": list(gdata.GetTags()),
      })
      for guuid, gdata in ginfo.items())

    return ng

  @staticmethod
  def _ComputeBasicNodeData(cfg, node_cfg, node_whitelist):
    """Compute global node data.

    @rtype: dict
    @returns: a dict of name: (node dict, node config)

    """
    # fill in static (config-based) values
    node_results = dict((ninfo.name, {
      "tags": list(ninfo.GetTags()),
      "primary_ip": ninfo.primary_ip,
      "secondary_ip": ninfo.secondary_ip,
      # A node outside the whitelist is presented as offline so the
      # allocator never places an instance there
      "offline": (ninfo.offline or
                  not (node_whitelist is None or
                       ninfo.name in node_whitelist)),
      "drained": ninfo.drained,
      "master_candidate": ninfo.master_candidate,
      "group": ninfo.group,
      "master_capable": ninfo.master_capable,
      "vm_capable": ninfo.vm_capable,
      "ndparams": cfg.GetNdParams(ninfo),
      })
      for ninfo in node_cfg.values())

    return node_results

  @staticmethod
  def _GetAttributeFromHypervisorNodeData(hv_info, node_name, attr):
    """Extract an attribute from the hypervisor's node information.

    This is a helper function to extract data from the hypervisor's information
    about the node, as part of the result of a node_info query.

    @type hv_info: dict of strings
    @param hv_info: dictionary of node information from the hypervisor
    @type node_name: string
    @param node_name: name of the node
    @type attr: string
    @param attr: key of the attribute in the hv_info dictionary
    @rtype: integer
    @return: the value of the attribute
    @raises errors.OpExecError: if key not in dictionary or value not
      integer

    """
    if attr not in hv_info:
      raise errors.OpExecError("Node '%s' didn't return attribute"
                               " '%s'" % (node_name, attr))
    value = hv_info[attr]
    if not isinstance(value, int):
      raise errors.OpExecError("Node '%s' returned invalid value"
                               " for '%s': %s" %
                               (node_name, attr, value))
    return value

  @staticmethod
  def _ComputeStorageDataFromSpaceInfoByTemplate(
      space_info, node_name, disk_template):
    """Extract storage data from node info.

    @type space_info: see result of the RPC call node info
    @param space_info: the storage reporting part of the result of the RPC call
      node info
    @type node_name: string
    @param node_name: the node's name
    @type disk_template: string
    @param disk_template: the disk template to report space for
    @rtype: 4-tuple of integers
    @return: tuple of storage info (total_disk, free_disk, total_spindles,
       free_spindles)

    """
    storage_type = constants.MAP_DISK_TEMPLATE_STORAGE_TYPE[disk_template]
    if storage_type not in constants.STS_REPORT:
      total_disk = total_spindles = 0
      free_disk = free_spindles = 0
    else:
      template_space_info = utils.storage.LookupSpaceInfoByDiskTemplate(
          space_info, disk_template)
      if not template_space_info:
        # FIX: the two literals used to concatenate to "disktemplate"
        raise errors.OpExecError("Node '%s' didn't return space info for disk"
                                 " template '%s'" % (node_name, disk_template))
      total_disk = template_space_info["storage_size"]
      free_disk = template_space_info["storage_free"]

      total_spindles = 0
      free_spindles = 0
      if disk_template in constants.DTS_LVM:
        lvm_pv_info = utils.storage.LookupSpaceInfoByStorageType(
            space_info, constants.ST_LVM_PV)
        if lvm_pv_info:
          total_spindles = lvm_pv_info["storage_size"]
          free_spindles = lvm_pv_info["storage_free"]
    return (total_disk, free_disk, total_spindles, free_spindles)

  @staticmethod
  def _ComputeStorageDataFromSpaceInfo(space_info, node_name, has_lvm):
    """Extract storage data from node info.

    @type space_info: see result of the RPC call node info
    @param space_info: the storage reporting part of the result of the RPC call
      node info
    @type node_name: string
    @param node_name: the node's name
    @type has_lvm: boolean
    @param has_lvm: whether or not LVM storage information is requested
    @rtype: 4-tuple of integers
    @return: tuple of storage info (total_disk, free_disk, total_spindles,
       free_spindles)

    """
    # TODO: replace this with proper storage reporting
    if has_lvm:
      lvm_vg_info = utils.storage.LookupSpaceInfoByStorageType(
          space_info, constants.ST_LVM_VG)
      if not lvm_vg_info:
        raise errors.OpExecError("Node '%s' didn't return LVM vg space info."
                                 % (node_name))
      total_disk = lvm_vg_info["storage_size"]
      free_disk = lvm_vg_info["storage_free"]
      lvm_pv_info = utils.storage.LookupSpaceInfoByStorageType(
          space_info, constants.ST_LVM_PV)
      if not lvm_pv_info:
        raise errors.OpExecError("Node '%s' didn't return LVM pv space info."
                                 % (node_name))
      total_spindles = lvm_pv_info["storage_size"]
      free_spindles = lvm_pv_info["storage_free"]
    else:
      # we didn't even ask the node for VG status, so use zeros
      total_disk = free_disk = 0
      total_spindles = free_spindles = 0
    return (total_disk, free_disk, total_spindles, free_spindles)

  @staticmethod
  def _ComputeInstanceMemory(instance_list, node_instances_info, node_uuid,
                             input_mem_free):
    """Compute memory used by primary instances.

    @rtype: tuple (int, int, int)
    @returns: A tuple of three integers: 1. the sum of memory used by primary
      instances on the node (including the ones that are currently down), 2.
      the sum of memory used by primary instances of the node that are up, 3.
      the amount of memory that is free on the node considering the current
      usage of the instances.

    """
    i_p_mem = i_p_up_mem = 0
    mem_free = input_mem_free
    for iinfo, beinfo in instance_list:
      if iinfo.primary_node == node_uuid:
        i_p_mem += beinfo[constants.BE_MAXMEM]
        if iinfo.name not in node_instances_info[node_uuid].payload:
          # Instance not running: it occupies no memory right now
          i_used_mem = 0
        else:
          i_used_mem = int(node_instances_info[node_uuid]
                           .payload[iinfo.name]["memory"])
        # Memory the instance could still claim up to its maximum
        i_mem_diff = beinfo[constants.BE_MAXMEM] - i_used_mem
        mem_free -= max(0, i_mem_diff)

        if iinfo.admin_state == constants.ADMINST_UP:
          i_p_up_mem += beinfo[constants.BE_MAXMEM]
    return (i_p_mem, i_p_up_mem, mem_free)

  def _ComputeDynamicNodeData(self, node_cfg, node_data, node_iinfo, i_list,
                              node_results, disk_template):
    """Compute global node data.

    @param node_results: the basic node structures as filled from the config

    """
    #TODO(dynmem): compute the right data on MAX and MIN memory
    # make a copy of the current dict
    node_results = dict(node_results)
    for nuuid, nresult in node_data.items():
      ninfo = node_cfg[nuuid]
      assert ninfo.name in node_results, "Missing basic data for node %s" % \
             ninfo.name

      if not ninfo.offline:
        nresult.Raise("Can't get data for node %s" % ninfo.name)
        node_iinfo[nuuid].Raise("Can't get node instance info from node %s" %
                                ninfo.name)
        (_, space_info, (hv_info, )) = nresult.payload

        mem_free = self._GetAttributeFromHypervisorNodeData(hv_info, ninfo.name,
                                                            "memory_free")

        (i_p_mem, i_p_up_mem, mem_free) = self._ComputeInstanceMemory(
            i_list, node_iinfo, nuuid, mem_free)
        (total_disk, free_disk, total_spindles, free_spindles) = \
            self._ComputeStorageDataFromSpaceInfoByTemplate(
                space_info, ninfo.name, disk_template)

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": self._GetAttributeFromHypervisorNodeData(
              hv_info, ninfo.name, "memory_total"),
          "reserved_memory": self._GetAttributeFromHypervisorNodeData(
              hv_info, ninfo.name, "memory_dom0"),
          "free_memory": mem_free,
          "total_disk": total_disk,
          "free_disk": free_disk,
          "total_spindles": total_spindles,
          "free_spindles": free_spindles,
          "total_cpus": self._GetAttributeFromHypervisorNodeData(
              hv_info, ninfo.name, "cpu_total"),
          "reserved_cpus": self._GetAttributeFromHypervisorNodeData(
              hv_info, ninfo.name, "cpu_dom0"),
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr_dyn.update(node_results[ninfo.name])
        node_results[ninfo.name] = pnr_dyn

    return node_results

  @staticmethod
  def _ComputeInstanceData(cfg, cluster_info, i_list, disk_template):
    """Compute global instance data.

    """
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
        nic_dict = {
          "mac": nic.mac,
          "ip": nic.ip,
          "mode": filled_params[constants.NIC_MODE],
          "link": filled_params[constants.NIC_LINK],
          }
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
      inst_disks = cfg.GetInstanceDisks(iinfo.uuid)
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_state": iinfo.admin_state,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MAXMEM],
        "spindle_use": beinfo[constants.BE_SPINDLE_USE],
        "os": iinfo.os,
        "nodes": [cfg.GetNodeName(iinfo.primary_node)] +
                 cfg.GetNodeNames(
                   cfg.GetInstanceSecondaryNodes(iinfo.uuid)),
        "nics": nic_data,
        "disks": [{constants.IDISK_TYPE: dsk.dev_type,
                   constants.IDISK_SIZE: dsk.size,
                   constants.IDISK_MODE: dsk.mode,
                   constants.IDISK_SPINDLES: dsk.spindles}
                  for dsk in inst_disks],
        "disk_template": disk_template,
        "disks_active": iinfo.disks_active,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = gmi.ComputeDiskSize(pir["disks"])
      instance_data[iinfo.name] = pir

    return instance_data

  def _BuildInputData(self, req):
    """Build input data structures.

    """
    request = req.GetRequest(self.cfg)
    disk_template = None
    if request.get("disk_template") is not None:
      disk_template = request["disk_template"]
    else:
      # Fall back to the instance's current template; only request types
      # carrying an instance UUID can end up here
      disk_template = self.cfg.GetInstanceDiskTemplate(self.req.inst_uuid)
    if disk_template is None:
      raise errors.ProgrammerError("disk template should not be none")
    self._ComputeClusterData(disk_template=disk_template)

    request["type"] = req.MODE
    self.in_data["request"] = request

    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.rpc.call_iallocator_runner

    ial_params = self.cfg.GetDefaultIAllocatorParameters()
    # Request-specific options override the cluster defaults
    ial_params.update(self.req.GetExtraParams())

    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text, ial_params)
    result.Raise("Failure while running the iallocator script")

    self.out_text = result.payload
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and if successful save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception as err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # TODO: remove backwards compatiblity in later versions
    if "nodes" in rdict and "result" not in rdict:
      rdict["result"] = rdict["nodes"]
      del rdict["nodes"]

    for key in "success", "info", "result":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    self.req.ValidateResult(self, self.result)
    self.out_data = rdict
| |
"""Implementation of :class:`ModularInteger` class. """
from __future__ import print_function, division
import operator
from sympy.polys.polyutils import PicklableWithSlots
from sympy.polys.polyerrors import CoercionFailed
from sympy.polys.domains.domainelement import DomainElement
from sympy.utilities import public
@public
class ModularInteger(PicklableWithSlots, DomainElement):
    """A class representing a modular integer. """

    mod, dom, sym, _parent = None, None, None, None

    __slots__ = ['val']

    def parent(self):
        """Return the domain this element belongs to."""
        return self._parent

    def __init__(self, val):
        if isinstance(val, self.__class__):
            raw = val.val
        else:
            raw = self.dom.convert(val)
        self.val = raw % self.mod

    def __hash__(self):
        return hash((self.val, self.mod))

    def __repr__(self):
        return "%s(%s)" % (type(self).__name__, self.val)

    def __str__(self):
        return "%s mod %s" % (self.val, self.mod)

    def __int__(self):
        return int(self.to_int())

    def to_int(self):
        """Return the integer value, symmetric around zero when ``sym`` is set."""
        if not self.sym:
            return self.val
        if self.val <= self.mod // 2:
            return self.val
        return self.val - self.mod

    def __pos__(self):
        return self

    def __neg__(self):
        return self.__class__(-self.val)

    @classmethod
    def _get_val(cls, other):
        """Coerce ``other`` to a raw domain value, or None if incompatible."""
        if isinstance(other, cls):
            return other.val
        try:
            return cls.dom.convert(other)
        except CoercionFailed:
            return None

    def __add__(self, other):
        other_val = self._get_val(other)
        if other_val is None:
            return NotImplemented
        return self.__class__(self.val + other_val)

    def __radd__(self, other):
        return self.__add__(other)

    def __sub__(self, other):
        other_val = self._get_val(other)
        if other_val is None:
            return NotImplemented
        return self.__class__(self.val - other_val)

    def __rsub__(self, other):
        return (-self).__add__(other)

    def __mul__(self, other):
        other_val = self._get_val(other)
        if other_val is None:
            return NotImplemented
        return self.__class__(self.val * other_val)

    def __rmul__(self, other):
        return self.__mul__(other)

    def __div__(self, other):
        other_val = self._get_val(other)
        if other_val is None:
            return NotImplemented
        # Division is multiplication by the modular inverse
        return self.__class__(self.val * self._invert(other_val))

    def __rdiv__(self, other):
        return self.invert().__mul__(other)

    __truediv__ = __div__
    __rtruediv__ = __rdiv__

    def __mod__(self, other):
        other_val = self._get_val(other)
        if other_val is None:
            return NotImplemented
        return self.__class__(self.val % other_val)

    def __rmod__(self, other):
        other_val = self._get_val(other)
        if other_val is None:
            return NotImplemented
        return self.__class__(other_val % self.val)

    def __pow__(self, exp):
        if not exp:
            return self.__class__(self.dom.one)

        if exp < 0:
            # Negative exponent: raise the inverse to the positive power
            base, power = self.invert().val, -exp
        else:
            base, power = self.val, exp

        return self.__class__(pow(base, int(power), self.mod))

    def _compare(self, other, op):
        """Apply comparison ``op`` after reducing ``other`` into this ring."""
        other_val = self._get_val(other)
        if other_val is None:
            return NotImplemented
        return op(self.val, other_val % self.mod)

    def __eq__(self, other):
        return self._compare(other, operator.eq)

    def __ne__(self, other):
        return self._compare(other, operator.ne)

    def __lt__(self, other):
        return self._compare(other, operator.lt)

    def __le__(self, other):
        return self._compare(other, operator.le)

    def __gt__(self, other):
        return self._compare(other, operator.gt)

    def __ge__(self, other):
        return self._compare(other, operator.ge)

    def __nonzero__(self):
        return bool(self.val)

    __bool__ = __nonzero__

    @classmethod
    def _invert(cls, value):
        """Return the modular inverse of ``value`` in the ground domain."""
        return cls.dom.invert(value, cls.mod)

    def invert(self):
        return self.__class__(self._invert(self.val))
# Cache of dynamically created ModularInteger subclasses, keyed by
# (modulus, ground domain, symmetric flag).
_modular_integer_cache = {}
def ModularIntegerFactory(_mod, _dom, _sym, parent):
    """Create custom class for specific integer modulus."""
    ok = True
    try:
        _mod = _dom.convert(_mod)
    except CoercionFailed:
        ok = False

    if not ok or _mod < 1:
        raise ValueError("modulus must be a positive integer, got %s" % _mod)

    key = _mod, _dom, _sym

    # Reuse a previously generated class for the same parameters, if any
    cls = _modular_integer_cache.get(key)

    if cls is None:
        class cls(ModularInteger):
            mod, dom, sym = _mod, _dom, _sym
            _parent = parent

        cls.__name__ = ("SymmetricModularIntegerMod%s" if _sym
                        else "ModularIntegerMod%s") % _mod

        _modular_integer_cache[key] = cls

    return cls
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals, print_function
import os
import re
import itertools
import datetime
import six
from six.moves.urllib.parse import urlencode
from ..results import iter_results
from ..console import log
from ..publishing import OutputPublisher
from ..step_detect import detect_regressions
from .. import util
from .. import feed
class Regressions(OutputPublisher):
    # Publisher that detects and reports performance regressions in the
    # benchmark graphs, emitting both a JSON summary and an Atom feed.
    name = "regressions"
    button_label = "Show regressions"
    description = "Display information about recent regressions"
    order = 3

    @classmethod
    def publish(cls, conf, repo, benchmarks, graphs, revisions):
        """Run regression detection over all graphs and write the outputs."""
        # Analyze the data in the graphs --- it's been cleaned up and
        # it's easier to work with than the results directly
        regressions = []
        revision_to_hash = dict((r, h) for h, r in six.iteritems(revisions))

        data_filter = _GraphDataFilter(conf, repo, revisions)

        all_params = graphs.get_params()

        for j, (file_name, graph) in enumerate(graphs):
            if 'summary' in graph.params:
                # Summary graphs aggregate several benchmarks; skip them
                continue

            benchmark_name = os.path.basename(file_name)
            benchmark = benchmarks.get(benchmark_name)
            if not benchmark:
                continue

            log.dot()

            for graph_data in data_filter.get_graph_data(graph, benchmark):
                cls._process_regression(regressions, revision_to_hash, repo, all_params,
                                        graph_data, graph)

        cls._save(conf, {'regressions': regressions})
        cls._save_feed(conf, benchmarks, regressions, revisions, revision_to_hash)

    @classmethod
    def _process_regression(cls, regressions, revision_to_hash, repo, all_params,
                            graph_data, graph):
        """Detect regressions in one data set and append them to *regressions*."""
        j, entry_name, steps, threshold = graph_data

        last_v, best_v, jumps = detect_regressions(steps, threshold)

        if last_v is None:
            # No regression detected for this entry
            return

        # Select unique graph params
        graph_params = {}
        for name, value in six.iteritems(graph.params):
            if len(all_params[name]) > 1:
                graph_params[name] = value

        graph_path = graph.path + '.json'

        # Check which ranges are a single commit
        for k, jump in enumerate(jumps):
            commit_a = revision_to_hash[jump[0]]
            commit_b = revision_to_hash[jump[1]]
            spec = repo.get_range_spec(commit_a, commit_b)
            commits = repo.get_hashes_from_range(spec)
            if len(commits) == 1:
                # Single-commit jump: drop the start revision marker
                jumps[k] = (None, jump[1], jump[2], jump[3])

        # Produce output
        regression = [entry_name, graph_path, graph_params, j, last_v, best_v, jumps]
        regressions.append(regression)

    @classmethod
    def _save(cls, conf, data):
        """Write the regression summary as JSON into the html output dir."""
        fn = os.path.join(conf.html_dir, 'regressions.json')
        util.write_json(fn, data, compact=True)

    @classmethod
    def _save_feed(cls, conf, benchmarks, data, revisions, revision_to_hash):
        """
        Save the results as an Atom feed
        """
        filename = os.path.join(conf.html_dir, 'regressions.xml')

        # Determine publication date as the date when the benchmark
        # was run --- if it is missing, use the date of the commit
        run_timestamps = {}
        revision_timestamps = {}
        for results in iter_results(conf.results_dir):
            if results.commit_hash not in revisions:
                # revisions could be filtered when specifying a range
                # in 'asv publish'
                continue
            revision = revisions[results.commit_hash]
            revision_timestamps[revision] = results.date

            # Time when the benchmark was run
            for benchmark_name, timestamp in six.iteritems(results.started_at):
                if timestamp is None:
                    continue
                key = (benchmark_name, revision)
                run_timestamps[key] = timestamp

            # Fallback to commit date
            for benchmark_name in results.get_result_keys(benchmarks):
                key = (benchmark_name, revision)
                run_timestamps.setdefault(key, results.date)

        # Generate feed entries
        entries = []

        for name, graph_path, graph_params, idx, last_value, best_value, jumps in data:
            if '(' in name:
                # Parameterized entry: strip the parameter suffix
                benchmark_name = name[:name.index('(')]
            else:
                benchmark_name = name

            benchmark = benchmarks[benchmark_name]

            if idx is not None:
                graph_params = dict(graph_params)

                # Add URL parameters
                param_values, = itertools.islice(itertools.product(*benchmark['params']),
                                                 idx, idx + 1)
                for k, v in zip(benchmark['param_names'], param_values):
                    graph_params['p-' + k] = v

            for rev1, rev2, value1, value2 in jumps:
                timestamps = (run_timestamps[benchmark_name, t] for t in (rev1, rev2) if t is not None)
                last_timestamp = max(timestamps)

                updated = datetime.datetime.fromtimestamp(last_timestamp/1000)

                params = dict(graph_params)

                if rev1 is None:
                    params['commits'] = '{0}'.format(revision_to_hash[rev2])
                else:
                    params['commits'] = '{0}-{1}'.format(revision_to_hash[rev1],
                                                         revision_to_hash[rev2])

                link = 'index.html#{0}?{1}'.format(benchmark_name, urlencode(params))

                try:
                    best_percentage = "{0:.2f}%".format(100 * (last_value - best_value) / best_value)
                except ZeroDivisionError:
                    best_percentage = "{0:.2g} units".format(last_value - best_value)

                try:
                    percentage = "{0:.2f}%".format(100 * (value2 - value1) / value1)
                except ZeroDivisionError:
                    percentage = "{0:.2g} units".format(value2 - value1)

                jump_date = datetime.datetime.fromtimestamp(revision_timestamps[rev2]/1000)
                jump_date_str = jump_date.strftime('%Y-%m-%d %H:%M:%S')

                if rev1 is not None:
                    commit_a = revision_to_hash[rev1]
                    commit_b = revision_to_hash[rev2]
                    if 'github.com' in conf.show_commit_url:
                        commit_url = conf.show_commit_url + '../compare/' + commit_a + "..." + commit_b
                    else:
                        commit_url = conf.show_commit_url + commit_a
                    commit_ref = 'in commits <a href="{0}">{1}...{2}</a>'.format(commit_url,
                                                                                 commit_a[:8],
                                                                                 commit_b[:8])
                else:
                    commit_a = revision_to_hash[rev2]
                    commit_url = conf.show_commit_url + commit_a
                    commit_ref = 'in commit <a href="{0}">{1}</a>'.format(commit_url, commit_a[:8])

                unit = benchmark.get('unit', '')
                best_value_str = util.human_value(best_value, unit)
                last_value_str = util.human_value(last_value, unit)
                value1_str = util.human_value(value1, unit)
                value2_str = util.human_value(value2, unit)

                # NOTE: the format strings below pull variables via
                # **locals(), so local names here are load-bearing.
                title = "{percentage} {name}".format(**locals())
                summary = """
                <a href="{link}">{percentage} regression</a> on {jump_date_str} {commit_ref}.<br>
                New value: {value2_str}, old value: {value1_str}.<br>
                Latest value: {last_value_str} ({best_percentage} worse than best value {best_value_str}).
                """.format(**locals()).strip()

                # Information that uniquely identifies a regression
                # --- if the values and the texts change on later
                # runs, feed readers should still identify the regression
                # as the same one, as long as the benchmark name and
                # commits match.
                id_context = [name, revision_to_hash.get(rev1, ""), revision_to_hash.get(rev2, "")]
                id_date = util.js_timestamp_to_datetime(revision_timestamps[rev2])

                entries.append(feed.FeedEntry(title, updated, link, summary, id_context, id_date))

        entries.sort(key=lambda x: x.updated, reverse=True)

        feed.write_atom(filename, entries,
                        title='{0} performance regressions'.format(conf.project),
                        author='Airspeed Velocity',
                        address='{0}.asv'.format(conf.project))
class _GraphDataFilter(object):
"""
Obtain data sets from graphs, following configuration settings.
"""
def __init__(self, conf, repo, revisions):
self.conf = conf
self.repo = repo
self.revisions = revisions
self._start_revisions = {}
def get_graph_data(self, graph, benchmark):
"""
Iterator over graph data sets
Yields
------
param_idx
Flat index to parameter permutations for parameterized benchmarks.
None if benchmark is not parameterized.
entry_name
Name for the data set. If benchmark is non-parameterized, this is the
benchmark name.
steps
Steps to consider in regression detection.
threshold
User-specified threshold for regression detection.
"""
if benchmark.get('params'):
param_iter = enumerate(zip(itertools.product(*benchmark['params']),
graph.get_steps()))
else:
param_iter = [(None, (None, graph.get_steps()))]
for j, (param, steps) in param_iter:
if param is None:
entry_name = benchmark['name']
else:
entry_name = benchmark['name'] + '({0})'.format(', '.join(param))
start_revision = self._get_start_revision(graph, benchmark, entry_name)
threshold = self._get_threshold(graph, benchmark, entry_name)
if start_revision is None:
# Skip detection
continue
steps = [step for step in steps if step[1] >= start_revision]
yield j, entry_name, steps, threshold
def _get_start_revision(self, graph, benchmark, entry_name):
"""
Compute the first revision allowed by asv.conf.json.
Revisions correspond to linearized commit history and the
regression detection runs on this order --- the starting commit
thus corresponds to a specific starting revision.
"""
start_revision = min(six.itervalues(self.revisions))
if graph.params.get('branch'):
branch_suffix = '@' + graph.params.get('branch')
else:
branch_suffix = ''
for regex, start_commit in six.iteritems(self.conf.regressions_first_commits):
if re.match(regex, entry_name + branch_suffix):
if start_commit is None:
# Disable regression detection completely
return None
if self.conf.branches == [None]:
key = (start_commit, None)
else:
key = (start_commit, graph.params.get('branch'))
if key not in self._start_revisions:
spec = self.repo.get_new_range_spec(*key)
start_hash = self.repo.get_hash_from_name(start_commit)
for commit in [start_hash] + self.repo.get_hashes_from_range(spec):
rev = self.revisions.get(commit)
if rev is not None:
self._start_revisions[key] = rev
break
else:
# Commit not found in the branch --- warn and ignore.
log.warning(("Commit {0} specified in `regressions_first_commits` "
"not found in branch").format(start_commit))
self._start_revisions[key] = -1
start_revision = max(start_revision, self._start_revisions[key] + 1)
return start_revision
def _get_threshold(self, graph, benchmark, entry_name):
    """
    Compute the regression threshold in asv.conf.json.

    The largest threshold among all ``regressions_thresholds`` patterns
    matching the entry name (with an optional '@branch' suffix) wins;
    0.05 is the default when no pattern matches.
    """
    branch = graph.params.get('branch')
    target = entry_name + ('@' + branch if branch else '')

    best = None
    for regex, threshold in six.iteritems(self.conf.regressions_thresholds):
        if not re.match(regex, target):
            continue
        try:
            value = float(threshold)
        except ValueError:
            raise util.UserError("Non-float threshold in asv.conf.json: {!r}".format(threshold))
        best = value if best is None else max(value, best)

    return 0.05 if best is None else best
| |
#!/usr/bin/env python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Views for Organization Admins.
"""
__authors__ = [
'"Lennard de Rijk" <ljvderijk@gmail.com>'
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
from django import forms
from django.utils.translation import ugettext
from soc.logic import dicts
from soc.logic.models import organization as org_logic
from soc.logic.models import org_admin as org_admin_logic
from soc.logic.models import student as student_logic
from soc.views.helper import access
from soc.views.helper import dynaform
from soc.views.helper import redirects
from soc.views.helper import responses
from soc.views.helper import params as params_helper
from soc.views.helper import widgets
from soc.views.models import organization as org_view
from soc.views.models import role
import soc.logic.models.org_admin
class View(role.View):
  """View methods for the Organization Admin model.
  """

  # Shown instead of the agreement checkbox when the user already agreed
  # while submitting the organization application.
  DEF_ALREADY_AGREED_MSG = ugettext(
      "You have already accepted this agreement when submitting "
      "the organization application.")

  def __init__(self, params=None):
    """Defines the fields and methods required for the base View class
    to provide the user with list, public, create, edit and delete views.

    Params:
      params: a dict with params for this View
    """
    # Access rights per named view; entries are checker names or
    # (checker, args) tuples evaluated by soc.views.helper.access.
    rights = access.Checker(params)
    rights['create'] = ['checkIsDeveloper']
    rights['edit'] = [('checkIsMyActiveRole', org_admin_logic.logic)]
    rights['delete'] = ['checkIsDeveloper']
    rights['invite'] = [('checkHasRoleForScope',
                         org_admin_logic.logic)]
    rights['accept_invite'] = [
        ('checkIsMyRequestWithStatus', [['group_accepted']]),
        ('checkIsNotStudentForProgramOfOrgInRequest', [org_logic.logic,
                                                       student_logic.logic])]
    rights['process_request'] = [
        ('checkCanProcessRequest', [[org_admin_logic.logic]])]
    rights['manage'] = [
        ('checkIsAllowedToManageRole', [org_admin_logic.logic,
                                        org_admin_logic.logic])]

    new_params = {}
    new_params['logic'] = soc.logic.models.org_admin.logic
    new_params['group_logic'] = org_logic.logic
    new_params['group_view'] = org_view.view
    new_params['rights'] = rights

    new_params['scope_view'] = org_view
    new_params['name'] = "Organization Admin"
    new_params['module_name'] = "org_admin"
    new_params['sidebar_grouping'] = 'Organizations'

    # These fields are filled in programmatically (see _editPost), not
    # through the form.
    new_params['extra_dynaexclude'] = ['agreed_to_tos', 'program']

    new_params['create_dynafields'] = [
        {'name': 'scope_path',
         'base': forms.fields.CharField,
         'widget': forms.HiddenInput,
         'required': True,
         },
        {'name': 'admin_agreement',
         'base': forms.fields.CharField,
         'required': False,
         'widget': widgets.AgreementField,
         'group': ugettext("5. Terms of Service"),
         },
        {'name': 'agreed_to_admin_agreement',
         'base': forms.fields.BooleanField,
         'initial': False,
         'required':True,
         'label': ugettext('I agree to the Admin Agreement'),
         'group': ugettext("5. Terms of Service"),
         },
        ]

    new_params['allow_invites'] = True

    # only if subclassed, so params is not empty
    new_params['show_in_roles_overview'] = bool(params)

    new_params['public_field_keys'] = ['name', 'link_id', 'scope_path']
    new_params['public_field_names'] = ["Admin Name", "Admin ID", "Organization ID"]

    params = dicts.merge(params, new_params)

    super(View, self).__init__(params=params)

    params = self.getParams()

    # register the role with the group_view
    params['group_view'].registerRole(self._logic.role_name, self)

    # create and store the special form for invited users
    dynafields = [
        {'name': 'link_id',
         'base': forms.CharField,
         'widget': widgets.ReadOnlyInput(),
         'required': False,
         },
        {'name': 'admin_agreement',
         'base': forms.fields.Field,
         'required': False,
         'widget': widgets.AgreementField,
         'group': ugettext("5. Terms of Service"),
         },
        ]

    dynaproperties = params_helper.getDynaFields(dynafields)

    invited_create_form = dynaform.extendDynaForm(
        dynaform = params['create_form'],
        dynaproperties = dynaproperties)

    params['invited_create_form'] = invited_create_form

    # add the contact field to the admin list
    params['admin_field_keys'].append('can_we_contact_you')
    params['admin_field_names'].append('Allowed to Contact?')
    params['admin_field_hidden'].append('can_we_contact_you')

  def _editPost(self, request, entity, fields):
    """See base.View._editPost().
    """
    if not entity:
      # new role: the submitted link_id field holds the user entity
      fields['user'] = fields['link_id']
      fields['link_id'] = fields['user'].link_id

    # derive the program from the organization named by scope_path
    group_logic = self._params['group_logic']
    group_entity = group_logic.getFromKeyName(fields['scope_path'])
    fields['program'] = group_entity.scope

    # mirror the agreement checkbox into the generic ToS field
    fields['agreed_to_tos'] = fields['agreed_to_admin_agreement']

    super(View, self)._editPost(request, entity, fields)

  def _acceptInvitePost(self, fields, request, context, params, **kwargs):
    """Fills in the fields that were missing in the invited_created_form.

    For params see base.View._acceptInvitePost()
    """
    # fill in the appropriate fields that were missing in the form
    fields['agreed_to_tos'] = fields['agreed_to_admin_agreement']

    group_logic = params['group_logic']
    group_entity = group_logic.getFromKeyName(fields['scope_path'])
    fields['program'] = group_entity.scope

  def _editGet(self, request, entity, form):
    """Sets the content of the agreed_to_tos_on field and replaces.

    Also replaces the agreed_to_tos field with a hidden field when the
    ToS has been signed.

    For params see base.View._editGet().
    """
    if entity.agreed_to_tos:
      # agreement already signed: render as a hidden, pre-set field so it
      # cannot be unchecked
      form.fields['agreed_to_admin_agreement'] = forms.fields.BooleanField(
          widget=forms.HiddenInput, initial=entity.agreed_to_tos,
          required=True)

    super(View, self)._editGet(request, entity, form)

  def _editContext(self, request, context):
    """See base.View._editContext.
    """
    # imported locally, presumably to avoid a circular import at module
    # load time -- TODO confirm
    from soc.logic.models.org_app_survey import logic as org_app_logic

    entity = context['entity']
    form = context['form']

    # determine the organization key name from the form or the POST data
    if 'scope_path' in form.initial:
      scope_path = form.initial['scope_path']
    elif 'scope_path' in request.POST:
      scope_path = request.POST['scope_path']
    else:
      # no organization known: drop the agreement widget entirely
      form.fields['admin_agreement'] = None
      return

    org_entity = self._params['group_logic'].getFromKeyNameOr404(scope_path)
    org_app = org_app_logic.getForProgramOr404(org_entity.scope)

    if org_app:
      # the main admin of the org application has implicitly agreed when
      # submitting the application; pre-check the box for them
      user_entity = context['user']
      fields = {'main_admin': user_entity,
                'survey': org_app}
      record_logic = org_app_logic.getRecordLogic()
      org_app_record = record_logic.getForFields(fields, unique=True)

      if not entity and org_app_record:
        form.fields['agreed_to_admin_agreement'].initial = True

    if not (org_entity and org_entity.scope and
            org_entity.scope.org_admin_agreement):
      # the program defines no admin agreement document
      return

    # embed the agreement document text and a link to its public page
    agreement = org_entity.scope.org_admin_agreement

    content = agreement.content
    params = {'url_name': 'document'}

    widget = form.fields['admin_agreement'].widget
    widget.text = content
    widget.url = redirects.getPublicRedirect(agreement, params)
# Singleton view instance backing the URL handlers below.
view = View()

# Legacy request handlers: every public entry point of this module is
# routed through the generic legacy-request redirector.
accept_invite = responses.redirectLegacyRequest
admin = responses.redirectLegacyRequest
create = responses.redirectLegacyRequest
delete = responses.redirectLegacyRequest
edit = responses.redirectLegacyRequest
invite = responses.redirectLegacyRequest
# NOTE: shadows the builtin `list` at module scope; the name is part of
# the URL-handler API and cannot be renamed safely.
list = responses.redirectLegacyRequest
manage = responses.redirectLegacyRequest
process_request = responses.redirectLegacyRequest
public = responses.redirectLegacyRequest
export = responses.redirectLegacyRequest
| |
from django.contrib.auth.models import User, Group
from rest_framework import viewsets
from richtigTanken.serializers import UserSerializer, GroupSerializer, FahrtDatenSerializer, UserPositionsSerializer #hieranders
from models import FahrtDaten, UserPositions
from django.http import HttpResponse
from django.http import JsonResponse
from django.template import RequestContext, loader
from django.shortcuts import render
from django.views.decorators.http import require_http_methods
import json
from models import UserPositions, FahrtDaten, Tankstellen, BenzinPreis
import datetime
import math
import copy
# Simulated current tank level in litres; module-global state mutated by
# the endRoute and addWaypoint views.
tankstand = 45.0
class UserViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows users to be viewed or edited.
    """
    # Standard DRF CRUD over Django auth users.
    queryset = User.objects.all()
    serializer_class = UserSerializer
class GroupViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows groups to be viewed or edited.
    """
    queryset = Group.objects.all()
    serializer_class = GroupSerializer
class FahrtDatenViewSet(viewsets.ModelViewSet):
    """API endpoint for recorded drives (FahrtDaten)."""
    queryset = FahrtDaten.objects.all()
    serializer_class = FahrtDatenSerializer
class UserPositionsViewSet(viewsets.ModelViewSet):
    """API endpoint for recorded GPS waypoints (UserPositions)."""
    queryset = UserPositions.objects.all()
    serializer_class = UserPositionsSerializer
class BenzinPreisViewSet(viewsets.ModelViewSet):
    """API endpoint for fuel-price records (BenzinPreis)."""
    # NOTE(review): unlike the sibling viewsets, no serializer_class is set;
    # DRF will raise an assertion when this viewset serializes. Confirm
    # whether a BenzinPreisSerializer should be declared and assigned here.
    queryset = BenzinPreis.objects.all()
def index(request):
    """Render the application's start page."""
    return render(request, 'richtigTanken/index.html')
def getAllGasStations(request):
    """Return every known gas station (name + coordinates) as JSON."""
    stations = [
        {
            'name': tankstelle.bezeichnung,
            'lat': tankstelle.position_x,
            'lng': tankstelle.position_y
        }
        for tankstelle in Tankstellen.objects.all()
    ]
    return JsonResponse({'stations': stations}, safe=False)
def getGasStations(request):
    """Return a fixed demo set of gas-station coordinates as JSON."""
    payload = {
        'stations': [
            { 'lat': 52.53398, 'lng': 13.409852 },
            { 'lat': 52.50178, 'lng': 13.404832 },
            { 'lat': 52.50048, 'lng': 13.409842 },
            { 'lat': 52.50195, 'lng': 13.406882 },
        ]
    }
    return JsonResponse(payload, safe=False)
@require_http_methods(["POST",])
def endRoute(request):
    """Finish the current trip.

    Sums up distance and fuel consumption over all recorded waypoints,
    clears the waypoint table and resets the simulated tank level.
    Always answers with a plain "OK".
    """
    positionen = UserPositions.objects.all().order_by('zeit')
    if not positionen:
        # BUGFIX: positionen[0] raised IndexError when no waypoints had
        # been recorded; treat an empty trip as a no-op.
        return HttpResponse("OK")
    current_pos = positionen[0]
    # Renamed from `distance` to avoid shadowing the module-level search
    # radius constant of the same name.
    total_distance = 0
    verbrauch = 0
    for elem in positionen:
        # First iteration contributes 0 km (elem == current_pos).
        delta = distance_on_unit_sphere(float(current_pos.position_x), float(current_pos.position_y),
                                        float(elem.position_x), float(elem.position_y))
        total_distance = total_distance + delta
        current_pos = elem
        verbrauch = verbrauch + elem.benzin_delta_in_l
    total_distance = float('%.1f' % total_distance)
    verbrauch = float('%.2f' % verbrauch)
    UserPositions.objects.all().delete()
    global tankstand
    tankstand = 45.0  # reset the simulated tank level
    print(total_distance)
    print(verbrauch)
    print(request.user)
    return HttpResponse("OK")
# Search radius in kilometres used by normalize() and get_around_stations().
# NOTE: some views define locals named `distance` that shadow this constant.
distance = float(0.5)
def normalize(vector, user_position):
    """Scale `vector` in place so its great-circle length measured from
    `user_position` equals the module-level `distance` (km); returns it.

    NOTE(review): a zero-length vector makes the division raise
    ZeroDivisionError -- confirm callers guarantee movement.
    """
    dx, dy = vector[0], vector[1]
    lat = float(user_position.position_x)
    lng = float(user_position.position_y)
    length_km = distance_on_unit_sphere(lat, lng, lat + dx, lng + dy)
    scale = distance / length_km
    vector[0] = dx * scale
    vector[1] = dy * scale
    return vector
def get_around_stations():
    """Return stations within `distance` km of the latest waypoint,
    cheapest first (preserves the price ordering of the query)."""
    cur = UserPositions.objects.all().order_by('-zeit')[0]
    stations = list(Tankstellen.objects.all().order_by('preis'))
    # BUGFIX/cleanup: the original deepcopy + list.remove() approach
    # matched deepcopied model instances by pk equality (ValueError for
    # unsaved pk-less objects) and was O(n^2); a comprehension filter is
    # equivalent for saved rows and safe.
    return [
        s for s in stations
        if float(distance_on_unit_sphere(float(cur.position_x), float(cur.position_y),
                                         float(s.position_x), float(s.position_y))) <= distance
    ]
def distance_on_unit_sphere(lat1, long1, lat2, long2):
    """Great-circle distance in kilometres between two lat/long points.

    Uses the spherical law of cosines on colatitudes (phi = 90 - latitude)
    with an Earth radius of 6371 km.
    """
    # Colatitudes and longitudes in radians.
    phi1 = math.radians(90.0 - lat1)
    phi2 = math.radians(90.0 - lat2)
    theta1 = math.radians(long1)
    theta2 = math.radians(long2)

    # cos(arc) = sin(phi1) sin(phi2) cos(theta1 - theta2) + cos(phi1) cos(phi2)
    cos_arc = (math.sin(phi1) * math.sin(phi2) * math.cos(theta1 - theta2)
               + math.cos(phi1) * math.cos(phi2))
    arc = math.acos(cos_arc)

    # Arc length on the unit sphere scaled to kilometres.
    return arc * 6371
def get_ersparnis(tankstand, stations):
    """Estimate the maximum saving achievable by refuelling nearby now.

    Compares the predicted price (price trend over the days the current
    tank will still last) against the three cheapest candidate stations.
    Returns (stations, max_ersparnis); ([], 0) when no station beats the
    predicted price.
    """
    # Predicted prices for the days the remaining fuel lasts.
    # NOTE(review): get_trends may return an empty list, which would make
    # tankenPreis[0] raise IndexError -- confirm the data guarantees.
    tankenPreis = get_trends(get_reach(tankstand))
    # test trend for tankenPreis[0] and compare with surrounding stations (current prices)
    stations = sorted(stations, key=lambda station: station.preis)
    stations = stations[0:3]
    if not stations:
        return [], 0
    # 60 liter tank
    tankstand = tankstand / 60.0
    # Weighting factor: the emptier the tank, the closer tankval is to 1.
    tankval = 1.0 - float(tankstand/2.0)
    tankstand = tankstand * 60.0
    #print(float(stations[0].preis))
    #print(tankenPreis[0])
    #if (float(stations[0].preis) * float(tankval)) > tankenPreis[0]:
    #    print("hier null")
    #    return [], 0
    # Drop stations whose weighted current price exceeds the prediction.
    for elem in copy.deepcopy(stations):
        if (float(elem.preis) * tankval) > tankenPreis[0]:
            stations.remove(elem)
    if not stations:
        return [], 0
    # Saving = price difference times the litres needed to fill a 60 l tank.
    max_ersparnis = (float(tankenPreis[0]) - float(stations[0].preis)) * (60.0-float(tankstand))
    print("Maximale ersparnis: %s" % max_ersparnis)
    return stations, max_ersparnis
def average(val1, val2):
    """Arithmetic mean of the two values."""
    total = val1 + val2
    return total / 2
def get_average_consumption_per_day(days=14):
    """Average fuel consumption per day over all recorded drives.

    Args:
        days: number of days the recorded history is assumed to span.
              Generalizes the previously hard-coded 14 (two weeks);
              the default keeps existing callers unchanged.
    """
    consumption = 0
    for drive in FahrtDaten.objects.all():
        consumption = consumption + drive.spritverbrauch_in_l
    return consumption / days
def get_reach(fuel_level):
    """Estimate how many days `fuel_level` litres of fuel will last.

    Simulates day-by-day consumption, blending each historic day's
    absolute consumption with the overall daily average. The walk starts
    one week in the past and stops at today, so the result is capped at 7.
    """
    average_consumption = get_average_consumption_per_day()
    lasting_days = 0
    today = datetime.datetime.now().date()
    day = today - datetime.timedelta(days=7)

    def get_daily_absolute_consumption(dayy):
        # Total consumption of all drives that started on `dayy`.
        drives = list(FahrtDaten.objects.all())
        consumption = 0
        for drive in copy.deepcopy(drives):
            if drive.start_zeit.date() != dayy:
                drives.remove(drive)
        for drive in drives:
            consumption = consumption + drive.spritverbrauch_in_l
        return float(consumption)

    # Consume fuel day by day until the tank is empty or we reach today.
    while fuel_level > 0 and day != today:
        lasting_days = lasting_days + 1
        fuel_level = fuel_level - average(get_daily_absolute_consumption(day), float(average_consumption))
        day = day + datetime.timedelta(days=1)
    return lasting_days
def get_trends(daysCount):
    """Average fuel price per day for `daysCount` consecutive days.

    Starts at the fixed reference timestamp 2015-02-08 16:00 and returns
    one averaged price per day that has matching BenzinPreis rows (days
    without data are skipped, so the list may be shorter than daysCount).
    """
    result = []
    # BUGFIX: the start date was built with the literal `02` (an invalid
    # octal-style token on Python 3) and `day + i` overflowed past the end
    # of the month for larger daysCount; timedelta arithmetic fixes both
    # while producing identical dates in the previously valid range.
    start = datetime.datetime(2015, 2, 8, 16)
    for i in range(0, daysCount):
        date = start + datetime.timedelta(days=i)
        tanken = BenzinPreis.objects.all().filter(start_zeit=date)
        tankenPreis = 0
        for tanke in tanken:
            tankenPreis = tankenPreis + tanke.preis
        if tanken:
            result.append(tankenPreis / len(tanken))
    return result
def get_average_consumption_per_track():
    """Litres of fuel consumed per kilometre over all recorded drives.

    NOTE(review): raises ZeroDivisionError when there are no drives
    (track stays 0). Also reads `drive.streckenlaengekm`, while the
    commented-out create call elsewhere spells the field
    `strecken_laengekm` -- confirm against the model definition.
    """
    drives = FahrtDaten.objects.all()
    consumption = 0
    track = 0
    for drive in drives:
        consumption = consumption + drive.spritverbrauch_in_l
        track = track + drive.streckenlaengekm
    return consumption / track
def get_near_stations(request, tankstand):
    """Pick cheap stations ahead of the user's travel direction and
    report them (plus an estimated saving) as JSON.

    The travel direction is accumulated from all recorded waypoints;
    stations on the wrong side of the line perpendicular to that
    direction are discarded before the saving is estimated.
    """
    waypoints = UserPositions.objects.all().order_by('zeit')
    direction = [0.0,0.0]
    cur = waypoints[0]
    for elem in waypoints:
        direction[0] = direction[0] + float(elem.position_x - cur.position_x)
        direction[1] = direction[1] + float(elem.position_y - cur.position_y)
        cur = elem
    # NOTE(review): with a single waypoint the direction vector is zero and
    # normalize() divides by zero -- confirm callers guarantee movement.
    direction = normalize(direction, waypoints[0])
    # Vector perpendicular to the travel direction (rotated by -90 deg).
    direction_rotate = [direction[1], -direction[0]]
    left_point = [float(cur.position_x) - 0.5 * direction_rotate[0], float(cur.position_y) - 0.5 * direction_rotate[1]]
    stations = get_around_stations()
    stationsSammel = copy.deepcopy(stations)
    for station in stationsSammel:
        # Keep only stations on the forward side of the dividing line.
        # NOTE(review): division by direction_rotate[0] fails when the
        # travel direction is exactly north/south -- confirm.
        helper = (float(station.position_x) - left_point[0]) / direction_rotate[0]
        if (direction_rotate[1] * helper + left_point[1] < station.position_y):
            stations.remove(station)
    for station in stations:
        print(station.bezeichnung)
    stations, max_ersparnis = get_ersparnis(tankstand, stations)
    if max_ersparnis > 0:
        max_ersparnis = max_ersparnis + 0.5
    if max_ersparnis < 0:
        max_ersparnis = 0
    # Traffic-light colour for the client UI based on the tank level.
    farbe = 'green'
    if tankstand < 40.0:
        farbe = 'gelb'
    if tankstand < 20.0:
        farbe = 'rot'
    data = {
        'ersparnis': max_ersparnis,
        'farbe': farbe,
        'stations': [] }
    for elem in stations:
        station = {
            'name': elem.bezeichnung,
            'benzin': "%s" % float(elem.preis),
            'lat': elem.position_x,
            'lng': elem.position_y
        }
        data['stations'].append(station)
    return JsonResponse(data, safe=False)
@require_http_methods(["POST"])
def addWaypoint(request):
    """Record a GPS waypoint posted as JSON and return nearby stations.

    Expects a body like {"lat": .., "lng": .., "verbrauch": ..}. Also
    decrements the simulated global tank level by 0.7 l per waypoint and
    refills it to 55 l once it drops below 5 l (demo behaviour).
    """
    json_data = json.loads(request.body)
    x = json_data['lat']
    y = json_data['lng']
    verbrauch = json_data['verbrauch']
    neuerWert = UserPositions.objects.create(zeit = datetime.datetime.now(), benzin_delta_in_l = verbrauch, position_x = x, position_y = y)
    neuerWert.save()
    global tankstand
    tankstand = float(tankstand) - 0.7
    if tankstand < 5.0:
        # simulated refill
        tankstand = 55.0
    print(tankstand)
    return get_near_stations(request, tankstand)
| |
import re
import os
# from app.subsystem.courses import Course
from app.subsystem.courses.academicprogram import AcademicProgram
from app.subsystem.courses.course import Course
from app.subsystem.courses.option import Option
from app.subsystem.courses.option import OptionChoices
from app.subsystem.courses.option import TypeChoices
# from app.subsystem.courses import AcademicProgram
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
    """Populate AcademicProgram options from the bundled course lists.

    The original implementation repeated the same ~50-line parse/classify
    loop four times (games, avionics, web, general); the shared logic now
    lives in _populate_options().
    """
    help = 'Populates credits from json file to correct errors'

    # (pattern, type choice, atleast_one), checked in priority order --
    # the first pattern found in a line determines the option type.
    # '...\W' patterns catch the starred variants ("Requir*", "Tech*").
    _TYPE_PATTERNS = [
        (r'Required', TypeChoices.REQUIRED, False),
        (r'Requir\W', TypeChoices.REQUIRED, True),
        (r'GeneralElective', TypeChoices.GENERAL_ELECTIVES, False),
        (r'Science', TypeChoices.SCIENCE, False),
        (r'Technical', TypeChoices.TECHNICAL, False),
        (r'Tech\W', TypeChoices.TECHNICAL, True),
    ]

    def _populate_options(self, lines, option_name, option_choice, program):
        """Create an Option row for every course line that names a type.

        Each line must contain a course key like 'SOEN341'; lines without
        one raise AttributeError (unchanged from the original behaviour).
        """
        for line in lines:
            match = re.search(r'[A-Z]{4}\d{3}', line)
            dptnum = match.group(0)
            for pattern, type_choice, atleast_one in self._TYPE_PATTERNS:
                if re.search(pattern, line):
                    course = Course.objects.get(pk=dptnum)
                    Option(name=option_name, course=course,
                           academicprogram=program, option=option_choice,
                           type=type_choice, atleast_one=atleast_one).save()
                    break

    def handle(self, *args, **options):
        """Create the SOEN program and load all four option lists."""
        # os.path.join replaces the hard-coded Windows '\\' separators so
        # the command also runs on POSIX systems.
        base = os.path.join(os.getcwd(), 'app', 'management', 'commands')
        sources = [
            ('computergamescourse.txt', 'Games', OptionChoices.GAMES),
            ('avionicscourses.txt', 'Avionics', OptionChoices.AVIONICS),
            ('webcourses.txt', 'Webs', OptionChoices.WEB),
            ('generalcourses.txt', 'General', OptionChoices.GENERAL),
        ]

        program = AcademicProgram(name="SOEN", credits=120)
        program.save()

        for filename, option_name, option_choice in sources:
            # Files were previously opened without ever being closed.
            with open(os.path.join(base, filename), 'r') as source:
                self._populate_options(source.readlines(), option_name,
                                       option_choice, program)
| |
import boto3
import re
import os
import socket
import json
# List of all EIPs in the given region
def list_eips(region, filter):
    """Return all Elastic IPs in `region`, excluding any listed in `filter`."""
    all_eips = []
    print("Connecting to ec2...")
    client = boto3.client('ec2', region_name=region)
    if client:
        print("Getting all EIPs in region %s" % region)
        response = client.describe_addresses()
        all_eips = [
            address['PublicIp']
            for address in response['Addresses']
            if address['PublicIp'] not in filter
        ]
    return all_eips
# List IPs of load balancer
def list_balancer_ips(dns_name):
    """Resolve `dns_name` and return its list of IP addresses."""
    print(" Getting IP(s) for URL %s..." % dns_name)
    host_info = socket.gethostbyname_ex(dns_name)
    return host_info[2]
# Name of active load balancer
def get_active_balancer(dns_name, region):
    """Return the name of the load balancer behind a Route53 record.

    Finds the hosted zone matching `dns_name` (or its parent domain),
    reads the first A record starting at that name, and extracts the ELB
    name from its alias target. Returns None when nothing matches.
    """
    print('Finding the active load balancer behind %s' % dns_name)
    lb_name = None
    print("Connecting to route53...")
    r53_client = boto3.client('route53', region_name=region)
    if r53_client:
        zones = r53_client.list_hosted_zones()
        chosen_zone = None
        print("Looking up zone ID...")
        # Parent domain: drop the leftmost label (www.example.com -> example.com).
        temp_dns_name = dns_name.split('.', 1)[-1]
        print("Temp dns_name is %s" % temp_dns_name)
        for zone in zones['HostedZones']:
            # Hosted-zone names carry a trailing dot; [:-1] strips it.
            if zone['Name'][:-1] == dns_name:
                print("Found zone that equals the dns name: %s" % zone['Name'])
                # zone['Id'] looks like '/hostedzone/XXXX'; [12:] keeps the bare ID.
                chosen_zone = zone['Id'][12:]
                break
            elif zone['Name'][:-1] == temp_dns_name:
                # Parent-domain match: remember it, but keep scanning in
                # case an exact match appears later.
                print("Found zone that equals the temp dns name: %s" % zone['Name'])
                chosen_zone = zone['Id'][12:]
        if chosen_zone:
            print("Retrieving record sets...")
            rset = r53_client.list_resource_record_sets(HostedZoneId=chosen_zone,
                                                        StartRecordName=dns_name,
                                                        StartRecordType="A",
                                                        MaxItems="1")['ResourceRecordSets'][0]
            print("Record set retrieved is : ")
            print(rset)
            if 'AliasTarget' in rset:
                lb_name = rset['AliasTarget']['DNSName']
                if lb_name.startswith("dualstack"):
                    lb_name = lb_name.split("dualstack.")[1]
                # Split on periods, take the first group (lbname dns), split on hyphens and take all but the end and rejoin with hyphens
                lb_name = "-".join(lb_name.split(".")[0].split("-")[:-1])
        print("Retrieved load-balancer: " + str(lb_name))
    else:
        print("ERROR: Failed to connect to R53")
    return lb_name
# All Classic load balancers
def _get_v1_lbs(elb_client, next_marker=None):
"""Get the v1 ELBs"""
result = []
if next_marker:
query_result = elb_client.describe_load_balancers(Marker=next_marker)
else:
query_result = elb_client.describe_load_balancers()
if 'NextMarker' in query_result:
result.extend(query_result['LoadBalancerDescriptions'])
result.extend(_get_v1_lbs(elb_client, next_marker=query_result['NextMarker']))
else:
result.extend(query_result['LoadBalancerDescriptions'])
return result
# All Application and Network load balancers
def _get_v2_lbs(elb_client, next_marker=None):
"""Get the v2 ELBs"""
result = []
if next_marker:
query_result = elb_client.describe_load_balancers(Marker=next_marker)
else:
query_result = elb_client.describe_load_balancers()
if 'NextMarker' in query_result:
result.extend(query_result['LoadBalancers'])
result.extend(_get_v2_lbs(elb_client, next_marker=query_result['NextMarker']))
else:
result.extend(query_result['LoadBalancers'])
return result
# Get all instances in a target group
def _get_instances_for_target_group(elbv2, tg_arn, target_type, region):
    """Resolve the EC2 instance IDs behind a target group.

    For 'instance' targets the IDs are read directly. For 'ip' targets the
    IP is traced back through its network interface to an inner ELB and
    the lookup recurses into that balancer's target groups. Returns a
    de-duplicated list of instance IDs (order not guaranteed).
    """
    instances = []
    ec2_client = boto3.client('ec2', region_name=region)
    if ec2_client:
        tg_health_desc = elbv2.describe_target_health(TargetGroupArn=tg_arn)
        if 'instance' in target_type:
            print('Target Type is instance - can get the instances directly from this target group')
            found_instances = [inst['Target']['Id'] for inst in tg_health_desc['TargetHealthDescriptions']]
            print("Instances discovered: %s" % str(found_instances))
            instances.extend(found_instances)
        elif 'ip' in target_type:
            print('Target Type is ip - need to determine what the IP is attached to')
            for target in tg_health_desc['TargetHealthDescriptions']:
                ip = target['Target']['Id']
                # Find the network interface that owns this private IP.
                filter = {'Name': 'addresses.private-ip-address', 'Values': [ip]}
                query_result = ec2_client.describe_network_interfaces(Filters=[filter])
                if 'NetworkInterfaces' in query_result:
                    nic_details = query_result['NetworkInterfaces'][0]
                    # Make sure this is an ELB
                    if 'amazon-elb' in nic_details['Attachment']['InstanceOwnerId']:
                        # get the lb_name this IP is attached to
                        # 'ELB app/awseb-AWSEB-19KDLWH6ZMJA2/f8992902ed546a45'
                        interface_description = nic_details['Description']
                        lb_name = interface_description.split('/')[1]
                        print('Given IP target is attached to load balancer with name: %s' % lb_name)
                        query_result = elbv2.describe_load_balancers(Names=[lb_name])
                        if 'LoadBalancers' in query_result:
                            lb_details = query_result['LoadBalancers'][0]
                            lb_arn = lb_details['LoadBalancerArn']
                            # Get the target groups for this load balancer
                            response = elbv2.describe_target_groups(LoadBalancerArn=lb_arn)
                            if 'TargetGroups' in response:
                                lb_tgs = response['TargetGroups']
                                # Recurse into the inner balancer's target groups.
                                for tg in lb_tgs:
                                    tg_arn = tg['TargetGroupArn']
                                    target_type = tg['TargetType']
                                    instances.extend(_get_instances_for_target_group(elbv2, tg_arn, target_type, region))
        else:
            print('Target Type is: %s - unhandled' % target_type)
    else:
        print("ERROR: Failed to connect to EC2")
    return list(set(instances))
# Get the public IPs for the given instances
def _get_instances_public_ip(ec2_client, instances):
instance_ips = []
reservations = ec2_client.describe_instances(InstanceIds=instances)['Reservations']
for r in reservations:
for instance in r['Instances']:
if 'PublicIpAddress' in instance:
instance_ips.append(instance['PublicIpAddress'])
else:
print("The instance %s has no public IP" % str(instance['InstanceId']))
return list(set(instance_ips))
# IPs of running instances
def list_instance_ips(lb_name, region):
    """Return the public IPs of instances behind the named load balancer.

    Searches classic (v1) load balancers first, then application/network
    (v2) load balancers. ``lb_name`` is matched as a substring of each
    balancer's lowercased name. Returns a list of public IP strings
    (empty if the balancer is not found or has no public instances).
    """
    print("Looking for instances behind load balancer %s..." % lb_name)
    instance_ips = []
    lb_found = False
    print("Connecting to ec2 elb v1...")
    elbv1 = boto3.client('elb', region_name=region)
    if elbv1:
        # print("Connected!")
        print("Retrieving classic load balancers...")
        v1_lbs = _get_v1_lbs(elbv1, next_marker=None)
        ec2_client = boto3.client('ec2', region_name=region)
        if ec2_client:
            for lb in v1_lbs:
                # Case-insensitive substring match on the balancer name.
                if lb_name in lb['LoadBalancerName'].lower():
                    print("Found the load balancer")
                    print("Processing instances for ELB %s" % lb['LoadBalancerName'])
                    instances = [inst['InstanceId'] for inst in lb['Instances']]
                    print("Instances discovered: %s" % str(instances))
                    if instances:
                        instance_ips.extend(_get_instances_public_ip(ec2_client, instances))
                    lb_found = True
                    break
            # Only look at v2 load balancers if we haven't already found the load balancer above
            if not lb_found:
                print("Didn't find the load balancer in the list of classic load balancers")
                print("Connecting to ec2 elb v2...")
                elbv2 = boto3.client('elbv2', region_name=region)
                if elbv2:
                    # print("Connected!")
                    print("Retrieving V2 load balancers...")
                    v2_lbs = _get_v2_lbs(elbv2, next_marker=None)
                    for lb in v2_lbs:
                        if lb_name in lb['LoadBalancerName'].lower():
                            print("Found the load balancer")
                            print("Processing target groups for %s" % lb['LoadBalancerName'])
                            lb_arn = lb['LoadBalancerArn']
                            # Get the target groups for this load balancer
                            response = elbv2.describe_target_groups(LoadBalancerArn=lb_arn)
                            if 'TargetGroups' in response:
                                lb_tgs = response['TargetGroups']
                                # print('LB Target Groups: %s' % str(lb_tgs))
                                for tg in lb_tgs:
                                    # print("target group: %s" % str(tg))
                                    tg_arn = tg['TargetGroupArn']
                                    target_type = tg['TargetType']
                                    # Resolve the target group down to instance IDs, then IPs.
                                    instances = _get_instances_for_target_group(elbv2, tg_arn, target_type, region)
                                    if instances:
                                        instance_ips.extend(_get_instances_public_ip(ec2_client, instances))
                            lb_found = True
                            break
                    if not lb_found:
                        print("Didn't find the load balancer in the list of application/network load balancers")
                else:
                    print("ERROR: Failed to connect to ELBV2")
        else:
            print("ERROR: Failed to connect to EC2")
    else:
        print("ERROR: Failed to connect to ELB")
    return instance_ips
# Get a file from S3
def get_file(bucket_name, s3_path, local_path):
    """Download bucket_name/s3_path to local_path, replacing any existing file.

    Returns True when the file exists locally after the download.
    """
    if os.path.isfile(local_path):
        print("Deleting current file...")
        os.remove(local_path)
        print("Done")
    print("Retrieving config file...")
    s3 = boto3.resource('s3')
    s3.Bucket(bucket_name).download_file(s3_path, local_path)
    print("Done")
    return os.path.exists(local_path)
# Get a file date from S3
def get_file_date(bucket_name, s3_path):
    """Return the LastModified timestamp of an S3 object."""
    print("Retrieving config file date...")
    modified = boto3.resource('s3').Object(bucket_name, s3_path).last_modified
    print("Done")
    return modified
# Get json file contents from S3
def get_file_contents(bucket_name, s3_path):
    """Fetch and JSON-decode an S3 object; return None on any failure."""
    print(f"Retrieving config file ({bucket_name}/{s3_path})")
    contents = None
    s3_client = boto3.session.Session().client('s3')
    try:
        response = s3_client.get_object(Bucket=bucket_name, Key=s3_path)
        if 'Body' in response:
            contents = json.loads(response['Body'].read().decode())
    except Exception as e:
        # Best-effort: report the failure and fall through with None.
        print(f"Exception fetching S3 object ({bucket_name}/{s3_path})" + str(e))
    print("Done")
    return contents
def get_all_records(r53_client, zone_id, start_record_name=None, start_record_type=None, start_record_identifier=None):
    """Fetch every resource record set in a hosted zone, following pagination.

    The optional start_* arguments are the Route53 continuation markers;
    callers normally omit them and receive the complete record list.
    """
    collected = []
    name = start_record_name
    rtype = start_record_type
    ident = start_record_identifier
    while True:
        # Route53 rejects unknown/None markers, so only pass the ones we have.
        if ident:
            page = r53_client.list_resource_record_sets(HostedZoneId=zone_id,
                                                        StartRecordName=name,
                                                        StartRecordType=rtype,
                                                        StartRecordIdentifier=ident)
        elif name:
            page = r53_client.list_resource_record_sets(HostedZoneId=zone_id,
                                                        StartRecordName=name,
                                                        StartRecordType=rtype)
        else:
            page = r53_client.list_resource_record_sets(HostedZoneId=zone_id)
        if not page:
            break
        # print('Found %s records' % page['MaxItems'])
        collected.extend(page['ResourceRecordSets'])
        if not ('IsTruncated' in page and page['IsTruncated']):
            break
        name = page['NextRecordName']
        rtype = page['NextRecordType']
        ident = page['NextRecordIdentifier'] if 'NextRecordIdentifier' in page else None
    return collected
# Return prefixed record sets of a hosted zone ID
def get_records_from_zone(zone_id, record_prefixes):
    """Return unique IPv4 addresses for zone records matching the prefix(es).

    zone_id -- Route53 hosted zone ID.
    record_prefixes -- regex prefix string, or list of them, matched against
    each record name with re.match(). Values that already look like IPv4
    addresses are taken as-is; other values are resolved through DNS.
    """
    print(" Enter get records from zone")
    entries = []
    r53_client = boto3.client('route53')
    if r53_client:
        # Kinda hacky to support both arrays and strings as a value
        if not isinstance(record_prefixes, list):
            record_prefixes = [record_prefixes]
        print(" record_prefixes: " + str(record_prefixes))
        # Get all records:
        resource_record_sets = get_all_records(r53_client, zone_id)
        print(' Found %s resource records for zone %s' % (str(len(resource_record_sets)), zone_id))
        # FIX: raw string for the regex -- "\." in a normal string literal is an
        # invalid escape sequence (DeprecationWarning today, an error in future
        # Python). Compiled once here instead of on every record.
        ipv4_pattern = re.compile(r"^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$")
        for record in resource_record_sets:
            for prefix in record_prefixes:
                try:
                    if re.match(prefix, record['Name']):
                        if 'ResourceRecords' in record:
                            entry = record['ResourceRecords'][0]['Value']
                            # Check if it's not an IP address.. Since the way this is coded it's easier than
                            # checking the type (we're searching for an A record)
                            if not ipv4_pattern.match(entry):
                                try:
                                    for addr in [str(i[4][0]) for i in socket.getaddrinfo(entry, 80)]:
                                        if addr not in entries:
                                            entries.append(addr)
                                # Nothing we can do
                                except Exception:
                                    continue
                            else:
                                entries.append(entry)
                except Exception:
                    print(' Exception trying to match records')
                    continue
    print(' Found %s records that match given prefix' % (str(len(set(entries)))))
    return list(set(entries))
def get_zone_records(zone_id):
    """Return every resource record set in the given Route53 hosted zone."""
    r53_client = boto3.client('route53')
    if not r53_client:
        return []
    return get_all_records(r53_client, zone_id)
# Given a list of resource record sets, return prefixed record sets matching given prefix(es)
def get_matching_records(resource_record_sets, record_prefixes):
    """Return unique IPv4 addresses for records whose name matches a prefix.

    resource_record_sets -- list of Route53 record set dicts.
    record_prefixes -- regex prefix string, or list of them, matched against
    each record name with re.match(). A record value that already looks like
    an IPv4 address is kept as-is; anything else is resolved through DNS and
    every resulting address is collected. Returns a de-duplicated list.
    """
    entries = []
    if not isinstance(record_prefixes, list):
        record_prefixes = [record_prefixes]
    print(f" record_prefixes: {record_prefixes}")
    # FIX: raw string for the regex -- "\." in a normal string literal is an
    # invalid escape sequence (DeprecationWarning today, an error in future
    # Python). Compiled once here instead of on every record.
    ipv4_pattern = re.compile(r"^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$")
    for record in resource_record_sets:
        for prefix in record_prefixes:
            try:
                if re.match(prefix, record['Name']):
                    if 'ResourceRecords' in record:
                        entry = record['ResourceRecords'][0]['Value']
                        # Check if it's not an IP address.. Since the way this is coded it's easier than
                        # checking the type (we're searching for an A record)
                        if not ipv4_pattern.match(entry):
                            try:
                                for address in [str(i[4][0]) for i in socket.getaddrinfo(entry, 80)]:
                                    if address not in entries:
                                        entries.append(address)
                            # Nothing we can do
                            except Exception:
                                continue
                        else:
                            entries.append(entry)
            except Exception:
                print(' Exception trying to match records')
                continue
    print(' Found %s records that match given prefix' % (str(len(set(entries)))))
    return list(set(entries))
| |
#!/usr/bin/env python
"""
The WiPy firmware update script. Transmits the specified firmware file
over FTP, and then resets the WiPy and optionally verifies that software
was correctly updated.
Usage:
./update-wipy.py --file "path_to_mcuimg.bin" --verify
Or:
python update-wipy.py --file "path_to_mcuimg.bin"
"""
import sys
import argparse
import time
import socket
from ftplib import FTP
from telnetlib import Telnet
def print_exception(e):
    """Print an exception together with the line number where it was raised."""
    line_no = sys.exc_info()[-1].tb_lineno
    print("Exception: {}, on line {}".format(e, line_no))
def ftp_directory_exists(ftpobj, directory_name):
    """Return True if an entry named directory_name appears in the FTP LIST output."""
    listing = []
    ftpobj.retrlines("LIST", listing.append)
    # The entry name is the last whitespace-separated field of each LIST line.
    return any(line.split()[-1] == directory_name for line in listing)
def transfer_file(args):
    """Upload the firmware image to /flash/sys/mcuimg.bin on the WiPy over FTP.

    Returns True on a completed transfer, False on any FTP-level failure;
    file-system errors (e.g. missing image file) propagate to the caller.
    """
    with FTP(args.ip, timeout=20) as ftp:
        print("FTP connection established")
        if "230" not in ftp.login(args.user, args.password):
            print("Error: ftp login failed")
            return False
        print("Login successful")
        if "250" not in ftp.cwd("/flash"):
            print("Error: cannot enter /flash directory")
            return False
        if not ftp_directory_exists(ftp, "sys"):
            print("/flash/sys directory does not exist")
            if "550" in ftp.mkd("sys"):
                print("Error: cannot create /flash/sys directory")
                return False
            print("/flash/sys directory created")
        if "250" not in ftp.cwd("sys"):
            print("Error: cannot enter /flash/sys directory")
            return False
        print("Entered '/flash/sys' directory")
        with open(args.file, "rb") as fwfile:
            print("Firmware image found, initiating transfer...")
            # 512-byte blocks: the WiPy's FTP server works best with small chunks.
            if "226" in ftp.storbinary("STOR " + "mcuimg.bin", fwfile, 512):
                print("File transfer complete")
                return True
            print("Error: file transfer failed")
            return False
def reset_board(args):
    """Log in to the WiPy over telnet and trigger a machine.reset().

    Returns True when the reset sequence was sent, False on any failure;
    all exceptions are caught and reported via print_exception().
    """
    success = False
    try:
        tn = Telnet(args.ip, timeout=5)
        print("Connected via Telnet, trying to login now")
        if b"Login as:" in tn.read_until(b"Login as:", timeout=5):
            tn.write(bytes(args.user, "ascii") + b"\r\n")
            if b"Password:" in tn.read_until(b"Password:", timeout=5):
                # needed because of internal implementation details of the WiPy's telnet server
                time.sleep(0.2)
                tn.write(bytes(args.password, "ascii") + b"\r\n")
                # The REPL banner is the signal that login succeeded.
                if b'Type "help()" for more information.' in tn.read_until(
                    b'Type "help()" for more information.', timeout=5
                ):
                    print("Telnet login succeeded")
                    tn.write(b"\r\x03\x03")  # ctrl-C twice: interrupt any running program
                    time.sleep(1)
                    tn.write(b"\r\x02")  # ctrl-B: enter friendly REPL
                    # Wait for the friendly-REPL banner before issuing commands.
                    if b'Type "help()" for more information.' in tn.read_until(
                        b'Type "help()" for more information.', timeout=5
                    ):
                        tn.write(b"import machine\r\n")
                        tn.write(b"machine.reset()\r\n")
                        # Give the board time to actually go down before we return.
                        time.sleep(2)
                        print("Reset performed")
                        success = True
                    else:
                        print("Error: cannot enter friendly REPL")
                else:
                    print("Error: telnet login failed")
    except Exception as e:
        print_exception(e)
    finally:
        # tn may be unset if the Telnet() constructor itself failed.
        try:
            tn.close()
        except Exception as e:
            pass
    return success
def verify_update(args):
    """Reconnect to the WiPy over telnet and verify the running firmware.

    Compares the git tag in the WiPy's boot banner against either the
    --tag argument or the MICROPY_GIT_HASH value parsed from the build's
    generated genhdr/mpversion.h header. Returns True when they match.
    """
    success = False
    firmware_tag = ""

    def find_tag(tag):
        # 'firmware_tag' is the banner read from the board (closed over below).
        if tag in firmware_tag:
            print("Verification passed")
            return True
        else:
            print("Error: verification failed, the git tag doesn't match")
            return False

    retries = 0
    while True:
        try:
            # Specify a longer time out value here because the board has just been
            # reset and the wireless connection might not be fully established yet
            tn = Telnet(args.ip, timeout=10)
            print("Connected via telnet again, lets check the git tag")
            break
        except socket.timeout:
            if retries < 5:
                print("Timeout while connecting via telnet, retrying...")
                retries += 1
            else:
                print("Error: Telnet connection timed out!")
                return False

    try:
        firmware_tag = tn.read_until(b"with CC3200")
        # FIX: the original used args.file.rstrip("mcuimg.bin"); str.rstrip()
        # removes a *character set*, not a suffix, and can eat trailing
        # characters of the directory name. Strip the exact file name instead.
        firmware_name = "mcuimg.bin"
        base_path = args.file
        if base_path.endswith(firmware_name):
            base_path = base_path[: -len(firmware_name)]
        tag_file_path = base_path + "genhdr/mpversion.h"

        if args.tag is not None:
            success = find_tag(bytes(args.tag, "ascii"))
        else:
            with open(tag_file_path) as tag_file:
                for line in tag_file:
                    bline = bytes(line, "ascii")
                    if b"MICROPY_GIT_HASH" in bline:
                        # FIX: lstrip(b"#define MICROPY_GIT_HASH ") also strips a
                        # character set and could remove leading d/e/f hex digits
                        # of the hash. Split on the macro name instead.
                        value = bline.split(b"MICROPY_GIT_HASH", 1)[1]
                        success = find_tag(value.replace(b'"', b"").strip())
                        break
    except Exception as e:
        print_exception(e)
    finally:
        try:
            tn.close()
        except Exception:
            pass
    return success
def main():
    """Parse command-line arguments and drive the transfer/reset/verify flow."""
    cmd_parser = argparse.ArgumentParser(
        description="Update the WiPy firmware with the specified image file"
    )
    cmd_parser.add_argument("-f", "--file", default=None, help="the path of the firmware file")
    cmd_parser.add_argument("-u", "--user", default="micro", help="the username")
    cmd_parser.add_argument("-p", "--password", default="python", help="the login password")
    cmd_parser.add_argument("--ip", default="192.168.1.1", help="the ip address of the WiPy")
    cmd_parser.add_argument(
        "--verify", action="store_true", help="verify that the update succeeded"
    )
    cmd_parser.add_argument("-t", "--tag", default=None, help="git tag of the firmware image")
    args = cmd_parser.parse_args()

    # Exit status: 0 on success, 1 on any failure.
    result = 1
    try:
        if args.file is None:
            raise ValueError("the image file path must be specified")
        if transfer_file(args) and reset_board(args):
            if not args.verify:
                result = 0
            else:
                print("Waiting for the WiFi connection to come up again...")
                # this time is to allow the system's wireless network card to
                # connect to the WiPy again.
                time.sleep(5)
                result = 0 if verify_update(args) else 1
    except Exception as e:
        print_exception(e)
    finally:
        sys.exit(result)


if __name__ == "__main__":
    main()
| |
""" Module handles the heavy lifting, building the various site directories. """
import git, shutil, os, yaml, tempfile, datetime, time, copy
from drupdates.utils import Utils
from drupdates.settings import Settings
from drupdates.settings import DrupdatesError
from drupdates.drush import Drush
from drupdates.constructors.pmtools import Pmtools
from drupdates.sitebuild import Sitebuild
from git import Repo
from git import Actor
class DrupdatesUpdateError(DrupdatesError):
    """ Parent Drupdates site update error.

    Raised by the Siteupdate methods in this module, which construct it
    with error code 20 and a human-readable message.
    """
class Siteupdate(object):
    """ Update the modules and/or core in a completely built Drupal site. """

    def __init__(self, site_name, ssh, working_dir):
        self.settings = Settings()
        self.working_branch = self.settings.get('workingBranch')
        self._site_name = site_name
        self.working_dir = working_dir
        self.site_dir = os.path.join(working_dir, self._site_name)
        self.ssh = ssh
        self.utilities = Utils()
        # Populated later by run_updates() via 'drush dd'.
        self.site_web_root = None
        self._commit_hash = None
        # Populated by update() from 'drush st'.
        self.repo_status = None
        self.sub_sites = Drush.get_sub_site_aliases(self._site_name)

    @property
    def commit_hash(self):
        """ commit_hash getter. """
        return self._commit_hash

    @commit_hash.setter
    def commit_hash(self, value):
        """ commit_hash setter. """
        self._commit_hash = value

    def update(self):
        """ Set-up to and run Drush update(s) (i.e. up or ups).

        Returns a report dict describing the updates applied (or the lack
        thereof); raises DrupdatesUpdateError if the updates fail.
        """
        report = {}
        self.utilities.sys_commands(self, 'preUpdateCmds')
        self.repo_status = Drush.call(['st'], self._site_name, True)
        try:
            updates = self.run_updates()
        except DrupdatesError as updates_error:
            raise DrupdatesUpdateError(20, updates_error.msg)
        # If no updates move to the next repo
        if not updates:
            self.commit_hash = ""
            report['status'] = "Did not have any updates to apply"
            return report
        report['status'] = "The following updates were applied"
        report['updates'] = updates
        report['commit'] = "The commit hash is {0}".format(self.commit_hash)
        self.utilities.sys_commands(self, 'postUpdateCmds')
        if self.settings.get('submitDeployTicket') and self.commit_hash:
            report[self._site_name] = {}
            pm_name = self.settings.get('pmName').title()
            try:
                report[self._site_name][pm_name] = Pmtools().deploy_ticket(self._site_name,
                                                                           self.commit_hash)
            except DrupdatesError as api_error:
                report[self._site_name][pm_name] = api_error.msg
        return report

    def run_updates(self):
        """ Run the site updates.

        The updates are done either by downloading the updates, updating the
        make file or both.
        - First, run drush pm-updatestatus to get a list of eligible updates for
          the site/sub-sites.
        - Second, build the report to return to Updates().
        - Third, apply the updates.
        """
        updates = {}
        try:
            sites = self.get_sites_to_update()
        except DrupdatesError as update_status_error:
            raise DrupdatesUpdateError(20, update_status_error)
        if not sites['count']:
            return updates
        sites.pop('count')
        # Note: call Drush.call() without site alias as alias comes after dd argument.
        drush_dd = Drush.call(['dd', '@drupdates.' + self._site_name])
        self.site_web_root = drush_dd[0]
        # Create seperate commits for each project (ie module/theme)
        one_commit_per_project = self.settings.get('oneCommitPerProject')
        # Iterate through the site/sub-sites and perform updates, update files etc...
        sites_copy = copy.copy(sites)
        for site, data in sites.items():
            if 'modules' not in data:
                sites_copy.pop(site)
                continue
            modules = copy.copy(data['modules'])
            x = 0
            for project, descriptions in data['modules'].items():
                if self.settings.get('useMakeFile'):
                    self.update_make_file(project, descriptions['current'], descriptions['candidate'])
                if one_commit_per_project:
                    if x:
                        # Re-build the site from scratch for every commit after the first.
                        build = Sitebuild(self._site_name, self.ssh, self.working_dir)
                        build.build()
                    self._update_code(site, [project])
                    modules.pop(project)
                    updates = self._build_commit_message(sites_copy, site, project)
                    self._cleanup_and_commit(updates)
                    x += 1
            if self.settings.get('buildSource') == 'make' and self.settings.get('useMakeFile'):
                self.utilities.make_site(self._site_name, self.site_dir)
            elif len(modules):
                self._update_code(site, modules.keys())
        if not one_commit_per_project:
            updates = self._build_commit_message(sites_copy)
            self._cleanup_and_commit(updates)
        return updates

    def get_sites_to_update(self):
        """ Build dictionary of sites/sub-sites and modules needing updated. """
        ups_cmds = self.settings.get('upsCmds')
        updates_ret = {}
        count = 0
        sites = {}
        sites[self._site_name] = {}
        for alias, data in self.sub_sites.items():
            sites[alias] = {}
        for site in sites:
            try:
                updates_ret = Drush.call(ups_cmds, site, True)
            except DrupdatesError as updates_error:
                parse_error = updates_error.msg.split('\n')
                if parse_error[2][0:14] == "Drush message:":
                    # If there are not updates to apply.
                    continue
                else:
                    raise updates_error
            else:
                # Parse the results of drush pm-updatestatus
                count += len(updates_ret)
                modules = {}
                for module, update in updates_ret.items():
                    modules[module] = {}
                    api = update['api_version']
                    modules[module]['current'] = update['existing_version'].replace(api + '-', '')
                    modules[module]['candidate'] = update['candidate_version'].replace(api + '-', '')
                    msg = "Update {0} from {1} to {2}"
                    modules[module]['report_txt'] = msg.format(module.title(),
                                                               modules[module]['current'],
                                                               modules[module]['candidate'])
                sites[site]['modules'] = modules
        sites['count'] = count
        return sites

    def update_make_file(self, module, current, candidate):
        """ Update the make file.

        Keyword arguments:
        module -- the drupal module or core (required)
        current -- the current version
        candidate -- the version to update two
        """
        make_file = self.utilities.find_make_file(self._site_name, self.site_dir)
        make_format = self.settings.get('makeFormat')
        if make_format == 'make':
            with open(make_file) as openfile:
                makef = openfile.read()
            current_str = 'projects[{0}][version] = \"{1}\"'.format(module, current)
            candidate_str = 'projects[{0}][version] = \"{1}\"'.format(module, candidate)
            newdata = makef.replace(current_str, candidate_str)
            with open(make_file, 'w') as openfile:
                openfile.write(newdata)
        elif make_format == 'yaml':
            with open(make_file) as make:
                # FIX: yaml.load() without an explicit Loader is unsafe on
                # untrusted input and raises in PyYAML >= 6; safe_load parses
                # standard make files identically.
                makef = yaml.safe_load(make)
            makef['projects'][module]['version'] = str(candidate)
            # FIX: the output file handle was previously never closed.
            with open(make_file, 'w') as openfile:
                yaml.dump(makef, openfile, default_flow_style=False)

    def _update_code(self, site, modules):
        """ Run drush make or pm-update to make te actual code updates.

        Keyword arguments:
        site -- site alias of the site to update.
        modules -- list containing modules to update.
        """
        up_cmds = copy.copy(self.settings.get('upCmds'))
        up_cmds += modules
        try:
            Drush.call(up_cmds, site)
        except DrupdatesError as updates_error:
            raise updates_error

    def _build_commit_message(self, sites, site = '', module = ''):
        """ Build a commit message for one project update or multiple.

        Keyword arguments:
        sites -- dictionary containing meta data about update for each site.
        site -- if only one site needs updated.
        module -- if only one module needs updated.
        """
        msg = {}
        if module and site:
            msg[site] = [sites[site]['modules'][module]['report_txt']]
        else:
            for site, data in sites.items():
                msg[site] = []
                for module, status in data['modules'].items():
                    msg[site].append(status['report_txt'])
        return msg

    def _cleanup_and_commit(self, updates):
        """ Clean-up webroot and commit changes.

        Keyword arguments:
        updates -- list of update message to put in commit message.
        """
        self._clean_up_web_root()
        self._git_apply_changes(updates)

    def _git_apply_changes(self, updates):
        """ add/remove changed files.

        Keyword arguments:
        updates -- list of update message to put in commit message.

        notes:
        - Will ignore file mode changes and anything in the commonIgnore setting.
        """
        os.chdir(self.site_dir)
        repo = Repo(self.site_dir)
        # Revert any files the settings say to leave alone.
        for ignore_file in self.settings.get('commonIgnore'):
            try:
                repo.git.checkout(os.path.join(self.site_web_root, ignore_file))
            except git.exc.GitCommandError:
                pass
        if self.repo_status['modules'] and self.settings.get('ignoreCustomModules'):
            custom_module_dir = os.path.join(self.site_web_root,
                                             self.repo_status['modules'], 'custom')
            try:
                repo.git.checkout(custom_module_dir)
            except git.exc.GitCommandError:
                pass
        # Instruct Git to ignore file mode changes.
        cwriter = repo.config_writer('global')
        cwriter.set_value('core', 'fileMode', 'false')
        cwriter.release()
        # Add new/changed files to Git's index
        try:
            repo.git.add('--all')
        except git.exc.GitCommandError as git_add_error:
            raise DrupdatesUpdateError(20, git_add_error)
        # Remove deleted files from Git's index.
        deleted = repo.git.ls_files('--deleted')
        for filepath in deleted.split():
            repo.git.rm(filepath)
        # Commit all the changes.
        if self.settings.get('useFeatureBranch'):
            if self.settings.get('featureBranchName'):
                branch_name = self.settings.get('featureBranchName')
            else:
                ts = time.time()
                stamp = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
                branch_name = "drupdates-{0}".format(stamp)
            repo.git.checkout(self.working_branch, b=branch_name)
        else:
            branch_name = self.settings.get('workingBranch')
            repo.git.checkout(self.working_branch)
        msg = ''
        for site, update in updates.items():
            msg += "\n{0} \n {1}".format(site, '\n'.join(update))
        commit_author = Actor(self.settings.get('commitAuthorName'), self.settings.get('commitAuthorEmail'))
        repo.index.commit(message=msg, author=commit_author)
        # Save the commit hash for the Drupdates report to use.
        heads = repo.heads
        branch = heads[branch_name]
        self.commit_hash = branch.commit
        # Push the changes to the origin repo.
        repo.git.push(self._site_name, branch_name)

    def _clean_up_web_root(self):
        """ Clean-up artifacts from drush pm-update/core-quick-drupal. """
        use_make_file = self.settings.get('useMakeFile')
        if self.settings.get('buildSource') == 'make' and use_make_file:
            # Remove web root folder if repo only ships a make file.
            shutil.rmtree(self.site_web_root)
        else:
            rebuilt = self._rebuild_web_root()
            if not rebuilt:
                # FIX: 'report' was referenced here without ever being defined,
                # raising NameError instead of reporting the build failure.
                report = {}
                report['status'] = "The webroot re-build failed."
                if use_make_file:
                    make_err = " Ensure the make file format is correct "
                    make_err += "and Drush make didn't fail on a bad patch."
                    report['status'] += make_err
                return report
            # Remove <webroot>/drush folder
            drush_path = os.path.join(self.site_web_root, 'drush')
            if os.path.isdir(drush_path):
                self.utilities.remove_dir(drush_path)
            try:
                # Remove all SQLite files
                os.remove(self.repo_status['db-name'])
                for alias, data in self.sub_sites.items():
                    db_file = data['databases']['default']['default']['database']
                    if os.path.isfile(db_file):
                        os.remove(db_file)
            except OSError:
                pass

    def _rebuild_web_root(self):
        """ Rebuild the web root folder completely after running pm-update.

        Drush pm-update of Drupal Core deletes the .git folder therefore need to
        move the updated folder to a temp dir and re-build the webroot folder.
        """
        temp_dir = tempfile.mkdtemp(self._site_name)
        shutil.move(self.site_web_root, temp_dir)
        add_dir = self.settings.get('webrootDir')
        if add_dir:
            repo = Repo(self.site_dir)
            repo.git.checkout(add_dir)
        else:
            repo = Repo.init(self.site_dir)
            try:
                remote = git.Remote.create(repo, self._site_name, self.ssh)
            except git.exc.GitCommandError as error:
                if error.status == 128:
                    # FIX: status 128 means the remote already exists (common on
                    # re-runs); re-use it instead of leaving 'remote' unbound
                    # and crashing with NameError below.
                    remote = repo.remotes[self._site_name]
                else:
                    msg = "Could not establish a remote for the {0} repo".format(self._site_name)
                    print(msg)
                    # FIX: previously fell through to an unbound 'remote';
                    # fail with the module's error type instead.
                    raise DrupdatesUpdateError(20, msg)
            remote.fetch(self.working_branch)
            try:
                repo.git.checkout('FETCH_HEAD', b=self.working_branch)
            except git.exc.GitCommandError as error:
                repo.git.checkout(self.working_branch)
            add_dir = self._site_name
        # Drop the contributed module/theme dirs: they will be restored from
        # the updated copy in temp_dir.
        if 'modules' in self.repo_status:
            module_dir = self.repo_status['modules']
            shutil.rmtree(os.path.join(self.site_web_root, module_dir))
        if 'themes' in self.repo_status:
            theme_dir = self.repo_status['themes']
            shutil.rmtree(os.path.join(self.site_web_root, theme_dir))
        self.utilities.rm_common(self.site_web_root, os.path.join(temp_dir, add_dir))
        try:
            Utils.copytree(os.path.join(temp_dir, add_dir),
                           self.site_web_root,
                           symlinks=True)
        except OSError as copy_error:
            raise DrupdatesUpdateError(20, copy_error)
        except IOError as error:
            msg = "Can't copy updates from: \n"
            msg += "{0} temp dir to {1}\n".format(temp_dir, self.site_web_root)
            msg += "Error: {0}".format(error.strerror)
            raise DrupdatesUpdateError(20, msg)
        shutil.rmtree(temp_dir)
        return True
| |
#! usr/bin/python
# coding=utf-8
# Copyright 2014 Baidu, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
"""
This module provides a client class for infinite.
"""
import copy
import logging
import warnings
import baidubce
from baidubce import utils
from baidubce import compat
from baidubce.auth import bce_v1_signer
from baidubce.bce_base_client import BceBaseClient
from baidubce.http import bce_http_client
from baidubce.http import handler
from baidubce.http import http_headers
from baidubce.http import http_content_types
from baidubce.http import http_methods
from baidubce.utils import required
from baidubce.services import infinite
import http.client
from baidubce.exception import BceClientError
from baidubce.exception import BceServerError
from baidubce.bce_client_configuration import BceClientConfiguration
import uuid
_logger = logging.getLogger(__name__)
def _parse_http_response(http_response, response):
if http_response.status / 100 == http.client.CONTINUE / 100:
raise BceClientError('Can not handle 1xx http status code')
if http_response.status / 100 == http.client.OK / 100:
body = http_response.read()
if body:
response.__dict__.update({'Body': body})
http_response.close()
return True
bse = BceServerError(http_response.reason, request_id=response.metadata.bce_request_id)
bse.status_code = http_response.status
http_response.close()
raise bse
class InfiniteClient(BceBaseClient):
    """
    Infinite sdk client

    Thin wrapper over the BCE HTTP stack exposing endpoint invocation
    (predict/debug) and endpoint metadata APIs.
    """
    def __init__(self, config=None):
        """
        :param config: client configuration, or None for the defaults
        :type config: BceClientConfiguration
        """
        BceBaseClient.__init__(self, config)

    def _invoke_endpoint(self, endpoint_name, body, action,
                         variant_name, content_type, config, interface):
        """Shared POST helper for predict() and debug().

        Builds the query parameters and headers and posts ``body`` to
        ``<endpoint_name>/invocations`` with the given ``action``.
        """
        params = {}
        if variant_name is not None:
            params['variant'] = variant_name
        params['action'] = action
        params['interface'] = interface
        default_encoding = baidubce.DEFAULT_ENCODING
        content_type = content_type + '; charset=' + default_encoding
        headers = {
            http_headers.CONTENT_TYPE: content_type,
            # NOTE(review): a uuid.UUID object (not str) is passed through,
            # preserving the original behavior -- confirm the HTTP layer
            # stringifies header values.
            http_headers.BCE_REQUEST_ID: uuid.uuid4()
        }
        return self._send_request(
            http_method=http_methods.POST,
            function_name=endpoint_name + '/invocations',
            body=body,
            headers=headers,
            params=params,
            config=config)

    def predict(self, endpoint_name, body,
                variant_name=None, content_type='application/json', config=None,
                interface='predict'):
        """
        predict

        :param endpoint_name: endpoint name
        :type endpoint_name: string
        :param body: request data
        :type body: binary string or dict
        :param variant_name: variant name or None
        :type variant_name: string
        :param content_type: content type,supports application/json,x-image,and x-recordio-protobuf
        :type content_type: string
        :param config: None
        :type config: BceClientConfiguration
        :param interface: interface_name,
            several of predict/predict_proba/predict_log_proba/fit_predict/staged_predict/staged_predict are supported
            depend on frameworks and algorithm used
        :return: response as following format
            {
                Body: 'predict result'
            }
        :rtype: baidubce.bce_response.BceResponse
        """
        return self._invoke_endpoint(endpoint_name, body, 'predict',
                                     variant_name, content_type, config, interface)

    def debug(self, endpoint_name, body, variant_name=None,
              content_type='application/json', config=None,
              interface='predict'):
        """
        debug

        :param endpoint_name: endpoint name
        :type endpoint_name: string
        :param body: request data
        :type body: binary or dict
        :param variant_name: variant name or None
        :type variant_name: string
        :param content_type: content type,supports application/json,x-image,and x-recordio-protobuf
        :type content_type: string
        :param config: None
        :type config: BceClientConfiguration
        :param interface: interface_name,
            several of predict/predict_proba/predict_log_proba/fit_predict/staged_predict/staged_predict are supported
            depend on frameworks and algorithm used
        :return: response as following format
            {
                Body: 'debug info'
            }
        :rtype: baidubce.bce_response.BceResponse
        """
        return self._invoke_endpoint(endpoint_name, body, 'debug',
                                     variant_name, content_type, config, interface)

    def _json_headers(self):
        """Default JSON headers carrying a fresh request id."""
        return {
            http_headers.CONTENT_TYPE: http_content_types.JSON,
            http_headers.BCE_REQUEST_ID: uuid.uuid4()
        }

    def get_endpoint_list(self, config=None):
        """
        get all endpoint

        :param config: None
        :type config: BceClientConfiguration
        :return: response as following format
            {
                Body: '{"endpointList":["ep1_name","ep2_name"]}'
            }
        :rtype: baidubce.bce_response.BceResponse
        """
        return self._send_request(
            http_method=http_methods.GET,
            function_name='list',
            headers=self._json_headers(),
            config=config)

    def get_endpoint_info(self, endpoint_name, config=None):
        """
        get endpoint info

        :param endpoint_name: endpoint name
        :type endpoint_name: string
        :param config: None
        :type config: BceClientConfiguration
        :return: response whose Body is the endpoint's JSON description
            (endpoint_uuid plus its variant_configs)
        :rtype: baidubce.bce_response.BceResponse
        """
        return self._send_request(
            http_method=http_methods.GET,
            function_name=endpoint_name + '/info',
            headers=self._json_headers(),
            config=config)

    @staticmethod
    def _get_path(config, function_name=None):
        """Build the request path under the service URL prefix."""
        return utils.append_uri(infinite.URL_PREFIX, compat.convert_to_bytes(function_name))

    def _merge_config(self, config):
        """Overlay a per-call config on the client config and normalize it."""
        if config is None:
            return self._convert_config(self.config)
        new_config = copy.copy(self.config)
        new_config.merge_non_none_values(config)
        return self._convert_config(new_config)

    def _convert_config(self, config=None):
        """Convert endpoint and credential fields to bytes for the signer."""
        if config is not None:
            if config.endpoint is not None:
                config.endpoint = compat.convert_to_bytes(config.endpoint)
            if config.credentials is not None:
                config.credentials.access_key_id = \
                    compat.convert_to_bytes(config.credentials.access_key_id)
                config.credentials.secret_access_key = \
                    compat.convert_to_bytes(config.credentials.secret_access_key)
        return config

    def _send_request(
            self, http_method, function_name=None,
            body=None, headers=None, params=None,
            config=None,
            body_parser=None):
        """Sign and dispatch the request through the shared BCE HTTP client."""
        config = self._merge_config(config)
        path = InfiniteClient._get_path(config, function_name)
        if body_parser is None:
            body_parser = _parse_http_response
        return bce_http_client.send_request(
            config, bce_v1_signer.sign, [body_parser],
            http_method, path, body, headers, params)
| |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""A "modified ResNet model" in Haiku with support for both DKS and TAT."""
import math
from typing import Any, Callable, Mapping, Optional, Sequence, Union
from dks.jax import activation_transform
from dks.jax import haiku_initializers
import haiku as hk
import jax.numpy as jnp
FloatStrOrBool = Union[str, float, bool]
BN_CONFIG = {
"create_offset": True,
"create_scale": True,
"decay_rate": 0.999,
}
class BlockV1(hk.Module):
  """ResNet V1 block with optional bottleneck.

  Applies (conv [-> BN] -> activation) layers to the input and adds a
  (possibly projected) shortcut branch before the final activation. When
  ``shortcut_weight`` is a float, the residual and shortcut branches are
  weighted so their squared weights sum to one (the DKS/TAT convention);
  when it is None a standard unweighted residual sum is used; when it is
  exactly 0.0 the skip connection is removed entirely.
  """

  def __init__(
      self,
      channels: int,
      stride: Union[int, Sequence[int]],
      use_projection: bool,
      bottleneck: bool,
      use_batch_norm: bool,
      activation: Callable[[jnp.ndarray], jnp.ndarray],
      shortcut_weight: Optional[float],
      w_init: Optional[Any],
      name: Optional[str] = None,
  ):
    super().__init__(name=name)
    self.use_projection = use_projection
    self.use_batch_norm = use_batch_norm
    self.shortcut_weight = shortcut_weight

    # The projection conv is only needed when the shortcut branch is both
    # present (shortcut_weight != 0.0) and requested.
    if self.use_projection and self.shortcut_weight != 0.0:
      self.proj_conv = hk.Conv2D(
          output_channels=channels,
          kernel_shape=1,
          stride=stride,
          w_init=w_init,
          with_bias=not use_batch_norm,
          padding="SAME",
          name="shortcut_conv")
      if use_batch_norm:
        self.proj_batchnorm = hk.BatchNorm(
            name="shortcut_batchnorm", **BN_CONFIG)

    channel_div = 4 if bottleneck else 1
    conv_0 = hk.Conv2D(
        output_channels=channels // channel_div,
        kernel_shape=1 if bottleneck else 3,
        stride=1 if bottleneck else stride,
        w_init=w_init,
        with_bias=not use_batch_norm,
        padding="SAME",
        name="conv_0")
    conv_1 = hk.Conv2D(
        output_channels=channels // channel_div,
        kernel_shape=3,
        stride=stride if bottleneck else 1,
        w_init=w_init,
        with_bias=not use_batch_norm,
        padding="SAME",
        name="conv_1")
    layers = (conv_0, conv_1)

    # Build bn_layers unconditionally (empty when BN is disabled) so that
    # self.bn_layers is always bound; previously the tuple was only created
    # under use_batch_norm, which could leave the name unbound and raise
    # NameError when BN was disabled.
    bn_layers = ()
    if use_batch_norm:
      bn_layers = (hk.BatchNorm(name="batchnorm_0", **BN_CONFIG),
                   hk.BatchNorm(name="batchnorm_1", **BN_CONFIG))

    if bottleneck:
      conv_2 = hk.Conv2D(
          output_channels=channels,
          kernel_shape=1,
          stride=1,
          w_init=w_init,
          with_bias=not use_batch_norm,
          padding="SAME",
          name="conv_2")
      layers = layers + (conv_2,)
      if use_batch_norm:
        bn_layers += (hk.BatchNorm(name="batchnorm_2", **BN_CONFIG),)

    self.bn_layers = bn_layers
    self.layers = layers
    self.activation = activation

  def __call__(self, inputs, is_training, test_local_stats):
    out = shortcut = inputs

    if self.use_projection and self.shortcut_weight != 0.0:
      shortcut = self.proj_conv(shortcut)
      if self.use_batch_norm:
        shortcut = self.proj_batchnorm(shortcut, is_training, test_local_stats)

    for i, conv_i in enumerate(self.layers):
      out = conv_i(out)
      if self.use_batch_norm:
        out = self.bn_layers[i](out, is_training, test_local_stats)
      if i < len(self.layers) - 1:  # Don't apply activation on last layer
        out = self.activation(out)

    if self.shortcut_weight is None:
      # Standard (unweighted) ResNet residual sum.
      return self.activation(out + shortcut)
    elif self.shortcut_weight != 0.0:
      # Weighted sum with squared weights summing to 1 (DKS/TAT convention).
      return self.activation(
          math.sqrt(1 - self.shortcut_weight**2) * out +
          self.shortcut_weight * shortcut)
    else:
      # shortcut_weight == 0.0: the skip connection is removed.
      return out
class BlockV2(hk.Module):
  """ResNet V2 block with optional bottleneck.

  Pre-activation variant: each conv is preceded by (optional) batch norm and
  the activation, and the weighted residual sum is returned without a final
  activation. The shortcut/residual weighting follows the same DKS/TAT
  convention as BlockV1.
  """

  def __init__(
      self,
      channels: int,
      stride: Union[int, Sequence[int]],
      use_projection: bool,
      bottleneck: bool,
      use_batch_norm: bool,
      activation: Callable[[jnp.ndarray], jnp.ndarray],
      shortcut_weight: Optional[float],
      w_init: Optional[Any],
      name: Optional[str] = None,
  ):
    super().__init__(name=name)
    self.use_projection = use_projection
    self.use_batch_norm = use_batch_norm
    self.shortcut_weight = shortcut_weight

    # The projection conv is only needed when the shortcut branch is present.
    if self.use_projection and self.shortcut_weight != 0.0:
      self.proj_conv = hk.Conv2D(
          output_channels=channels,
          kernel_shape=1,
          stride=stride,
          w_init=w_init,
          with_bias=not use_batch_norm,
          padding="SAME",
          name="shortcut_conv")

    channel_div = 4 if bottleneck else 1
    conv_0 = hk.Conv2D(
        output_channels=channels // channel_div,
        kernel_shape=1 if bottleneck else 3,
        stride=1 if bottleneck else stride,
        w_init=w_init,
        with_bias=not use_batch_norm,
        padding="SAME",
        name="conv_0")
    conv_1 = hk.Conv2D(
        output_channels=channels // channel_div,
        kernel_shape=3,
        stride=stride if bottleneck else 1,
        w_init=w_init,
        with_bias=not use_batch_norm,
        padding="SAME",
        name="conv_1")
    layers = (conv_0, conv_1)

    # Build bn_layers unconditionally (empty when BN is disabled) so that
    # self.bn_layers is always bound; previously the tuple was only created
    # under use_batch_norm, risking a NameError when BN was disabled.
    bn_layers = ()
    if use_batch_norm:
      bn_layers = (hk.BatchNorm(name="batchnorm_0", **BN_CONFIG),
                   hk.BatchNorm(name="batchnorm_1", **BN_CONFIG))

    if bottleneck:
      conv_2 = hk.Conv2D(
          output_channels=channels,
          kernel_shape=1,
          stride=1,
          w_init=w_init,
          with_bias=not use_batch_norm,
          padding="SAME",
          name="conv_2")
      layers = layers + (conv_2,)
      if use_batch_norm:
        bn_layers += (hk.BatchNorm(name="batchnorm_2", **BN_CONFIG),)

    self.bn_layers = bn_layers
    self.layers = layers
    self.activation = activation

  def __call__(self, inputs, is_training, test_local_stats):
    x = shortcut = inputs

    for i, conv_i in enumerate(self.layers):
      if self.use_batch_norm:
        x = self.bn_layers[i](x, is_training, test_local_stats)
      x = self.activation(x)
      # The projection shortcut taps the *pre-conv* activation of the first
      # layer, matching the standard ResNet V2 formulation.
      if i == 0 and self.use_projection and self.shortcut_weight != 0.0:
        shortcut = self.proj_conv(x)
      x = conv_i(x)

    if self.shortcut_weight is None:
      # Standard (unweighted) ResNet residual sum.
      return x + shortcut
    elif self.shortcut_weight != 0.0:
      # Weighted sum with squared weights summing to 1 (DKS/TAT convention).
      return math.sqrt(
          1 - self.shortcut_weight**2) * x + self.shortcut_weight * shortcut
    else:
      # shortcut_weight == 0.0: the skip connection is removed.
      return x
class BlockGroup(hk.Module):
  """A stack of residual blocks operating at one spatial resolution."""

  def __init__(
      self,
      channels: int,
      num_blocks: int,
      stride: Union[int, Sequence[int]],
      resnet_v2: bool,
      bottleneck: bool,
      use_projection: bool,
      use_batch_norm: bool,
      activation: Callable[[jnp.ndarray], jnp.ndarray],
      shortcut_weight: Optional[float],
      w_init: Optional[Any],
      name: Optional[str] = None,
  ):
    super().__init__(name=name)
    block_cls = BlockV2 if resnet_v2 else BlockV1
    # Only the first block of a group downsamples (and possibly projects);
    # the remaining blocks keep the resolution fixed.
    self.blocks = [
        block_cls(
            channels=channels,
            stride=(1 if i else stride),
            use_projection=(i == 0 and use_projection),
            use_batch_norm=use_batch_norm,
            bottleneck=bottleneck,
            shortcut_weight=shortcut_weight,
            activation=activation,
            w_init=w_init,
            name="block_%d" % (i))
        for i in range(num_blocks)
    ]

  def __call__(self, inputs, is_training, test_local_stats):
    out = inputs
    for block in self.blocks:
      out = block(out, is_training, test_local_stats)
    return out
def check_length(length, value, name):
  """Raise ValueError unless ``value`` has exactly ``length`` elements.

  The error message previously hard-coded "length 4" regardless of the
  ``length`` argument; it now reports the actual required length.
  """
  if len(value) != length:
    raise ValueError(f"`{name}` must be of length {length} not {len(value)}")
class ModifiedResNet(hk.Module):
  """Modified version of an Imagenet ResNet model that supports DKS/TAT."""

  # Standard ResNet architecture settings, keyed by network depth.
  CONFIGS = {
      18: {
          "blocks_per_group": (2, 2, 2, 2),
          "bottleneck": False,
          "channels_per_group": (64, 128, 256, 512),
          "use_projection": (False, True, True, True),
      },
      34: {
          "blocks_per_group": (3, 4, 6, 3),
          "bottleneck": False,
          "channels_per_group": (64, 128, 256, 512),
          "use_projection": (False, True, True, True),
      },
      50: {
          "blocks_per_group": (3, 4, 6, 3),
          "bottleneck": True,
          "channels_per_group": (256, 512, 1024, 2048),
          "use_projection": (True, True, True, True),
      },
      101: {
          "blocks_per_group": (3, 4, 23, 3),
          "bottleneck": True,
          "channels_per_group": (256, 512, 1024, 2048),
          "use_projection": (True, True, True, True),
      },
      152: {
          "blocks_per_group": (3, 8, 36, 3),
          "bottleneck": True,
          "channels_per_group": (256, 512, 1024, 2048),
          "use_projection": (True, True, True, True),
      },
      200: {
          "blocks_per_group": (3, 24, 36, 3),
          "bottleneck": True,
          "channels_per_group": (256, 512, 1024, 2048),
          "use_projection": (True, True, True, True),
      },
  }

  def __init__(
      self,
      num_classes: int,
      depth: int,
      resnet_v2: bool = True,
      use_batch_norm: bool = False,
      shortcut_weight: Optional[float] = 0.0,
      activation_name: str = "softplus",
      w_init: Optional[Any] = haiku_initializers.ScaledUniformOrthogonal(
          delta=True),
      logits_config: Optional[Mapping[str, Any]] = None,
      initial_conv_config: Optional[Mapping[str, FloatStrOrBool]] = None,
      dropout_rate: float = 0.0,
      transformation_method: str = "DKS",
      dks_params: Optional[Mapping[str, FloatStrOrBool]] = None,
      tat_params: Optional[Mapping[str, FloatStrOrBool]] = None,
      name: Optional[str] = None,
  ):
    """Constructs a "modified ResNet model" with support for both DKS and TAT.

    By default, we construct the network *without* normalization layers or
    skip connections (making it a "vanilla network"), initialize the weights
    with the SUO distribution, and use DKS to transform the activation functions
    (which are "softplus" by default). These behaviors, and the option to use
    TAT, are controlled via the constructor arguments.

    This file was adapted from the original Haiku ResNet implementation:
    https://github.com/deepmind/dm-haiku/blob/main/haiku/_src/nets/resnet.py

    It is the end result of applying the rules described in the section titled
    "Summary of our method" in the DKS paper (https://arxiv.org/abs/2110.01765)
    to what is essentially a standard ResNet. See the section titled
    "Application to various modified ResNets" in the DKS paper for more details.
    The only departure from this is that we construct the "maximal C map
    function" instead of the "maximal slope function" (which can be computed
    from the former), which enables support for TAT.

    Args:
      num_classes: The number of classes to classify the inputs into.
      depth: The number of layers.
      resnet_v2: Whether to use the v2 ResNet implementation instead of v1.
        Defaults to ``True``.
      use_batch_norm: Whether to use Batch Normalization (BN). Note that DKS/TAT
        are not compatible with the use of BN. Defaults to ``False``.
      shortcut_weight: The weighting factor of shortcut branch, which must be
        a float between 0 and 1, or None. If not None, the shortcut branch is
        multiplied by ``shortcut_weight``, and the residual branch is multiplied
        by ``residual_weight``, where
        ``shortcut_weight**2 + residual_weight**2 == 1.0``.
        If None, no multiplications are performed (which corresponds to a
        standard ResNet), and compatibility with DKS/TAT is lost. Note that
        setting ``shortcut_weight`` to 0.0 effectively removes the skip
        connections from the network. Defaults to ``0.0``.
      activation_name: String name for activation function. To get TReLU from
        the TAT paper one should set this to ``leaky_relu``, and set
        the ``transformation_method`` argument to ``TAT``. Defaults to
        ``softplus``.
      w_init: Haiku initializer used to initialize the weights.
      logits_config: A dictionary of keyword arguments for the logits layer.
      initial_conv_config: Keyword arguments passed to the constructor of the
        initial :class:`~haiku.Conv2D` module.
      dropout_rate: A float giving the dropout rate for penultimate layer of the
        network (i.e. right before the layer which produces the class logits).
        (Default: 0.0)
      transformation_method: A string representing the method used to transform
        the activation function. Can be ``DKS``, ``TAT``, or ``untransformed``.
        Defaults to ``DKS``.
      dks_params: A dictionary containing the parameters to use for DKS. See
        activation_transform.get_transformed_activations for more details.
        Defaults to ``None``.
      tat_params: A dictionary containing the parameters to use for TAT. See
        activation_transform.get_transformed_activations for more details.
        Defaults to ``None``.
      name: Name of the Sonnet module.
    """
    super().__init__(name=name)

    # Reject argument combinations that are incompatible with DKS/TAT.
    if shortcut_weight is not None and (shortcut_weight > 1.0
                                        or shortcut_weight < 0.0):
      raise ValueError("Unsupported value for shortcut_weight.")
    if (use_batch_norm and
        (transformation_method == "DKS" or transformation_method == "TAT")):
      raise ValueError("DKS and TAT are not compatible with the use of BN "
                       "layers.")
    if (shortcut_weight is None and
        (transformation_method == "DKS" or transformation_method == "TAT")):
      raise ValueError("Must specify a value for shortcut_weight when using "
                       "DKS or TAT.")

    self.depth = depth
    self.resnet_v2 = resnet_v2
    self.use_batch_norm = use_batch_norm
    self.shortcut_weight = shortcut_weight
    self.activation_name = activation_name
    self.dropout_rate = dropout_rate

    blocks_per_group = ModifiedResNet.CONFIGS[depth]["blocks_per_group"]
    channels_per_group = ModifiedResNet.CONFIGS[depth]["channels_per_group"]
    bottleneck = ModifiedResNet.CONFIGS[depth]["bottleneck"]
    use_projection = ModifiedResNet.CONFIGS[depth]["use_projection"]

    logits_config = dict(logits_config or {})
    logits_config.setdefault("w_init", w_init)
    logits_config.setdefault("name", "logits")

    # Number of blocks in each group for ResNet.
    check_length(4, blocks_per_group, "blocks_per_group")
    check_length(4, channels_per_group, "channels_per_group")

    initial_conv_config = dict(initial_conv_config or {})
    initial_conv_config.setdefault("output_channels", 64)
    initial_conv_config.setdefault("kernel_shape", 7)
    initial_conv_config.setdefault("stride", 2)
    initial_conv_config.setdefault("with_bias", not use_batch_norm)
    initial_conv_config.setdefault("padding", "SAME")
    initial_conv_config.setdefault("name", "initial_conv")
    initial_conv_config.setdefault("w_init", w_init)

    # Transform the activation function with DKS/TAT (or leave it
    # untransformed), using this model's maximal C map function.
    act_dict = activation_transform.get_transformed_activations(
        [self.activation_name], method=transformation_method,
        dks_params=dks_params, tat_params=tat_params,
        subnet_max_func=self.subnet_max_func)
    self.activation = act_dict[self.activation_name]

    self.initial_conv = hk.Conv2D(**initial_conv_config)

    # V1 networks normalize/activate right after the initial conv; V2
    # networks defer this to the (pre-activation) blocks.
    if not self.resnet_v2 and use_batch_norm:
      self.initial_batchnorm = hk.BatchNorm(
          name="initial_batchnorm", **BN_CONFIG)

    self.block_groups = []
    strides = (1, 2, 2, 2)
    for i in range(4):
      self.block_groups.append(
          BlockGroup(
              channels=channels_per_group[i],
              num_blocks=blocks_per_group[i],
              stride=strides[i],
              resnet_v2=resnet_v2,
              bottleneck=bottleneck,
              use_batch_norm=use_batch_norm,
              use_projection=use_projection[i],
              shortcut_weight=shortcut_weight,
              activation=self.activation,
              w_init=w_init,
              name="block_group_%d" % (i)))

    if self.resnet_v2 and use_batch_norm:
      self.final_batchnorm = hk.BatchNorm(name="final_batchnorm", **BN_CONFIG)

    self.logits = hk.Linear(num_classes, **logits_config)

  def __call__(self, inputs, is_training, test_local_stats=False):
    """Computes class logits for a batch of images."""
    out = inputs
    out = self.initial_conv(out)

    if not self.resnet_v2:
      if self.use_batch_norm:
        out = self.initial_batchnorm(out, is_training, test_local_stats)
      out = self.activation(out)

    out = hk.max_pool(
        out, window_shape=(1, 3, 3, 1), strides=(1, 2, 2, 1), padding="SAME")

    for block_group in self.block_groups:
      out = block_group(out, is_training, test_local_stats)

    if self.resnet_v2:
      if self.use_batch_norm:
        out = self.final_batchnorm(out, is_training, test_local_stats)
      out = self.activation(out)

    # Global average pooling over the spatial dimensions.
    out = jnp.mean(out, axis=(1, 2))

    if self.dropout_rate > 0.0 and is_training:
      out = hk.dropout(hk.next_rng_key(), self.dropout_rate, out)

    return self.logits(out)

  def subnet_max_func(self, x, r_fn):
    """Evaluates the model's maximal C map function (used by DKS/TAT)."""
    return subnet_max_func(x, r_fn, self.depth, self.shortcut_weight)
def subnet_max_func(x, r_fn, depth, shortcut_weight):
  """The subnetwork maximizing function of the modified ResNet model."""
  # See Appendix B of the TAT paper for a step-by-step procedure for how
  # to compute this function for different architectures.
  config = ModifiedResNet.CONFIGS[depth]
  blocks_per_group = config["blocks_per_group"]
  bottleneck = config["bottleneck"]
  use_projection = config["use_projection"]

  # C map value of a residual branch considered as its own subnetwork
  # (three convs, matching the bottleneck residual branch).
  res_branch_subnetwork_x = r_fn(r_fn(r_fn(x)))

  convs_per_block = 3 if bottleneck else 2
  for group_idx in range(4):
    for block_idx in range(blocks_per_group[group_idx]):
      res_x = x
      for _ in range(convs_per_block):
        res_x = r_fn(res_x)
      # Only the first block of a group may have a projection shortcut.
      if block_idx == 0 and use_projection[group_idx]:
        shortcut_x = r_fn(x)
      else:
        shortcut_x = x
      x = (shortcut_weight**2 * shortcut_x
           + (1.0 - shortcut_weight**2) * res_x)

  x = r_fn(x)
  return max(x, res_branch_subnetwork_x)
| |
#! /usr/bin/env python
# MIT License
#
# Copyright (c) 2018 Michael J Simms. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import csv
import json
import os
import random
import sys
import time
from isolationforest import IsolationForest
def test_random(num_trees, sub_sampling_size, num_training_samples, num_tests, plot, plot_filename):
    """Train on uniform random 2D points, then score a control set and an outlier set.

    Returns (avg control score, avg normalized control score, avg outlier
    score, avg normalized outlier score, elapsed seconds).
    """
    forest = IsolationForest.Forest(num_trees, sub_sampling_size)

    # Note the time at which the test began.
    start_time = time.time()

    # Build the training set from points uniform in [0, 25] x [0, 25].
    training_x = []
    training_y = []
    for i in range(0, num_training_samples):
        sample = IsolationForest.Sample("Training Sample " + str(i))
        x = random.randint(0, 25)
        y = random.randint(0, 25)
        sample.add_features([{"x": x}, {"y": y}])
        forest.add_sample(sample)
        # Keep the coordinates so we can graph them later.
        training_x.append(x)
        training_y.append(y)

    # Create the isolation forest.
    forest.create()

    # Score samples drawn from the same distribution as the training data.
    normal_x = []
    normal_y = []
    avg_control_set_score = 0.0
    avg_control_set_normalized_score = 0.0
    for i in range(0, num_tests):
        sample = IsolationForest.Sample("Normal Sample " + str(i))
        x = random.randint(0, 25)
        y = random.randint(0, 25)
        sample.add_features([{"x": x}, {"y": y}])
        normal_x.append(x)
        normal_y.append(y)
        avg_control_set_score += forest.score(sample)
        avg_control_set_normalized_score += forest.normalized_score(sample)
    avg_control_set_score /= num_tests
    avg_control_set_normalized_score /= num_tests

    # Score samples shifted to [20, 45] x [20, 45], so many fall outside the
    # training range and should look anomalous.
    outlier_x = []
    outlier_y = []
    avg_outlier_set_score = 0.0
    avg_outlier_set_normalized_score = 0.0
    for i in range(0, num_tests):
        sample = IsolationForest.Sample("Outlier Sample " + str(i))
        x = random.randint(20, 45)
        y = random.randint(20, 45)
        sample.add_features([{"x": x}, {"y": y}])
        outlier_x.append(x)
        outlier_y.append(y)
        avg_outlier_set_score += forest.score(sample)
        avg_outlier_set_normalized_score += forest.normalized_score(sample)
    avg_outlier_set_score /= num_tests
    avg_outlier_set_normalized_score /= num_tests

    # Compute the elapsed time.
    elapsed_time = time.time() - start_time

    # Optionally graph the three point sets (plotly is imported lazily so the
    # dependency is only required when plotting).
    if plot:
        import plotly
        import plotly.graph_objs as go

        training_trace = go.Scatter(x=training_x, y=training_y, mode='markers', name='training')
        normal_trace = go.Scatter(x=normal_x, y=normal_y, mode='markers', name='normal')
        outlier_trace = go.Scatter(x=outlier_x, y=outlier_y, mode='markers', name='outlier')
        plotly.offline.plot([training_trace, normal_trace, outlier_trace], filename=plot_filename)

    return avg_control_set_score, avg_control_set_normalized_score, avg_outlier_set_score, avg_outlier_set_normalized_score, elapsed_time
def test_iris(num_trees, sub_sampling_size, plot, dump, load):
    """Train and score an isolation forest on the iris data set.

    Rows of the setosa class are randomly split between training and test;
    all other classes are held out as outliers. Returns (avg control score,
    avg normalized control score, avg outlier score, avg normalized outlier
    score, elapsed seconds).

    Args:
        num_trees: Number of trees in the forest.
        sub_sampling_size: Sub-sampling size used when building each tree.
        plot: When True, writes a plotly scatter plot of the data to disk.
        dump: When True, writes the forest structure to a JSON file.
        load: When True, loads the forest structure from a JSON file instead
            of adding training samples to it.
    """
    FEATURE_SEPAL_LENGTH_CM = "sepal length cm"
    FEATURE_SEPAL_WIDTH_CM = "sepal width cm"
    FEATURE_PETAL_LENGTH_CM = "petal length cm"
    FEATURE_PETAL_WIDTH_CM = "petal width cm"

    forest = IsolationForest.Forest(num_trees, sub_sampling_size)

    avg_control_set_score = 0.0
    avg_outlier_set_score = 0.0
    avg_control_set_normalized_score = 0.0
    avg_outlier_set_normalized_score = 0.0
    num_control_tests = 0
    num_outlier_tests = 0
    # Initialized here so the return statement below cannot raise NameError
    # when the data file is missing (previously `elapsed_time` was only
    # assigned inside the os.path.isfile branch).
    elapsed_time = 0.0

    # Test loading a forest from file.
    if load:
        with open('isolationforest_test_iris.json', 'rt') as json_file:
            json_str = json_file.read()
            json_data = json.loads(json_str)
            forest.load(json_data)

    # Note the time at which the test began.
    start_time = time.time()

    # Find and open the iris data file.
    data_file_name = os.path.realpath(os.path.join(os.path.realpath(__file__), "..", "..", "data", "iris.data.txt"))
    if os.path.isfile(data_file_name):
        with open(data_file_name) as csv_file:
            training_class_name = 'Iris-setosa'
            training_samples = []
            test_samples = []

            # Each row in the file represents one sample. We'll use some for
            # training and save some for test.
            csv_reader = csv.reader(csv_file, delimiter=',')
            for row in csv_reader:
                # Check for junk.
                if len(row) < 5:
                    continue

                features = []
                features.append({FEATURE_SEPAL_LENGTH_CM: float(row[0])})
                features.append({FEATURE_SEPAL_WIDTH_CM: float(row[1])})
                features.append({FEATURE_PETAL_LENGTH_CM: float(row[2])})
                features.append({FEATURE_PETAL_WIDTH_CM: float(row[3])})

                sample = IsolationForest.Sample(row[4])
                sample.add_features(features)

                # Randomly split the samples into training and test samples.
                if random.randint(0, 10) > 5 and row[4] == training_class_name:  # Use for training
                    if not load:  # We loaded the forest from a file, so don't modify it here.
                        forest.add_sample(sample)
                    training_samples.append(sample)
                else:  # Save for test
                    test_samples.append(sample)

            # Create the forest.
            forest.create()

            # Use each test sample.
            for test_sample in test_samples:
                score = forest.score(test_sample)
                normalized_score = forest.normalized_score(test_sample)
                if training_class_name == test_sample.name:
                    avg_control_set_score = avg_control_set_score + score
                    avg_control_set_normalized_score = avg_control_set_normalized_score + normalized_score
                    num_control_tests = num_control_tests + 1
                else:
                    avg_outlier_set_score = avg_outlier_set_score + score
                    avg_outlier_set_normalized_score = avg_outlier_set_normalized_score + normalized_score
                    num_outlier_tests = num_outlier_tests + 1

            # Compute statistics, guarding against empty test partitions.
            if num_control_tests > 0:
                avg_control_set_score = avg_control_set_score / num_control_tests
                avg_control_set_normalized_score = avg_control_set_normalized_score / num_control_tests
            if num_outlier_tests > 0:
                avg_outlier_set_score = avg_outlier_set_score / num_outlier_tests
                avg_outlier_set_normalized_score = avg_outlier_set_normalized_score / num_outlier_tests

            # Compute the elapsed time.
            elapsed_time = time.time() - start_time

            # Create a trace (plotly is imported lazily so the dependency is
            # only required when plotting).
            if plot:
                import plotly
                import plotly.graph_objs as go

                training_x = []
                training_y = []
                test_x = []
                test_y = []
                for sample in training_samples:
                    training_x.append(sample.features[FEATURE_SEPAL_LENGTH_CM])
                    training_y.append(sample.features[FEATURE_SEPAL_WIDTH_CM])
                for sample in test_samples:
                    test_x.append(sample.features[FEATURE_SEPAL_LENGTH_CM])
                    test_y.append(sample.features[FEATURE_SEPAL_WIDTH_CM])

                training_trace = go.Scatter(x=training_x, y=training_y, mode='markers', name='training')
                test_trace = go.Scatter(x=test_x, y=test_y, mode='markers', name='test')
                data = [training_trace, test_trace]
                plotly.offline.plot(data, filename='isolationforest_test_iris.html')

            # Write the forest structure to disk.
            if dump:
                json_data = forest.dump()
                with open('isolationforest_test_iris.json', 'wt') as json_file:
                    json_file.write(json.dumps(json_data))
    else:
        print("Could not find " + data_file_name)

    return avg_control_set_score, avg_control_set_normalized_score, avg_outlier_set_score, avg_outlier_set_normalized_score, elapsed_time
def _print_scores(avg_control_set_score, avg_control_set_normalized_score, avg_outlier_set_score, avg_outlier_set_normalized_score):
    """Print the four average-score lines shared by every test."""
    print("Average of control test samples: %.4f" % avg_control_set_score)
    print("Average of normalized control test samples: %.4f" % avg_control_set_normalized_score)
    print("Average of outlier test samples: %.4f" % avg_outlier_set_score)
    print("Average of normalized outlier test samples: %.4f" % avg_outlier_set_normalized_score)


def main():
    """Parse command line options and run the three isolation forest tests."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--plot", action="store_true", default=False, help="Plots the test data", required=False)
    parser.add_argument("--dump", action="store_true", default=False, help="Dumps the forest data to a file", required=False)
    parser.add_argument("--load", action="store_true", default=False, help="Loads the forest data from a file", required=False)

    try:
        args = parser.parse_args()
    except IOError as e:
        parser.error(e)
        sys.exit(1)

    print("Test 1")
    print("------")
    results = test_random(10, 10, 100, 100, args.plot, 'isolationforest_test_1.html')
    _print_scores(*results[:4])
    print("Total time for Test 1: %.4f" % results[4] + " seconds.\n")

    print("Test 2")
    print("------")
    results = test_random(100, 100, 1000, 100, args.plot, 'isolationforest_test_2.html')
    _print_scores(*results[:4])
    print("Total time for Test 2: %.4f" % results[4] + " seconds.\n")

    print("Test 3 (Iris Test)")
    print("------------------")
    results = test_iris(50, 50, args.plot, args.dump, args.load)
    _print_scores(*results[:4])
    print("Total time for Test 3 (Iris Test): %.4f" % results[4] + " seconds.")


if __name__ == "__main__":
    main()
| |
import pytest
from openshift_checks.logging.fluentd_config import FluentdConfig, OpenShiftCheckException
def canned_fluentd_pod(containers):
    """Return a running fluentd pod manifest embedding the given container specs."""
    metadata = {
        "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
        "name": "logging-fluentd-1",
    }
    spec = {
        "host": "node1",
        "nodeName": "node1",
        "containers": containers,
    }
    status = {
        "phase": "Running",
        "containerStatuses": [{"ready": True}],
        "conditions": [{"status": "True", "type": "Ready"}],
    }
    return {
        "metadata": metadata,
        "spec": spec,
        "status": status,
    }
# Canned fluentd pod whose single container sets USE_JOURNAL=true; used by
# tests that exercise the journald-enabled configuration path.
fluentd_pod = {
    "metadata": {
        "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
        "name": "logging-fluentd-1",
    },
    "spec": {
        "host": "node1",
        "nodeName": "node1",
        "containers": [
            {
                "name": "container1",
                "env": [
                    {
                        "name": "USE_JOURNAL",
                        "value": "true",
                    }
                ],
            }
        ],
    },
    "status": {
        "phase": "Running",
        "containerStatuses": [{"ready": True}],
        "conditions": [{"status": "True", "type": "Ready"}],
    }
}

# Canned fluentd pod in a non-Running ("Unknown") phase with one unready
# container; used to exercise handling of pods that are not fully up.
not_running_fluentd_pod = {
    "metadata": {
        "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
        "name": "logging-fluentd-2",
    },
    "status": {
        "phase": "Unknown",
        "containerStatuses": [{"ready": True}, {"ready": False}],
        "conditions": [{"status": "True", "type": "Ready"}],
    }
}
@pytest.mark.parametrize('name, use_journald, logging_driver, extra_words', [
    (
        'test success with use_journald=false, and docker config set to use "json-file"',
        False,
        "json-file",
        [],
    ),
], ids=lambda argvals: argvals[0])
def test_check_logging_config_non_master(name, use_journald, logging_driver, extra_words):
    # Fake module executor: answer docker_info with the parametrized logging
    # driver and everything else with an empty result.
    def execute_module(module_name, args):
        if module_name != "docker_info":
            return {}
        return {"info": {"LoggingDriver": logging_driver}}

    task_vars = dict(
        group_names=["oo_nodes_to_config", "oo_etcd_to_config"],
        openshift_logging_fluentd_use_journal=use_journald,
        openshift=dict(
            common=dict(config_base=""),
        ),
    )

    check = FluentdConfig(execute_module, task_vars)
    check.execute_module = execute_module

    # A matching fluentd/docker logging configuration yields no error.
    assert check.check_logging_config() is None
@pytest.mark.parametrize('name, use_journald, logging_driver, words', [
    (
        'test failure with use_journald=false, but docker config set to use "journald"',
        False,
        "journald",
        ['json log files', 'has been set to use "journald"'],
    ),
    (
        'test failure with use_journald=false, but docker config set to use an "unsupported" driver',
        False,
        "unsupported",
        ["json log files", 'has been set to use "unsupported"'],
    ),
    (
        'test failure with use_journald=true, but docker config set to use "json-file"',
        True,
        "json-file",
        ['logs from "journald"', 'has been set to use "json-file"'],
    ),
], ids=lambda argvals: argvals[0])
def test_check_logging_config_non_master_failed(name, use_journald, logging_driver, words):
    # Fake module executor: answer docker_info with the parametrized logging
    # driver and everything else with an empty result.
    def execute_module(module_name, args):
        if module_name != "docker_info":
            return {}
        return {"info": {"LoggingDriver": logging_driver}}

    task_vars = dict(
        group_names=["oo_nodes_to_config", "oo_etcd_to_config"],
        openshift_logging_fluentd_use_journal=use_journald,
        openshift=dict(
            common=dict(config_base=""),
        ),
    )

    check = FluentdConfig(execute_module, task_vars)
    check.execute_module = execute_module

    # A mismatched fluentd/docker logging configuration must produce an error
    # mentioning each expected phrase.
    error = check.check_logging_config()
    assert error is not None
    for expected in words:
        assert expected in error
@pytest.mark.parametrize('name, pods, logging_driver, extra_words', [
    # use_journald returns false (not using journald), but check succeeds
    # since docker is set to use json-file
    (
        'test success with use_journald=false, and docker config set to use default driver "json-file"',
        [canned_fluentd_pod(
            [
                {
                    "name": "container1",
                    "env": [{
                        "name": "USE_JOURNAL",
                        "value": "false",
                    }],
                },
            ]
        )],
        "json-file",
        [],
    ),
    (
        'test success with USE_JOURNAL env var missing and docker config set to use default driver "json-file"',
        [canned_fluentd_pod(
            [
                {
                    "name": "container1",
                    "env": [{
                        "name": "RANDOM",
                        "value": "value",
                    }],
                },
            ]
        )],
        "json-file",
        [],
    ),
], ids=lambda argvals: argvals[0])
def test_check_logging_config_master(name, pods, logging_driver, extra_words):
    # Fake module executor: answer docker_info with the parametrized logging
    # driver and everything else with an empty result.
    def execute_module(module_name, args):
        if module_name != "docker_info":
            return {}
        return {"info": {"LoggingDriver": logging_driver}}

    task_vars = dict(
        group_names=["oo_masters_to_config"],
        openshift=dict(
            common=dict(config_base=""),
        ),
    )

    check = FluentdConfig(execute_module, task_vars)
    check.execute_module = execute_module
    check.get_pods_for_component = lambda _: pods

    # On masters the fluentd pod env is consulted; a consistent setup yields
    # no error.
    assert check.check_logging_config() is None
@pytest.mark.parametrize('name, pods, logging_driver, words', [
    (
        'test failure with use_journald=false, but docker config set to use "journald"',
        [canned_fluentd_pod(
            [
                {
                    "name": "container1",
                    "env": [{
                        "name": "USE_JOURNAL",
                        "value": "false",
                    }],
                },
            ]
        )],
        "journald",
        ['json log files', 'has been set to use "journald"'],
    ),
    (
        'test failure with use_journald=true, but docker config set to use "json-file"',
        [fluentd_pod],
        "json-file",
        ['logs from "journald"', 'has been set to use "json-file"'],
    ),
    (
        'test failure with use_journald=false, but docker set to use an "unsupported" driver',
        [canned_fluentd_pod(
            [
                {
                    "name": "container1",
                    "env": [{
                        "name": "USE_JOURNAL",
                        "value": "false",
                    }],
                },
            ]
        )],
        "unsupported",
        ["json log files", 'has been set to use "unsupported"'],
    ),
    (
        'test failure with USE_JOURNAL env var missing and docker config set to use "journald"',
        [canned_fluentd_pod(
            [
                {
                    "name": "container1",
                    "env": [{
                        "name": "RANDOM",
                        "value": "value",
                    }],
                },
            ]
        )],
        "journald",
        ["configuration is set to", "json log files"],
    ),
], ids=lambda argvals: argvals[0])
def test_check_logging_config_master_failed(name, pods, logging_driver, words):
    # Fake module executor: answer docker_info with the parametrized logging
    # driver and everything else with an empty result.
    def execute_module(module_name, args):
        if module_name != "docker_info":
            return {}
        return {"info": {"LoggingDriver": logging_driver}}

    task_vars = dict(
        group_names=["oo_masters_to_config"],
        openshift=dict(
            common=dict(config_base=""),
        ),
    )

    check = FluentdConfig(execute_module, task_vars)
    check.execute_module = execute_module
    check.get_pods_for_component = lambda _: pods

    # A mismatched fluentd/docker logging configuration must produce an error
    # mentioning each expected phrase.
    error = check.check_logging_config()
    assert error is not None
    for expected in words:
        assert expected in error
@pytest.mark.parametrize('name, pods, response, logging_driver, extra_words', [
    (
        'test OpenShiftCheckException with no running containers',
        [canned_fluentd_pod([])],
        {
            "failed": True,
            "result": "unexpected",
        },
        "json-file",
        ['no running containers'],
    ),
    (
        'test OpenShiftCheckException one container and no env vars set',
        [canned_fluentd_pod([
            {
                "name": "container1",
                "env": [],
            },
        ])],
        {
            "failed": True,
            "result": "unexpected",
        },
        "json-file",
        ['no environment variables'],
    ),
], ids=lambda argvals: argvals[0])
def test_check_logging_config_master_fails_on_unscheduled_deployment(name, pods, response, logging_driver, extra_words):
    """Malformed fluentd pods (no containers / no env) should raise
    OpenShiftCheckException rather than return an error string."""
    def fake_execute_module(module_name, args):
        # Only the docker_info module result matters for this check.
        if module_name != "docker_info":
            return {}
        return {"info": {"LoggingDriver": logging_driver}}

    task_vars = {
        "group_names": ["oo_masters_to_config"],
        "openshift": {"common": {"config_base": ""}},
    }

    check = FluentdConfig(fake_execute_module, task_vars)
    check.get_pods_for_component = lambda _: pods

    with pytest.raises(OpenShiftCheckException) as error:
        check.check_logging_config()

    assert error is not None
    assert all(term in str(error) for term in extra_words)
| |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Jointly fit subunits and output NL."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pickle
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile
rng = np.random
def compute_fr_loss(K, b, X_in, Y_in, nl_params=None):
  """Compute firing rate and Poisson-style loss.

  Args:
    K : Subunit filters, n_pix x n_SU.
    b : Subunit biases, n_SU x n_cells.
    X_in : Stimulus, T x n_pix.
    Y_in : Responses, T x n_cells.
    nl_params : Output-nonlinearity parameters, 2 x n_cells (or broadcastable);
      row 0 is the numerator exponent, row 1 the denominator slope. Defaults
      to [[1.0], [0.0]] (identity nonlinearity). Previously this default was a
      numpy array evaluated once at definition time (the mutable-default
      anti-pattern); a None sentinel builds a fresh array per call instead.

  Returns:
    (fsum, loss): firing rates (T x n_cells) and per-cell loss (n_cells,),
    where loss = mean(rate) - mean(Y * log(rate)) over time.
  """
  if nl_params is None:
    nl_params = np.expand_dims(np.array([1.0, 0.0]), 1)

  # Subunit drives: exponentiate the filtered stimulus plus per-cell bias.
  f = np.exp(np.expand_dims(np.dot(X_in, K), 2) + b)  # T x SU x cells
  fsum = f.sum(1)  # T x n_cells

  # Apply the output nonlinearity (RHS uses the pre-NL fsum in both places).
  fsum = np.power(fsum, nl_params[0, :]) / (nl_params[1, :] * fsum + 1)

  loss = np.mean(fsum, 0) - np.mean(Y_in * np.log(fsum), 0)  # per cell
  return fsum, loss
def get_neighbormat(mask_matrix, nbd=1):
  """Build an adjacency matrix over the active pixels of a 2D mask.

  Args:
    mask_matrix : 2D array; entries > 0 mark active pixels.
    nbd : Squared-distance threshold; pixel pairs with squared Euclidean
      distance <= nbd are considered neighbors (a pixel neighbors itself).

  Returns:
    n_active x n_active float (0/1) matrix, ordered row-major over the mask.
  """
  mask = np.ndarray.flatten(mask_matrix) > 0

  # Row/column coordinates for every entry of the (flattened) matrix.
  x = np.repeat(np.expand_dims(np.arange(mask_matrix.shape[0]), 1), mask_matrix.shape[1], 1)
  y = np.repeat(np.expand_dims(np.arange(mask_matrix.shape[1]), 0), mask_matrix.shape[0], 0)
  x = np.ndarray.flatten(x)
  y = np.ndarray.flatten(y)

  # Coordinates of active pixels only. (Unused idx/iidx locals removed.)
  xx = np.expand_dims(x[mask], 1)
  yy = np.expand_dims(y[mask], 1)

  # Pairwise squared distances, thresholded into a 0/1 adjacency matrix.
  distance = (xx - xx.T) ** 2 + (yy - yy.T) ** 2
  neighbor_mat = np.double(distance <= nbd)

  return neighbor_mat
def Flat_clustering_jnt(X, Y, Ns, tms_tr, tms_tst, K=None, b=None,
                        steps_max=10000, eps=1e-6,
                        projection_type=None, neighbor_mat=None,
                        lam_proj=0, eps_proj=0.01,
                        save_filename_partial=None,
                        fitting_phases=(1, 2, 3)):
  """Jointly fit subunit filters by soft clustering, then the output NL.

  Args:
    X: Stimulus, T x n_pix (already restricted to the mask).
    Y: Responses, T x n_cells.
    Ns: Number of subunits.
    tms_tr: Time indices used for training.
    tms_tst: Time indices used for testing.
    K: Optional initial filters (n_pix x Ns); random if None.
    b: Optional initial biases (Ns x n_cells); random if None.
    steps_max: Maximum clustering iterations in phase 1.
    eps: Convergence tolerance on the change in summed train loss.
    projection_type: None, 'l1' or 'lnl1' soft-thresholding of K.
    neighbor_mat: Pixel adjacency for 'lnl1' reweighting (identity if None).
    lam_proj: Projection strength.
    eps_proj: Stabilizer inside the 'lnl1' reweighting.
    save_filename_partial: Optional checkpoint path (gfile); skipped if None.
    fitting_phases: Subset of (1, 2, 3): 1 = subunits (no output NL),
      2 = NL + b + K scales, 3 = everything.

  Returns:
    (K, b, alpha, lam_log, lam_log_test, fitting_phase, fit_params); alpha is
    the last soft-assignment matrix from phase 1 (None if phase 1 skipped).
  """
  X_tr = X[tms_tr, :]
  Y_tr = Y[tms_tr, :]
  X_test = X[tms_tst, :]
  Y_test = Y[tms_tst, :]

  Tlen = Y_tr.shape[0]
  N1 = X_tr.shape[1]
  n_cells = Y.shape[1]

  # Stimulus covariance, reused by the per-cell linear solve below.
  # Fix: 'numpy.dot' referenced the undefined name 'numpy' (module imports
  # numpy as np only) and raised NameError; same fix applied twice below.
  Sigma = np.dot(X_tr.transpose(), X_tr) / float(X_tr.shape[0])

  if projection_type == 'lnl1':
    if neighbor_mat is None:
      neighbor_mat = np.eye(N1)

  # Load previously saved partial fit, if a checkpoint path was given.
  # Fix: gfile.Exists() was previously called even when
  # save_filename_partial is None (the default), which errors.
  if save_filename_partial is not None and gfile.Exists(save_filename_partial):
    try:
      data = pickle.load(gfile.Open(save_filename_partial, 'r'))
      K = data['K']
      b = data['b']
      lam_log = data['lam_log']
      lam_log_test = data['lam_log_test']
      irepeat_start = data['irepeat']
      lam = lam_log[-1]
      lam_test = lam_log_test[-1]
      lam_min = data['lam_min']
      K_min = data['K_min']
      b_min = data['b_min']
      print('Partially fit model parameters loaded')
    except Exception:  # Fix: was a bare 'except:'.
      print('Error in loading file')
      # Fall back to fresh initialization if the checkpoint is unreadable.
      if K is None:
        K = 2 * rng.rand(N1, Ns) - 0.5
        K_min = np.copy(K)
      if b is None:
        b = 2 * rng.rand(Ns, n_cells) - 0.5
        b_min = np.copy(b)
      lam_log = np.zeros((0, n_cells))
      lam_log_test = np.zeros((0, n_cells))
      lam = np.inf
      lam_test = np.inf
      lam_min = np.inf
      irepeat_start = 0
  else:
    print('No partially fit model')
    # initialize filters
    if K is None:
      K = 2 * rng.rand(N1, Ns) - 0.5
      K_min = np.copy(K)
    if b is None:
      b = 2 * rng.rand(Ns, n_cells) - 0.5
      b_min = np.copy(b)
    lam_log = np.zeros((0, n_cells))
    lam_log_test = np.zeros((0, n_cells))
    lam = np.inf
    lam_test = np.inf
    lam_min = np.inf
    irepeat_start = 0
    print('Variables initialized')

  fitting_phase = np.array([])
  fit_params = []
  # Fix: alpha was unbound in the return statement when phase 1 is skipped.
  alpha = None

  # Phase 1: find subunits with no output NL (soft clustering updates).
  if 1 in fitting_phases:
    for irepeat in range(irepeat_start, int(steps_max)):  # Fix: np.int deprecated.

      # Periodically checkpoint progress.
      if irepeat % 100 == 99:
        save_dict = {'K': K, 'b': b, 'lam_log': lam_log,
                     'lam_log_test': lam_log_test, 'irepeat': irepeat,
                     'K_min': K_min, 'b_min': b_min, 'lam_min': lam_min}
        if save_filename_partial is not None:
          pickle.dump(save_dict, gfile.Open(save_filename_partial, 'w'))

      # Reweighted-L1 weights for the locally-normalized L1 projection.
      if projection_type == 'lnl1':
        wts = 1 / (neighbor_mat.dot(np.abs(K)) + eps_proj)

      # Test loss.
      _, lam_test = compute_fr_loss(K, b, X_test, Y_test)
      lam_log_test = np.append(lam_log_test, np.expand_dims(lam_test, 0), 0)

      # Train loss.
      lam_prev = np.copy(lam)
      _, lam = compute_fr_loss(K, b, X_tr, Y_tr)
      lam_log = np.append(lam_log, np.expand_dims(lam, 0), 0)

      # Track the best (summed over cells) parameters seen so far.
      if np.sum(lam) < np.sum(lam_min):
        K_min = np.copy(K)
        b_min = np.copy(b)
        lam_min = np.copy(lam)
        lam_test_at_lam_min = np.copy(lam_test)

      # Soft-clustering update of K and b, accumulated over cells.
      K_new_list_nr = []
      K_new_list_dr = []
      mean_ass_f_list = []
      for icell in range(n_cells):
        tms = np.int64(np.arange(Tlen))
        t_sp = tms[Y_tr[:, icell] != 0]
        Y_tsp = Y_tr[t_sp, icell]
        f = np.exp(np.dot(X_tr, K) + b[:, icell])
        alpha = (f.transpose() / f.sum(1)).transpose()  # soft assignments
        xx = (Y_tsp.transpose() * alpha[t_sp, :].T).T
        sta_f = X_tr[t_sp, :].transpose().dot(xx)
        mean_ass_f = xx.sum(0)

        K_new_list_nr += [np.linalg.solve(Sigma, sta_f)]
        K_new_list_dr += [mean_ass_f]
        mean_ass_f_list += [mean_ass_f]

      K_new_list_nr = np.array(K_new_list_nr)
      K_new_list_dr = np.array(K_new_list_dr)
      mean_ass_f_list = np.array(mean_ass_f_list).T
      K = np.mean(K_new_list_nr, 0) / np.mean(K_new_list_dr, 0)

      # Soft thresholding for K.
      if projection_type == 'lnl1':
        K = np.maximum(K - (wts * lam_proj), 0) - np.maximum(-K - (wts * lam_proj), 0)
      if projection_type == 'l1':
        K = np.maximum(K - lam_proj, 0) - np.maximum(-K - lam_proj, 0)

      b = np.log((1 / Tlen) * mean_ass_f_list) - np.expand_dims(
          np.diag(0.5 * K.transpose().dot(Sigma.dot(K))), 1)

      # Stop when the summed train loss has converged.
      if np.sum(np.abs(lam_prev - lam)) < eps:
        break

    fitting_phase = np.append(fitting_phase, np.ones(lam_log.shape[0]))
    nl_params = np.repeat(np.expand_dims(np.array([1.0, 0.0]), 1), n_cells, 1)
    fit_params += [[np.copy(K_min), np.copy(b_min), nl_params]]

  # Phase 2: fit output NL + b + per-subunit K scales.
  if 2 in fitting_phases:
    K, b, nl_params, loss_log, loss_log_test = fit_scales(X_tr, Y_tr,
                                                          X_test, Y_test,
                                                          Ns=Ns, K=K, b=b,
                                                          params=nl_params,
                                                          lr=0.001, eps=eps)
    if 'lam_log' in vars():
      lam_log = np.append(lam_log, np.array(loss_log), 0)
    else:
      lam_log = np.array(loss_log)
    if 'lam_log_test' in vars():
      lam_log_test = np.append(lam_log_test, np.array(loss_log_test), 0)
    else:
      lam_log_test = np.array(loss_log_test)
    fitting_phase = np.append(fitting_phase, 2 * np.ones(np.array(loss_log).shape[0]))
    fit_params += [[np.copy(K), np.copy(b), nl_params]]

  # Phase 3: fit all parameters jointly.
  if 3 in fitting_phases:
    K, b, nl_params, loss_log, loss_log_test = fit_all(X_tr, Y_tr, X_test, Y_test,
                                                       Ns=Ns, K=K, b=b, train_phase=3,
                                                       params=nl_params,
                                                       lr=0.001, eps=eps)
    if 'lam_log' in vars():
      lam_log = np.append(lam_log, np.array(loss_log), 0)
    else:
      lam_log = np.array(loss_log)
    if 'lam_log_test' in vars():
      lam_log_test = np.append(lam_log_test, np.array(loss_log_test), 0)
    else:
      lam_log_test = np.array(loss_log_test)
    fitting_phase = np.append(fitting_phase, 3 * np.ones(np.array(loss_log).shape[0]))
    fit_params += [[np.copy(K), np.copy(b), nl_params]]

  return K, b, alpha, lam_log, lam_log_test, fitting_phase, fit_params
def fit_all(X_tr, Y_tr, X_test, Y_test,
            Ns=5, K=None, b=None, params=None,
            train_phase=2, lr=0.1, eps=1e-9):
  """Jointly optimize model parameters with Adam (TF1 graph mode).

  Args:
    X_tr, Y_tr: Training stimulus (T x n_pix) and responses (T x n_cells).
    X_test, Y_test: Held-out stimulus and responses.
    Ns: Number of subunits (kept for interface compatibility; unused here).
    K: Initial filters (n_pix x Ns). Required.
    b: Initial biases (Ns x n_cells). Required.
    params: Initial output-NL parameters (2 x n_cells). Required.
    train_phase: 2 trains (b, params) only; 3 trains (K, b, params).
    lr: Adam learning rate.
    eps: Stop when the summed train-loss change falls below this.

  Returns:
    (K_min, b_min, params_min, l_tr_log, l_test_log): best-seen parameters
    and the per-iteration train/test loss logs.
  """
  X = tf.placeholder(tf.float32)  # T x Nsub
  Y = tf.placeholder(tf.float32)  # T

  # Fix: was `raise "Not initialized"` — raising a string is a TypeError in
  # Python 3 (only BaseException instances may be raised).
  if K is None or b is None or params is None:
    raise ValueError('K, b and params must all be initialized')

  K_tf = tf.Variable(K.astype(np.float32))
  b_tf = tf.Variable(b.astype(np.float32))
  params_tf = tf.Variable(np.array(params).astype(np.float32))

  # Firing rate: sum subunit drives, then apply the parametric output NL.
  lam_int = tf.reduce_sum(tf.exp(tf.expand_dims(tf.matmul(X, K_tf), 2) + b_tf), 1)  # T x # cells
  lam = tf.pow(lam_int, params_tf[0, :]) / (params_tf[1, :] * lam_int + 1)  # T x # cells
  loss = tf.reduce_mean(lam, 0) - tf.reduce_mean(Y * tf.log(lam), 0)
  loss_all_cells = tf.reduce_sum(loss)

  if train_phase == 2:
    train_op = tf.train.AdamOptimizer(lr).minimize(loss_all_cells, var_list=[b_tf, params_tf])
  elif train_phase == 3:
    train_op = tf.train.AdamOptimizer(lr).minimize(loss_all_cells, var_list=[K_tf, b_tf, params_tf])
  else:
    # Fix: previously an unknown train_phase surfaced later as a confusing
    # NameError on train_op; fail fast with a clear message instead.
    raise ValueError('train_phase must be 2 or 3')

  # Keep the NL denominator slope non-negative after every step.
  with tf.control_dependencies([train_op]):
    param_pos = tf.assign(params_tf[1], tf.nn.relu(params_tf[1]))
    train_op_grp = tf.group(train_op, param_pos)

  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    K_min = sess.run(K_tf)
    b_min = sess.run(b_tf)
    params_min = sess.run(params_tf)

    l_tr_log = []
    l_test_log = []
    l_tr_prev = np.inf
    l_min = np.inf
    for _ in range(100000):
      l_tr, _unused = sess.run([loss, train_op_grp], feed_dict={X: X_tr, Y: Y_tr})
      l_test = sess.run(loss, feed_dict={X: X_test, Y: Y_test})
      l_tr_log += [l_tr]
      l_test_log += [l_test]

      # Track the best parameters seen so far (loss summed across cells).
      if np.sum(l_tr) < np.sum(l_min):
        K_min = sess.run(K_tf)
        b_min = sess.run(b_tf)
        params_min = sess.run(params_tf)
        l_min = l_tr

      if np.sum(np.abs(l_tr_prev - l_tr)) < eps:
        break
      l_tr_prev = l_tr

  return K_min, b_min, params_min, l_tr_log, l_test_log
def fit_scales(X_tr, Y_tr, X_test, Y_test,
               Ns=5, K=None, b=None, params=None, lr=0.1, eps=1e-9):
  """Fit per-subunit scales of K, biases b and the output NL (TF1 graph mode).

  Filter *shapes* are frozen (tf.constant); only one scalar scale per
  subunit, the biases and the NL parameters are trained.

  Args:
    X_tr, Y_tr: Training stimulus (T x n_pix) and responses (T x n_cells).
    X_test, Y_test: Held-out stimulus and responses.
    Ns: Number of subunits (kept for interface compatibility; unused here).
    K: Fixed filters (n_pix x Ns). Required.
    b: Initial biases (Ns x n_cells). Required.
    params: Initial output-NL parameters (2 x n_cells). Required.
    lr: Adam learning rate.
    eps: Stop when the summed train-loss change falls below this.

  Returns:
    (K_min, b_min, params_min, l_tr_log, l_test_log): best-seen parameters
    (K_min already includes the learned scales) and the loss logs.
  """
  X = tf.placeholder(tf.float32)  # T x Nsub
  Y = tf.placeholder(tf.float32)  # T x n_cells

  # Fix: was `raise "Not initialized"` — raising a string is a TypeError in
  # Python 3 (only BaseException instances may be raised).
  if K is None or b is None or params is None:
    raise ValueError('K, b and params must all be initialized')

  # Only one trainable scalar scale per subunit; the filter itself is fixed.
  K_tf_unscaled = tf.constant(K.astype(np.float32))
  K_scale = tf.Variable(np.ones((1, K.shape[1])).astype(np.float32))
  K_tf = tf.multiply(K_tf_unscaled, K_scale)
  b_tf = tf.Variable(b.astype(np.float32))
  params_tf = tf.Variable(np.array(params).astype(np.float32))  # 2 x # cells

  # Firing rate: sum subunit drives, then apply the parametric output NL.
  lam_int = tf.reduce_sum(tf.exp(tf.expand_dims(tf.matmul(X, K_tf), 2) + b_tf), 1)  # T x # cells
  lam = tf.pow(lam_int, params_tf[0, :]) / (params_tf[1, :] * lam_int + 1)  # T x # cells
  loss = tf.reduce_mean(lam, 0) - tf.reduce_mean(Y * tf.log(lam), 0)
  loss_all_cells = tf.reduce_sum(loss)

  train_op = tf.train.AdamOptimizer(lr).minimize(
      loss_all_cells, var_list=[K_scale, b_tf, params_tf])

  # Keep the NL denominator slope non-negative after every step.
  with tf.control_dependencies([train_op]):
    param_pos = tf.assign(params_tf[1], tf.nn.relu(params_tf[1]))
    train_op_grp = tf.group(train_op, param_pos)

  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    K_min = sess.run(K_tf)
    b_min = sess.run(b_tf)
    params_min = sess.run(params_tf)

    l_tr_log = []
    l_test_log = []
    l_tr_prev = np.inf
    l_min = np.inf
    for _ in range(100000):
      l_tr, _unused = sess.run([loss, train_op_grp], feed_dict={X: X_tr, Y: Y_tr})
      l_test = sess.run(loss, feed_dict={X: X_test, Y: Y_test})
      l_tr_log += [l_tr]
      l_test_log += [l_test]

      # Track the best parameters seen so far (loss summed across cells).
      if np.sum(l_tr) < np.sum(l_min):
        K_min = sess.run(K_tf)
        b_min = sess.run(b_tf)
        params_min = sess.run(params_tf)
        l_min = l_tr

      if np.sum(np.abs(l_tr_prev - l_tr)) < eps:
        break
      l_tr_prev = l_tr

  return K_min, b_min, params_min, l_tr_log, l_test_log
| |
import tensorflow as tf
import numpy as np
from scipy.misc import imsave
from skimage.transform import resize
from copy import deepcopy
import os
import constants as c
from loss_functions import combined_loss
from utils import psnr_error, sharp_diff_error
from tfutils import w, b
# noinspection PyShadowingNames
class GeneratorModel:
    # NOTE(review): legacy Python 2 / pre-1.0 TensorFlow code (print
    # statements, xrange, tf.scalar_summary/merge_summary, tf.concat(dim,
    # values)). Porting to Python 3 / TF >= 1.0 requires updating these APIs.
    def __init__(self, session, summary_writer, height_train, width_train, height_test,
                 width_test, scale_layer_fms, scale_kernel_sizes):
        """
        Initializes a GeneratorModel.

        @param session: The TensorFlow Session.
        @param summary_writer: The writer object to record TensorBoard summaries
        @param height_train: The height of the input images for training.
        @param width_train: The width of the input images for training.
        @param height_test: The height of the input images for testing.
        @param width_test: The width of the input images for testing.
        @param scale_layer_fms: The number of feature maps in each layer of each scale network.
        @param scale_kernel_sizes: The size of the kernel for each layer of each scale network.

        @type session: tf.Session
        @type summary_writer: tf.train.SummaryWriter
        @type height_train: int
        @type width_train: int
        @type height_test: int
        @type width_test: int
        @type scale_layer_fms: list<list<int>>
        @type scale_kernel_sizes: list<list<int>>
        """
        self.sess = session
        self.summary_writer = summary_writer
        self.height_train = height_train
        self.width_train = width_train
        self.height_test = height_test
        self.width_test = width_test
        self.scale_layer_fms = scale_layer_fms
        self.scale_kernel_sizes = scale_kernel_sizes
        # One sub-network per entry of scale_layer_fms.
        self.num_scale_nets = len(scale_layer_fms)

        # Builds the whole graph eagerly at construction time.
        self.define_graph()

    # noinspection PyAttributeOutsideInit
    def define_graph(self):
        """
        Sets up the model graph in TensorFlow.
        """
        with tf.name_scope('generator'):
            ##
            # Data
            ##

            with tf.name_scope('data'):
                self.input_frames_train = tf.placeholder(
                    tf.float32, shape=[None, self.height_train, self.width_train, 3 * c.HIST_LEN])
                self.gt_frames_train = tf.placeholder(
                    tf.float32, shape=[None, self.height_train, self.width_train, 3])

                self.input_frames_test = tf.placeholder(
                    tf.float32, shape=[None, self.height_test, self.width_test, 3 * c.HIST_LEN])
                self.gt_frames_test = tf.placeholder(
                    tf.float32, shape=[None, self.height_test, self.width_test, 3])

                # use variable batch_size for more flexibility
                self.batch_size_train = tf.shape(self.input_frames_train)[0]
                self.batch_size_test = tf.shape(self.input_frames_test)[0]

            ##
            # Scale network setup and calculation
            ##

            self.summaries_train = []
            self.scale_preds_train = []  # the generated images at each scale
            self.scale_gts_train = []  # the ground truth images at each scale
            self.d_scale_preds = []  # the predictions from the discriminator model

            self.summaries_test = []
            self.scale_preds_test = []  # the generated images at each scale
            self.scale_gts_test = []  # the ground truth images at each scale

            for scale_num in xrange(self.num_scale_nets):
                with tf.name_scope('scale_' + str(scale_num)):
                    with tf.name_scope('setup'):
                        ws = []
                        bs = []

                        # create weights for kernels
                        for i in xrange(len(self.scale_kernel_sizes[scale_num])):
                            ws.append(w([self.scale_kernel_sizes[scale_num][i],
                                         self.scale_kernel_sizes[scale_num][i],
                                         self.scale_layer_fms[scale_num][i],
                                         self.scale_layer_fms[scale_num][i + 1]]))
                            bs.append(b([self.scale_layer_fms[scale_num][i + 1]]))

                    with tf.name_scope('calculation'):
                        # Closure over scale_num/ws/bs: builds the forward
                        # pass for this scale, reused for train and test.
                        def calculate(height, width, inputs, gts, last_gen_frames):
                            # scale inputs and gts
                            scale_factor = 1. / 2 ** ((self.num_scale_nets - 1) - scale_num)
                            scale_height = int(height * scale_factor)
                            scale_width = int(width * scale_factor)

                            inputs = tf.image.resize_images(inputs, [scale_height, scale_width])
                            scale_gts = tf.image.resize_images(gts, [scale_height, scale_width])

                            # for all scales but the first, add the frame generated by the last
                            # scale to the input
                            if scale_num > 0:
                                last_gen_frames = tf.image.resize_images(
                                    last_gen_frames, [scale_height, scale_width])
                                inputs = tf.concat(3, [inputs, last_gen_frames])

                            # generated frame predictions
                            preds = inputs

                            # perform convolutions
                            with tf.name_scope('convolutions'):
                                for i in xrange(len(self.scale_kernel_sizes[scale_num])):
                                    # Convolve layer
                                    preds = tf.nn.conv2d(
                                        preds, ws[i], [1, 1, 1, 1], padding=c.PADDING_G)

                                    # Activate with ReLU (or Tanh for last layer)
                                    if i == len(self.scale_kernel_sizes[scale_num]) - 1:
                                        preds = tf.nn.tanh(preds + bs[i])
                                    else:
                                        preds = tf.nn.relu(preds + bs[i])

                            return preds, scale_gts

                        ##
                        # Perform train calculation
                        ##

                        # for all scales but the first, add the frame generated by the last
                        # scale to the input
                        if scale_num > 0:
                            last_scale_pred_train = self.scale_preds_train[scale_num - 1]
                        else:
                            last_scale_pred_train = None

                        # calculate
                        train_preds, train_gts = calculate(self.height_train,
                                                           self.width_train,
                                                           self.input_frames_train,
                                                           self.gt_frames_train,
                                                           last_scale_pred_train)
                        self.scale_preds_train.append(train_preds)
                        self.scale_gts_train.append(train_gts)

                        # We need to run the network first to get generated frames, run the
                        # discriminator on those frames to get d_scale_preds, then run this
                        # again for the loss optimization.
                        if c.ADVERSARIAL:
                            self.d_scale_preds.append(tf.placeholder(tf.float32, [None, 1]))

                        ##
                        # Perform test calculation
                        ##

                        # for all scales but the first, add the frame generated by the last
                        # scale to the input
                        if scale_num > 0:
                            last_scale_pred_test = self.scale_preds_test[scale_num - 1]
                        else:
                            last_scale_pred_test = None

                        # calculate
                        test_preds, test_gts = calculate(self.height_test,
                                                         self.width_test,
                                                         self.input_frames_test,
                                                         self.gt_frames_test,
                                                         last_scale_pred_test)
                        self.scale_preds_test.append(test_preds)
                        self.scale_gts_test.append(test_gts)

            ##
            # Training
            ##

            with tf.name_scope('train'):
                # global loss is the combined loss from every scale network
                self.global_loss = combined_loss(self.scale_preds_train,
                                                 self.scale_gts_train,
                                                 self.d_scale_preds)
                self.global_step = tf.Variable(0, trainable=False)
                self.optimizer = tf.train.AdamOptimizer(learning_rate=c.LRATE_G, name='optimizer')
                self.train_op = self.optimizer.minimize(self.global_loss,
                                                        global_step=self.global_step,
                                                        name='train_op')

                # train loss summary
                loss_summary = tf.scalar_summary('train_loss_G', self.global_loss)
                self.summaries_train.append(loss_summary)

            ##
            # Error
            ##

            with tf.name_scope('error'):
                # error computation
                # get error at largest scale
                self.psnr_error_train = psnr_error(self.scale_preds_train[-1],
                                                   self.gt_frames_train)
                self.sharpdiff_error_train = sharp_diff_error(self.scale_preds_train[-1],
                                                              self.gt_frames_train)
                self.psnr_error_test = psnr_error(self.scale_preds_test[-1],
                                                  self.gt_frames_test)
                self.sharpdiff_error_test = sharp_diff_error(self.scale_preds_test[-1],
                                                             self.gt_frames_test)

                # train error summaries
                summary_psnr_train = tf.scalar_summary('train_PSNR',
                                                       self.psnr_error_train)
                summary_sharpdiff_train = tf.scalar_summary('train_SharpDiff',
                                                            self.sharpdiff_error_train)
                self.summaries_train += [summary_psnr_train, summary_sharpdiff_train]

                # test error
                summary_psnr_test = tf.scalar_summary('test_PSNR',
                                                      self.psnr_error_test)
                summary_sharpdiff_test = tf.scalar_summary('test_SharpDiff',
                                                           self.sharpdiff_error_test)
                self.summaries_test += [summary_psnr_test, summary_sharpdiff_test]

            # add summaries to visualize in TensorBoard
            self.summaries_train = tf.merge_summary(self.summaries_train)
            self.summaries_test = tf.merge_summary(self.summaries_test)

    def train_step(self, batch, discriminator=None):
        """
        Runs a training step using the global loss on each of the scale networks.

        @param batch: An array of shape
                      [c.BATCH_SIZE x self.height x self.width x (3 * (c.HIST_LEN + 1))].
                      The input and output frames, concatenated along the channel axis (index 3).
        @param discriminator: The discriminator model. Default = None, if not adversarial.

        @return: The global step.
        """
        ##
        # Split into inputs and outputs
        ##

        # Last 3 channels are the ground-truth frame; the rest is history.
        input_frames = batch[:, :, :, :-3]
        gt_frames = batch[:, :, :, -3:]

        ##
        # Train
        ##

        feed_dict = {self.input_frames_train: input_frames, self.gt_frames_train: gt_frames}

        if c.ADVERSARIAL:
            # Run the generator first to get generated frames
            scale_preds = self.sess.run(self.scale_preds_train, feed_dict=feed_dict)

            # Run the discriminator nets on those frames to get predictions
            d_feed_dict = {}
            for scale_num, gen_frames in enumerate(scale_preds):
                d_feed_dict[discriminator.scale_nets[scale_num].input_frames] = gen_frames
            d_scale_preds = self.sess.run(discriminator.scale_preds, feed_dict=d_feed_dict)

            # Add discriminator predictions to the
            for i, preds in enumerate(d_scale_preds):
                feed_dict[self.d_scale_preds[i]] = preds

        _, global_loss, global_psnr_error, global_sharpdiff_error, global_step, summaries = \
            self.sess.run([self.train_op,
                           self.global_loss,
                           self.psnr_error_train,
                           self.sharpdiff_error_train,
                           self.global_step,
                           self.summaries_train],
                          feed_dict=feed_dict)

        ##
        # User output
        ##
        if global_step % c.STATS_FREQ == 0:
            print 'GeneratorModel : Step ', global_step
            print '                 Global Loss    : ', global_loss
            print '                 PSNR Error     : ', global_psnr_error
            print '                 Sharpdiff Error: ', global_sharpdiff_error
        if global_step % c.SUMMARY_FREQ == 0:
            self.summary_writer.add_summary(summaries, global_step)
            print 'GeneratorModel: saved summaries'
        if global_step % c.IMG_SAVE_FREQ == 0:
            print '-' * 30
            print 'Saving images...'

            # if not adversarial, we didn't get the preds for each scale net before for the
            # discriminator prediction, so do it now
            if not c.ADVERSARIAL:
                scale_preds = self.sess.run(self.scale_preds_train, feed_dict=feed_dict)

            # re-generate scale gt_frames to avoid having to run through TensorFlow.
            scale_gts = []
            for scale_num in xrange(self.num_scale_nets):
                scale_factor = 1. / 2 ** ((self.num_scale_nets - 1) - scale_num)
                scale_height = int(self.height_train * scale_factor)
                scale_width = int(self.width_train * scale_factor)

                # resize gt_output_frames for scale and append to scale_gts_train
                scaled_gt_frames = np.empty([c.BATCH_SIZE, scale_height, scale_width, 3])
                for i, img in enumerate(gt_frames):
                    # for skimage.transform.resize, images need to be in range [0, 1], so normalize
                    # to [0, 1] before resize and back to [-1, 1] after
                    sknorm_img = (img / 2) + 0.5
                    resized_frame = resize(sknorm_img, [scale_height, scale_width, 3])
                    scaled_gt_frames[i] = (resized_frame - 0.5) * 2
                scale_gts.append(scaled_gt_frames)

            # for every clip in the batch, save the inputs, scale preds and scale gts
            for pred_num in xrange(len(input_frames)):
                pred_dir = c.get_dir(os.path.join(c.IMG_SAVE_DIR, 'Step_' + str(global_step),
                                                  str(pred_num)))

                # save input images
                for frame_num in xrange(c.HIST_LEN):
                    img = input_frames[pred_num, :, :, (frame_num * 3):((frame_num + 1) * 3)]
                    imsave(os.path.join(pred_dir, 'input_' + str(frame_num) + '.png'), img)

                # save preds and gts at each scale
                # noinspection PyUnboundLocalVariable
                for scale_num, scale_pred in enumerate(scale_preds):
                    gen_img = scale_pred[pred_num]

                    path = os.path.join(pred_dir, 'scale' + str(scale_num))
                    gt_img = scale_gts[scale_num][pred_num]

                    imsave(path + '_gen.png', gen_img)
                    imsave(path + '_gt.png', gt_img)

            print 'Saved images!'
            print '-' * 30

        return global_step

    def test_batch(self, batch, global_step, num_rec_out=1, save_imgs=True):
        """
        Runs a training step using the global loss on each of the scale networks.

        @param batch: An array of shape
                      [batch_size x self.height x self.width x (3 * (c.HIST_LEN+ num_rec_out))].
                      A batch of the input and output frames, concatenated along the channel axis
                      (index 3).
        @param global_step: The global step.
        @param num_rec_out: The number of outputs to predict. Outputs > 1 are computed recursively,
                            using previously-generated frames as input. Default = 1.
        @param save_imgs: Whether or not to save the input/output images to file. Default = True.

        @return: A tuple of (psnr error, sharpdiff error) for the batch.
        """
        # NOTE(review): the docstring promises a (psnr, sharpdiff) return,
        # but this method returns nothing — confirm callers don't rely on it.
        if num_rec_out < 1:
            raise ValueError('num_rec_out must be >= 1')

        print '-' * 30
        print 'Testing:'

        ##
        # Split into inputs and outputs
        ##

        input_frames = batch[:, :, :, :3 * c.HIST_LEN]
        gt_frames = batch[:, :, :, 3 * c.HIST_LEN:]

        ##
        # Generate num_rec_out recursive predictions
        ##

        working_input_frames = deepcopy(input_frames)  # input frames that will shift w/ recursion
        rec_preds = []
        rec_summaries = []
        for rec_num in xrange(num_rec_out):
            working_gt_frames = gt_frames[:, :, :, 3 * rec_num:3 * (rec_num + 1)]

            feed_dict = {self.input_frames_test: working_input_frames,
                         self.gt_frames_test: working_gt_frames}
            preds, psnr, sharpdiff, summaries = self.sess.run([self.scale_preds_test[-1],
                                                               self.psnr_error_test,
                                                               self.sharpdiff_error_test,
                                                               self.summaries_test],
                                                              feed_dict=feed_dict)

            # remove first input and add new pred as last input
            working_input_frames = np.concatenate(
                [working_input_frames[:, :, :, 3:], preds], axis=3)

            # add predictions and summaries
            rec_preds.append(preds)
            rec_summaries.append(summaries)

            print 'Recursion ', rec_num
            print 'PSNR Error     : ', psnr
            print 'Sharpdiff Error: ', sharpdiff

        # write summaries
        # TODO: Think of a good way to write rec output summaries - rn, just using first output.
        self.summary_writer.add_summary(rec_summaries[0], global_step)

        ##
        # Save images
        ##

        if save_imgs:
            for pred_num in xrange(len(input_frames)):
                pred_dir = c.get_dir(os.path.join(
                    c.IMG_SAVE_DIR, 'Tests/Step_' + str(global_step), str(pred_num)))

                # save input images
                for frame_num in xrange(c.HIST_LEN):
                    img = input_frames[pred_num, :, :, (frame_num * 3):((frame_num + 1) * 3)]
                    imsave(os.path.join(pred_dir, 'input_' + str(frame_num) + '.png'), img)

                # save recursive outputs
                for rec_num in xrange(num_rec_out):
                    gen_img = rec_preds[rec_num][pred_num]
                    gt_img = gt_frames[pred_num, :, :, 3 * rec_num:3 * (rec_num + 1)]

                    imsave(os.path.join(pred_dir, 'gen_' + str(rec_num) + '.png'), gen_img)
                    imsave(os.path.join(pred_dir, 'gt_' + str(rec_num) + '.png'), gt_img)

        print '-' * 30
| |
import logging
from datetime import timedelta
from hashlib import md5
from os import path
# Import some externalized utilities to work with the Telegram types and more
from . import helpers as utils
from .errors import (
RPCError, FloodWaitError, FileMigrateError, TypeNotFoundError
)
from .network import authenticator, MtProtoSender, TcpTransport
from .utils import get_appropriated_part_size
# For sending and receiving requests
from .tl import TLObject, JsonSession
from .tl.all_tlobjects import layer
from .tl.functions import (InitConnectionRequest, InvokeWithLayerRequest)
# Initial request
from .tl.functions.help import GetConfigRequest
from .tl.functions.auth import (
ImportAuthorizationRequest, ExportAuthorizationRequest
)
# Easier access for working with media
from .tl.functions.upload import (
GetFileRequest, SaveBigFilePartRequest, SaveFilePartRequest
)
# All the types we need to work with
from .tl.types import InputFile, InputFileBig
class TelegramBareClient:
"""Bare Telegram Client with just the minimum -
The reason to distinguish between a MtProtoSender and a
TelegramClient itself is because the sender is just that,
a sender, which should know nothing about Telegram but
rather how to handle this specific connection.
The TelegramClient itself should know how to initialize
a proper connection to the servers, as well as other basic
methods such as disconnection and reconnection.
This distinction between a bare client and a full client
makes it possible to create clones of the bare version
(by using the same session, IP address and port) to be
able to execute queries on either, without the additional
cost that would involve having the methods for signing in,
logging out, and such.
"""
# Current TelegramClient version
__version__ = '0.11.5'
# region Initialization
def __init__(self, session, api_id, api_hash,
proxy=None, timeout=timedelta(seconds=5)):
"""Initializes the Telegram client with the specified API ID and Hash.
Session must always be a Session instance, and an optional proxy
can also be specified to be used on the connection.
"""
self.session = session
self.api_id = int(api_id)
self.api_hash = api_hash
self.proxy = proxy
self._timeout = timeout
self._logger = logging.getLogger(__name__)
# Cache "exported" senders 'dc_id: TelegramBareClient' and
# their corresponding sessions not to recreate them all
# the time since it's a (somewhat expensive) process.
self._cached_clients = {}
# These will be set later
self.dc_options = None
self._sender = None
# endregion
# region Connecting
def connect(self, exported_auth=None):
"""Connects to the Telegram servers, executing authentication if
required. Note that authenticating to the Telegram servers is
not the same as authenticating the desired user itself, which
may require a call (or several) to 'sign_in' for the first time.
If 'exported_auth' is not None, it will be used instead to
determine the authorization key for the current session.
"""
if self._sender and self._sender.is_connected():
self._logger.debug(
'Attempted to connect when the client was already connected.'
)
return
transport = TcpTransport(self.session.server_address,
self.session.port,
proxy=self.proxy,
timeout=self._timeout)
try:
if not self.session.auth_key:
self.session.auth_key, self.session.time_offset = \
authenticator.do_authentication(transport)
self.session.save()
self._sender = MtProtoSender(transport, self.session)
self._sender.connect()
# Now it's time to send an InitConnectionRequest
# This must always be invoked with the layer we'll be using
if exported_auth is None:
query = GetConfigRequest()
else:
query = ImportAuthorizationRequest(
exported_auth.id, exported_auth.bytes)
request = InitConnectionRequest(
api_id=self.api_id,
device_model=self.session.device_model,
system_version=self.session.system_version,
app_version=self.session.app_version,
lang_code=self.session.lang_code,
system_lang_code=self.session.system_lang_code,
lang_pack='', # "langPacks are for official apps only"
query=query)
result = self(InvokeWithLayerRequest(
layer=layer, query=request
))
if exported_auth is not None:
result = self(GetConfigRequest())
# We're only interested in the DC options,
# although many other options are available!
self.dc_options = result.dc_options
return True
except TypeNotFoundError as e:
# This is fine, probably layer migration
self._logger.debug('Found invalid item, probably migrating', e)
self.disconnect()
self.connect(exported_auth=exported_auth)
except (RPCError, ConnectionError) as error:
# Probably errors from the previous session, ignore them
self.disconnect()
self._logger.debug('Could not stabilise initial connection: {}'
.format(error))
return False
def disconnect(self):
"""Disconnects from the Telegram server"""
if self._sender:
self._sender.disconnect()
self._sender = None
def reconnect(self, new_dc=None):
"""Disconnects and connects again (effectively reconnecting).
If 'new_dc' is not None, the current authorization key is
removed, the DC used is switched, and a new connection is made.
"""
self.disconnect()
if new_dc is not None:
self.session.auth_key = None # Force creating new auth_key
dc = self._get_dc(new_dc)
self.session.server_address = dc.ip_address
self.session.port = dc.port
self.session.save()
self.connect()
# endregion
# region Properties
def set_timeout(self, timeout):
if timeout is None:
self._timeout = None
elif isinstance(timeout, int) or isinstance(timeout, float):
self._timeout = timedelta(seconds=timeout)
elif isinstance(timeout, timedelta):
self._timeout = timeout
else:
raise ValueError(
'{} is not a valid type for a timeout'.format(type(timeout))
)
if self._sender:
self._sender.transport.timeout = self._timeout
    def get_timeout(self):
        """Return the currently configured timeout (timedelta or None)."""
        return self._timeout

    # Expose the pair through the more Pythonic 'client.timeout' attribute.
    timeout = property(get_timeout, set_timeout)
# endregion
# region Working with different Data Centers
def _get_dc(self, dc_id):
"""Gets the Data Center (DC) associated to 'dc_id'"""
if not self.dc_options:
raise ConnectionError(
'Cannot determine the required data center IP address. '
'Stabilise a successful initial connection first.')
return next(dc for dc in self.dc_options if dc.id == dc_id)
def _get_exported_client(self, dc_id,
                         init_connection=False,
                         bypass_cache=False):
    """Gets a cached exported TelegramBareClient for the desired DC.

    If it's the first time retrieving the TelegramBareClient, the
    current authorization is exported to the new DC so that
    it can be used there, and the connection is initialized.

    If after using the sender a ConnectionResetError is raised,
    this method should be called again with init_connection=True
    in order to perform the reconnection.

    If bypass_cache is True, a new client will be exported and
    it will not be cached.
    """
    # Thanks badoualy/kotlogram on /telegram/api/DefaultTelegramClient.kt
    # for clearly showing how to export the authorization! ^^
    client = self._cached_clients.get(dc_id)
    if client and not bypass_cache:
        if init_connection:
            client.reconnect()
        return client
    else:
        dc = self._get_dc(dc_id)

        # Export the current authorization to the new DC.
        export_auth = self(ExportAuthorizationRequest(dc_id))

        # Create a temporary session for this IP address, which needs
        # to be different because each auth_key is unique per DC.
        #
        # Construct this session with the connection parameters
        # (system version, device model...) from the current one.
        session = JsonSession(self.session)
        session.server_address = dc.ip_address
        session.port = dc.port
        client = TelegramBareClient(
            session, self.api_id, self.api_hash,
            timeout=self._timeout
        )
        # Pass the freshly exported auth so the new DC accepts us
        client.connect(exported_auth=export_auth)

        if not bypass_cache:
            # Don't go through this expensive process every time.
            self._cached_clients[dc_id] = client
        return client
# endregion
# region Invoking Telegram requests
def invoke(self, request, updates=None):
    """Invokes (sends) a MTProtoRequest and returns (receives) its result.

    If 'updates' is not None, all read update object will be put
    in such list. Otherwise, update objects will be ignored.

    :raises ValueError: if 'request' is not an invokable request, or
                        if no connection has been established yet.
    """
    # 'or', not 'and': a TLObject that is not content related (a bare
    # type) cannot be invoked, and neither can a non-TLObject at all
    # (short-circuiting also avoids touching .content_related on it).
    if not isinstance(request, TLObject) or not request.content_related:
        raise ValueError('You can only invoke requests, not types!')

    if not self._sender:
        raise ValueError('You must be connected to invoke requests!')

    try:
        self._sender.send(request)
        u = []  # Save updates here to update the state
        self._sender.receive(request, updates=u)
        self.session.update_state.update_state(request.result)
        if u:
            for update in u:
                self.session.update_state.update_state(update)
            if updates is not None:
                updates.extend(u)
        return request.result

    except ConnectionResetError:
        self._logger.debug('Server disconnected us. Reconnecting and '
                           'resending request...')
        self.reconnect()
        # Forward 'updates' so the retried call still collects the
        # update objects the caller asked for.
        return self.invoke(request, updates=updates)

    except FloodWaitError:
        # Flood waits must be honoured by the caller; drop the
        # connection before re-raising.
        self.disconnect()
        raise


# Let people use client(SomeRequest()) instead client.invoke(...)
__call__ = invoke
# endregion
# region Uploading media
def upload_file(self,
                file_path,
                part_size_kb=None,
                file_name=None,
                progress_callback=None):
    """Uploads the specified file_path and returns a handle (an instance
    of InputFile or InputFileBig, as required) which can be later used.

    If 'progress_callback' is not None, it should be a function that
    takes two parameters, (bytes_uploaded, total_bytes).

    Default values for the optional parameters if left as None are:
    part_size_kb = get_appropriated_part_size(file_size)
    file_name = path.basename(file_path)

    :raises ValueError: on an invalid part size or a failed part upload.
    """
    file_size = path.getsize(file_path)
    if not part_size_kb:
        part_size_kb = get_appropriated_part_size(file_size)

    if part_size_kb > 512:
        raise ValueError('The part size must be less or equal to 512KB')

    part_size = int(part_size_kb * 1024)
    if part_size % 1024 != 0:
        raise ValueError('The part size must be evenly divisible by 1024')

    # Determine whether the file is too big (over 10MB) or not
    # Telegram does make a distinction between smaller or larger files
    is_large = file_size > 10 * 1024 * 1024
    # Ceiling division: the final part may be shorter than part_size
    part_count = (file_size + part_size - 1) // part_size

    file_id = utils.generate_random_long()
    # MD5 checksum is only sent for small files (see InputFile below)
    hash_md5 = md5()

    with open(file_path, 'rb') as file:
        for part_index in range(part_count):
            # Read the file by in chunks of size part_size
            part = file.read(part_size)

            # The SavePartRequest is different depending on whether
            # the file is too large or not (over or less than 10MB)
            if is_large:
                request = SaveBigFilePartRequest(file_id, part_index,
                                                 part_count, part)
            else:
                request = SaveFilePartRequest(file_id, part_index, part)

            result = self(request)
            if result:
                if not is_large:
                    # No need to update the hash if it's a large file
                    hash_md5.update(part)

                if progress_callback:
                    progress_callback(file.tell(), file_size)
            else:
                raise ValueError('Failed to upload file part {}.'
                                 .format(part_index))

    # Set a default file name if None was specified
    if not file_name:
        file_name = path.basename(file_path)

    if is_large:
        return InputFileBig(file_id, part_count, file_name)
    else:
        return InputFile(file_id, part_count, file_name,
                         md5_checksum=hash_md5.hexdigest())
# endregion
# region Downloading media
def download_file(self,
                  input_location,
                  file,
                  part_size_kb=None,
                  file_size=None,
                  progress_callback=None):
    """Downloads the given InputFileLocation to file (a stream or str).

    If 'progress_callback' is not None, it should be a function that
    takes two parameters, (bytes_downloaded, total_bytes). Note that
    'total_bytes' simply equals 'file_size', and may be None.

    :returns: the result's .type attribute once the download completes.
    :raises ValueError: if the part size is not divisible by 1024.
    """
    if not part_size_kb:
        if not file_size:
            part_size_kb = 64  # Reasonable default
        else:
            part_size_kb = get_appropriated_part_size(file_size)

    part_size = int(part_size_kb * 1024)
    if part_size % 1024 != 0:
        raise ValueError('The part size must be evenly divisible by 1024.')

    if isinstance(file, str):
        # Ensure that we'll be able to download the media
        utils.ensure_parent_dir_exists(file)
        f = open(file, 'wb')
    else:
        f = file

    # The used client will change if FileMigrateError occurs
    client = self
    try:
        offset_index = 0
        while True:
            offset = offset_index * part_size
            try:
                result = client(
                    GetFileRequest(input_location, offset, part_size))
            except FileMigrateError as e:
                # The file lives on another DC; switch to an exported
                # client for it and retry the same offset.
                client = self._get_exported_client(e.new_dc)
                continue

            offset_index += 1

            # If we have received no data (0 bytes), the file is over
            # So there is nothing left to download and write
            if not result.bytes:
                return result.type  # Return some extra information
            f.write(result.bytes)
            if progress_callback:
                progress_callback(f.tell(), file_size)
    finally:
        # Only close handles we opened ourselves; streams the caller
        # passed in are left open for them to manage.
        if isinstance(file, str):
            f.close()
# endregion
| |
'''
Created on 06.08.2014
'''
import unittest
import types
import time
try:
from java.lang import System
except:
pass
class GCDetector():
    """Canary whose finalization is observable via a class-level counter."""

    # Incremented each time an instance is finalized; runGCIfJython polls
    # this to detect that a collection pass actually ran.
    gcIndex = 0

    def __del__(self):
        GCDetector.gcIndex += 1
# Upper bound on System.gc() attempts before giving up.
maxGCRun = 10

def runGCIfJython():
    """On Jython, trigger Java GC until a finalizer is seen to run.

    Drops a throwaway GCDetector and repeatedly calls
    java.lang.System.gc() until GCDetector.gcIndex changes (at least one
    finalization pass happened) or maxGCRun attempts were made.  On
    CPython the java.lang import at module level failed, so 'System' is
    undefined and the bare except turns this into a no-op -- CPython's
    refcounting has already run the finalizers by this point.
    """
    try:
        currentIndex = GCDetector.gcIndex
        gcCount = 0
        detector = GCDetector()
        detector = None
        while currentIndex == GCDetector.gcIndex and gcCount < maxGCRun:
            System.gc()
            gcCount += 1
            time.sleep(0.1)
    except:
        pass
# Messages appended by the various __del__ implementations; the tests
# assert against this list to see which finalizers actually ran.
finalizeMsgList = []
# When True, finalizer messages are also printed for manual debugging.
verbose = False

# Slots used by the resurrecting finalizers below: __del__X stores
# 'self' here, bringing the object back to life after collection.
resurrectedObject_I = None
resurrectedObject_J = None
resurrectedObject_K = None
resurrectedObject_L = None
resurrectedObject_M = None
resurrectedObject_N = None
class ResurrectableDummyClass():
    """Old-style (classic) class deliberately without its own __del__;
    the tests attach one of the delI..delN finalizers at runtime."""

    def __init__(self, name):
        self.name = name
        # When True, the attached finalizer resurrects this instance.
        self.doResurrection = True

    def __str__(self):
        return self.name
class ResurrectableDummyClassNew(object):
    """New-style counterpart of ResurrectableDummyClass; likewise gets a
    delI..delN finalizer assigned by the tests at runtime."""

    def __init__(self, name):
        self.name = name
        # When True, the attached finalizer resurrects this instance.
        self.doResurrection = True

    def __str__(self):
        return self.name
# Six structurally identical module-level finalizers, one per test
# object (I..N).  Each logs a finalization message and, if the dying
# instance has doResurrection set, "resurrects" it by storing it into
# the matching module-level resurrectedObject_* global.  The tests
# attach them to the Resurrectable classes as __del__, one at a time.
# (Python 2 / Jython code: note the print statements.)
def __del__I(self):
    global resurrectedObject_I
    finalizeMsgList.append(str(self)+" finalized (ResurrectableDummyClass)")
    if verbose:
        print str(self)+" finalized (ResurrectableDummyClass)"
    if self.doResurrection:
        resurrectedObject_I = self

def __del__J(self):
    global resurrectedObject_J
    finalizeMsgList.append(str(self)+" finalized (ResurrectableDummyClass)")
    if verbose:
        print str(self)+" finalized (ResurrectableDummyClass)"
    if self.doResurrection:
        resurrectedObject_J = self

def __del__K(self):
    global resurrectedObject_K
    finalizeMsgList.append(str(self)+" finalized (ResurrectableDummyClass)")
    if verbose:
        print str(self)+" finalized (ResurrectableDummyClass)"
    if self.doResurrection:
        resurrectedObject_K = self

def __del__L(self):
    global resurrectedObject_L
    finalizeMsgList.append(str(self)+" finalized (ResurrectableDummyClass)")
    if verbose:
        print str(self)+" finalized (ResurrectableDummyClass)"
    if self.doResurrection:
        resurrectedObject_L = self

def __del__M(self):
    global resurrectedObject_M
    finalizeMsgList.append(str(self)+" finalized (ResurrectableDummyClass)")
    if verbose:
        print str(self)+" finalized (ResurrectableDummyClass)"
    if self.doResurrection:
        resurrectedObject_M = self

def __del__N(self):
    global resurrectedObject_N
    finalizeMsgList.append(str(self)+" finalized (ResurrectableDummyClass)")
    if verbose:
        print str(self)+" finalized (ResurrectableDummyClass)"
    if self.doResurrection:
        resurrectedObject_N = self

# Aliases without the leading double underscore: referring to __del__I
# inside a class body would trigger private name mangling, so the tests
# use these names instead.
delI = __del__I
delJ = __del__J
delK = __del__K
delL = __del__L
delM = __del__M
delN = __del__N
class DummyClass():
    """Old-style class with no finalizer; tests attach one later."""

    def __init__(self, name):
        self.name = name

    def __str__(self):
        return self.name
class DummyClassDel():
    """Old-style class with a built-in (statically defined) __del__."""

    def __init__(self, name):
        self.name = name

    def __str__(self):
        return self.name

    def __del__(self):
        finalizeMsgList.append(str(self)+" finalized (DummyClassDel)")
        if verbose:
            print str(self)+" finalized (DummyClassDel)"
class DummyClassNew(object):
    """New-style class with no finalizer; tests attach one later."""

    def __init__(self, name):
        self.name = name

    def __str__(self):
        return self.name
class DummyClassDelNew(object):
    """New-style class with a built-in (statically defined) __del__."""

    def __init__(self, name):
        self.name = name

    def __str__(self):
        return self.name

    def __del__(self):
        finalizeMsgList.append(str(self)+" finalized (DummyClassDelNew)")
        if verbose:
            print str(self)+" finalized (DummyClassDelNew)"
class DummyFileClassNew(file):
    """Subclass of the Python 2 'file' builtin carrying a user __del__,
    to check a user finalizer on a type that has native cleanup.

    Uses 'name0' instead of 'name' -- presumably to avoid clashing with
    the builtin file.name attribute (TODO confirm).
    """

    def __init__(self, name):
        self.name0 = name

    def __str__(self):
        return self.name0

    def __del__(self):
        finalizeMsgList.append(str(self)+" finalized (DummyFileClassNew)")
        if verbose:
            print str(self)+" finalized (DummyFileClassNew)"
# Finalizer intended to be attached at class level
# (SomeClass.__del__ = delClass).
def __del__class(self):
    finalizeMsgList.append(str(self)+" finalized (acquired by class)")
    if verbose:
        print str(self)+" finalized (acquired by class)"

# Finalizer intended to be bound to a single instance via
# types.MethodType and assigned as instance.__del__.
def __del__object(self):
    finalizeMsgList.append(str(self)+" finalized (acquired by object)")
    if verbose:
        print str(self)+" finalized (acquired by object)"

# Zero-argument variant, attachable without binding.
def __del__object0():
    finalizeMsgList.append("_ finalized (acquired by object)")
    if verbose:
        print "_ finalized (acquired by object)"

# Unmangled aliases usable inside class bodies (leading '__' names
# would be name-mangled there).
delClass = __del__class
delObject = __del__object
delObject0 = __del__object0
class TestFinalizers(unittest.TestCase):
    """Exercises __del__ semantics on CPython and Jython: statically
    defined finalizers, finalizers acquired after class creation or
    after instantiation, instance-bound finalizers, and object
    resurrection -- each for both old- and new-style classes.  Results
    are observed through the module-level finalizeMsgList and
    resurrectedObject_* globals.
    """

    def test_finalizer_builtin_oldStyleClass(self):
        A = DummyClassDel("A")
        A = None
        runGCIfJython()
        self.assertIn("A finalized (DummyClassDel)", finalizeMsgList)

    def test_classAcquiresFinalizer_beforeInstanciation_oldStyleClass(self):
        DummyClass.__del__ = delClass
        B = DummyClass("B")
        B = None
        runGCIfJython()
        self.assertIn("B finalized (acquired by class)", finalizeMsgList)
        del DummyClass.__del__

    def test_classAcquiresFinalizer_afterInstanciation_oldStyleClass(self):
        #okay to fail in Jython without the manual __ensure_finalizer__ call
        C = DummyClass("C")
        DummyClass.__del__ = delClass
        try:
            # Jython-only API; raises on CPython and is ignored
            C.__ensure_finalizer__()
        except:
            pass
        C = None
        runGCIfJython()
        self.assertIn("C finalized (acquired by class)", finalizeMsgList)
        del DummyClass.__del__

    def test_instanceAcquiresFinalizer_bound_oldStyleClass(self):
        D = DummyClassDel("D")
        # Bind delObject with the *name string* as its 'self', so the
        # logged message is built from the name, not the instance
        dl = types.MethodType(delObject, D.name)
        D.__del__ = dl
        D = None
        runGCIfJython()
        # The instance-level __del__ shadows the class-level one
        self.assertNotIn("D finalized (DummyClassDel)", finalizeMsgList)
        self.assertIn("D finalized (acquired by object)", finalizeMsgList)

    def test_finalizer_builtin_newStyleClass(self):
        E = DummyClassDelNew("E")
        E = None
        runGCIfJython()
        self.assertIn("E finalized (DummyClassDelNew)", finalizeMsgList)

    def test_classAcquiresFinalizer_beforeInstanciation_newStyleClass(self):
        DummyClassNew.__del__ = delClass
        F = DummyClassNew("F")
        F = None
        runGCIfJython()
        self.assertIn("F finalized (acquired by class)", finalizeMsgList)
        del DummyClassNew.__del__

    def test_classAcquiresFinalizer_afterInstanciation_newStyleClass(self):
        #okay to fail in Jython without the manual __ensure_finalizer__ call
        G = DummyClassNew("G")
        DummyClassNew.__del__ = delClass
        try:
            G.__ensure_finalizer__()
        except:
            pass
        G = None
        runGCIfJython()
        self.assertIn("G finalized (acquired by class)", finalizeMsgList)
        del DummyClassNew.__del__

    def test_instanceAcquiresFinalizer_bound_newStyleClass(self):
        """
        It seems, CPython prohibits new style instances from acquiring a finalizer.
        """
        H = DummyClassDelNew("H")
        H.__del__ = types.MethodType(delObject, H.name)
        H = None
        runGCIfJython()
        # Only the class-level __del__ runs for new-style instances
        self.assertIn("H finalized (DummyClassDelNew)", finalizeMsgList)
        self.assertNotIn("H finalized (acquired by object)", finalizeMsgList)

    def test_instanceAcquiresFinalizer_bound_newStyleClass2(self):
        """
        In CPython, new style instances can't acquire a finalizer.
        If one calls the instance-acquired __del__ manually, it works, but the gc
        will still call the old one.
        """
        H = DummyClassDelNew("H2")
        H.__del__ = types.MethodType(delObject, H.name)
        H.__del__()
        H = None
        runGCIfJython()
        self.assertIn("H2 finalized (DummyClassDelNew)", finalizeMsgList)
        self.assertIn("H2 finalized (acquired by object)", finalizeMsgList)

    def test_objectResurrection_oldStyleClass(self):
        ResurrectableDummyClass.__del__ = delI
        I = ResurrectableDummyClass("I")
        I = None
        runGCIfJython()
        self.assertIn("I finalized (ResurrectableDummyClass)", finalizeMsgList)
        # The finalizer stored the dying object into the global
        self.assertEqual(str(resurrectedObject_I), "I")

    def test_objectDoubleResurrection_oldStyleClass(self):
        #okay to fail in Jython without the manual ensureFinalizer calls
        ResurrectableDummyClass.__del__ = delJ
        J = ResurrectableDummyClass("J")
        J = None
        runGCIfJython()
        self.assertIn("J finalized (ResurrectableDummyClass)", finalizeMsgList)
        global resurrectedObject_J
        self.assertEqual(str(resurrectedObject_J), "J")
        J = resurrectedObject_J
        resurrectedObject_J = None
        self.assertIsNone(resurrectedObject_J)
        try:
            #For Jython one can restore the finalizer manually.
            #This is offered as an easy fix if the CPython behavior
            #in this test should be needed for some reason.
            J.__ensure_finalizer__()
        except:
            pass
        J = None
        runGCIfJython()
        self.assertEqual(str(resurrectedObject_J), "J")
        resurrectedObject_J.doResurrection = False
        try:
            #again...
            resurrectedObject_J.__ensure_finalizer__()
        except:
            pass
        resurrectedObject_J = None
        runGCIfJython()
        # With doResurrection False the finalizer no longer revives it
        self.assertIsNone(resurrectedObject_J)

    def test_objectDoubleResurrectionAndFinalize_oldStyleClass(self):
        #okay to fail in Jython without the manual __ensure_finalizer__ calls
        ResurrectableDummyClass.__del__ = delK
        K = ResurrectableDummyClass("K")
        K = None
        runGCIfJython()
        self.assertIn("K finalized (ResurrectableDummyClass)", finalizeMsgList)
        # Remove the first message so the assertIn below proves the
        # finalizer ran a *second* time
        finalizeMsgList.remove("K finalized (ResurrectableDummyClass)")
        self.assertNotIn("K finalized (ResurrectableDummyClass)", finalizeMsgList)
        global resurrectedObject_K
        self.assertEqual(str(resurrectedObject_K), "K")
        K = resurrectedObject_K
        resurrectedObject_K = None
        self.assertIsNone(resurrectedObject_K)
        try:
            K.__ensure_finalizer__()
        except:
            pass
        K = None
        runGCIfJython()
        self.assertIn("K finalized (ResurrectableDummyClass)", finalizeMsgList)
        self.assertEqual(str(resurrectedObject_K), "K")

    def test_objectResurrection_newStyleClass(self):
        ResurrectableDummyClassNew.__del__ = delL
        L = ResurrectableDummyClassNew("L")
        L = None
        runGCIfJython()
        # Message text says ResurrectableDummyClass because delL is
        # shared with the old-style tests
        self.assertIn("L finalized (ResurrectableDummyClass)", finalizeMsgList)
        self.assertEqual(str(resurrectedObject_L), "L")

    def test_objectDoubleResurrection_newStyleClass(self):
        #okay to fail in Jython without the manual __ensure_finalizer__ calls
        ResurrectableDummyClassNew.__del__ = delM
        M = ResurrectableDummyClassNew("M")
        M = None
        runGCIfJython()
        self.assertIn("M finalized (ResurrectableDummyClass)", finalizeMsgList)
        global resurrectedObject_M
        self.assertEqual(str(resurrectedObject_M), "M")
        M = resurrectedObject_M
        resurrectedObject_M = None
        # NOTE(review): the second argument here is assertIsNone's
        # failure-message parameter; the 'None' looks like a stray
        self.assertIsNone(resurrectedObject_M, None)
        try:
            M.__ensure_finalizer__()
        except:
            pass
        M = None
        runGCIfJython()
        self.assertEqual(str(resurrectedObject_M), "M")

    def test_objectDoubleResurrectionAndFinalize_newStyleClass(self):
        #okay to fail in Jython without the manual __ensure_finalizer__ calls
        ResurrectableDummyClassNew.__del__ = delN
        N = ResurrectableDummyClassNew("N")
        N = None
        runGCIfJython()
        self.assertIn("N finalized (ResurrectableDummyClass)", finalizeMsgList)
        finalizeMsgList.remove("N finalized (ResurrectableDummyClass)")
        self.assertNotIn("N finalized (ResurrectableDummyClass)", finalizeMsgList)
        global resurrectedObject_N
        self.assertEqual(str(resurrectedObject_N), "N")
        N = resurrectedObject_N
        resurrectedObject_N = None
        self.assertIsNone(resurrectedObject_N)
        try:
            N.__ensure_finalizer__()
        except:
            pass
        N = None
        runGCIfJython()
        self.assertIn("N finalized (ResurrectableDummyClass)", finalizeMsgList)
        self.assertEqual(str(resurrectedObject_N), "N")

    def test_file_overwrite_del(self):
        O = DummyFileClassNew("O")
        O = None
        runGCIfJython()
        self.assertIn("O finalized (DummyFileClassNew)", finalizeMsgList)
# Allow running this test module directly
if __name__ == '__main__':
    unittest.main()
| |
#
# TAP.py - TAP parser
#
# A pyparsing parser to process the output of the Perl
# "Test Anything Protocol"
# (http://search.cpan.org/~petdance/TAP-1.00/TAP.pm)
#
# TAP output lines are preceded or followed by a test number range:
# 1..n
# with 'n' TAP output lines.
#
# The general format of a TAP output line is:
# ok/not ok (required)
# Test number (recommended)
# Description (recommended)
# Directive (only when necessary)
#
# A TAP output line may also indicate abort of the test suit with the line:
# Bail out!
# optionally followed by a reason for bailing
#
# Copyright 2008, by Paul McGuire
#
from pyparsingOD import ParserElement,LineEnd,Optional,Word,nums,Regex,\
Literal,CaselessLiteral,Group,OneOrMore,Suppress,restOfLine,\
FollowedBy,empty
# Public API of this module
__all__ = ['tapOutputParser', 'TAPTest', 'TAPSummary']

# newlines are significant whitespace, so set default skippable
# whitespace to just spaces and tabs
ParserElement.setDefaultWhitespaceChars(" \t")

NL = LineEnd().suppress()
integer = Word(nums)
# The plan line "1..N" announcing how many tests to expect
plan = '1..' + integer("ubound")
OK,NOT_OK = map(Literal,['ok','not ok'])
testStatus = (OK | NOT_OK)

# Free-text description: everything up to a '#' directive or end of line;
# the parse action strips a leading "- " decoration
description = Regex("[^#\n]+")
description.setParseAction(lambda t:t[0].lstrip('- '))

# Directives after '#': "TODO ..." kept as-is; "SKIP..." normalized via
# a parse action so both forms yield a [keyword, text] pair
TODO,SKIP = map(CaselessLiteral,'TODO SKIP'.split())
directive = Group(Suppress('#') + (TODO + restOfLine |
    FollowedBy(SKIP) +
    restOfLine.copy().setParseAction(lambda t:['SKIP',t[0]]) ))

# Comment line: '#' plus the rest of the line ('empty' skips whitespace)
commentLine = Suppress("#") + empty + restOfLine

# One TAP result line, optionally preceded by comment lines
testLine = Group(
    Optional(OneOrMore(commentLine + NL))("comments") +
    testStatus("passed") +
    Optional(integer)("testNumber") +
    Optional(description)("description") +
    Optional(directive)("directive")
    )
# "Bail out!" aborts the suite, with an optional trailing reason
bailLine = Group(Literal("Bail out!")("BAIL") +
    empty + Optional(restOfLine)("reason"))

# '&' (Each) lets the plan appear before or after the test lines
tapOutputParser = Optional(Group(plan)("plan") + NL) & \
    Group(OneOrMore((testLine|bailLine) + NL))("tests")
class TAPTest(object):
    """One parsed TAP result line: test number, pass/fail status and
    SKIP/TODO directive flags."""

    def __init__(self,results):
        # 'results' is the pyparsing group produced for one test line
        self.num = results.testNumber
        self.passed = (results.passed=="ok")
        self.skipped = self.todo = False
        if results.directive:
            # directive is a nested group: [[keyword, text]]
            self.skipped = (results.directive[0][0]=='SKIP')
            self.todo = (results.directive[0][0]=='TODO')

    @classmethod
    def bailedTest(cls,num):
        """Create a placeholder (skipped) test for a planned test that
        was never run because the suite bailed out."""
        ret = TAPTest(empty.parseString(""))
        ret.num = num
        ret.skipped = True
        return ret
class TAPSummary(object):
    """Aggregate of a whole TAP run: pass/fail/skip/todo/bonus buckets
    plus an overall suite verdict.  Installed as the parser's parse
    action, so parsing TAP text yields a TAPSummary directly."""

    def __init__(self,results):
        self.passedTests = []
        self.failedTests = []
        self.skippedTests = []
        self.todoTests = []
        self.bonusTests = []
        self.bail = False
        # Expected test numbers come from the plan line when present,
        # otherwise from simply counting the parsed test lines
        if results.plan:
            expected = list(range(1, int(results.plan.ubound)+1))
        else:
            expected = list(range(1,len(results.tests)+1))

        for i,res in enumerate(results.tests):
            # test for bail out
            if res.BAIL:
                #~ print "Test suite aborted: " + res.reason
                #~ self.failedTests += expected[i:]
                self.bail = True
                # All remaining planned tests are recorded as skipped
                self.skippedTests += [ TAPTest.bailedTest(ii) for ii in expected[i:] ]
                self.bailReason = res.reason
                break

            #~ print res.dump()
            testnum = i+1
            if res.testNumber != "":
                # An explicit number overrides the positional count;
                # warn when it disagrees with the expected sequence
                if testnum != int(res.testNumber):
                    print("ERROR! test %(testNumber)s out of sequence" % res)
                testnum = int(res.testNumber)
            res["testNumber"] = testnum

            test = TAPTest(res)
            if test.passed:
                self.passedTests.append(test)
            else:
                self.failedTests.append(test)
            if test.skipped: self.skippedTests.append(test)
            if test.todo: self.todoTests.append(test)
            if test.todo and test.passed: self.bonusTests.append(test)

        # Suite passes when it didn't bail and every failure was a TODO.
        # (Set difference works by object identity here: TAPTest defines
        # no __eq__/__hash__, and both lists hold the same objects.)
        self.passedSuite = not self.bail and (set(self.failedTests)-set(self.todoTests) == set())

    def summary(self, showPassed=False, showAll=False):
        """Return a multi-line text report.  'showAll' lists every
        bucket; 'showPassed' additionally lists the passing tests."""
        testListStr = lambda tl : "[" + ",".join(str(t.num) for t in tl) + "]"
        summaryText = []
        if showPassed or showAll:
            summaryText.append( "PASSED: %s" % testListStr(self.passedTests) )
        if self.failedTests or showAll:
            summaryText.append( "FAILED: %s" % testListStr(self.failedTests) )
        if self.skippedTests or showAll:
            summaryText.append( "SKIPPED: %s" % testListStr(self.skippedTests) )
        if self.todoTests or showAll:
            summaryText.append( "TODO: %s" % testListStr(self.todoTests) )
        if self.bonusTests or showAll:
            summaryText.append( "BONUS: %s" % testListStr(self.bonusTests) )
        if self.passedSuite:
            summaryText.append( "PASSED" )
        else:
            summaryText.append( "FAILED" )
        return "\n".join(summaryText)
# create TAPSummary objects from tapOutput parsed results, by setting
# class as parse action (so parseString returns a TAPSummary, not
# raw tokens)
tapOutputParser.setParseAction(TAPSummary)
if __name__ == "__main__":
test1 = """\
1..4
ok 1 - Input file opened
not ok 2 - First line of the input valid
ok 3 - Read the rest of the file
not ok 4 - Summarized correctly # TODO Not written yet
"""
test2 = """\
ok 1
not ok 2 some description # TODO with a directive
ok 3 a description only, no directive
ok 4 # TODO directive only
ok a description only, no directive
ok # Skipped only a directive, no description
ok
"""
test3 = """\
ok - created Board
ok
ok
not ok
ok
ok
ok
ok
# +------+------+------+------+
# | |16G | |05C |
# | |G N C | |C C G |
# | | G | | C +|
# +------+------+------+------+
# |10C |01G | |03C |
# |R N G |G A G | |C C C |
# | R | G | | C +|
# +------+------+------+------+
# | |01G |17C |00C |
# | |G A G |G N R |R N R |
# | | G | R | G |
# +------+------+------+------+
ok - board has 7 tiles + starter tile
1..9
"""
test4 = """\
1..4
ok 1 - Creating test program
ok 2 - Test program runs, no error
not ok 3 - infinite loop # TODO halting problem unsolved
not ok 4 - infinite loop 2 # TODO halting problem unsolved
"""
test5 = """\
1..20
ok - database handle
not ok - failed database login
Bail out! Couldn't connect to database.
"""
test6 = """\
ok 1 - retrieving servers from the database
# need to ping 6 servers
ok 2 - pinged diamond
ok 3 - pinged ruby
not ok 4 - pinged sapphire
ok 5 - pinged onyx
not ok 6 - pinged quartz
ok 7 - pinged gold
1..7
"""
for test in (test1,test2,test3,test4,test5,test6):
print(test)
tapResult = tapOutputParser.parseString(test)[0]
print(tapResult.summary(showAll=True))
print()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from ..describe import Description, autoDescribeRoute
from ..rest import Resource, filtermodel
from girder.models.model_base import AccessException
from girder.constants import AccessType, SettingKey
from girder.utility import mail_utils
from girder.api import access
class Group(Resource):
"""API Endpoint for groups."""
def __init__(self):
    """Registers the REST routes served by the group resource."""
    super(Group, self).__init__()
    self.resourceName = 'group'

    # Handlers for deleteGroup / removeFromGroup / demote are defined
    # further down in this class.
    self.route('DELETE', (':id',), self.deleteGroup)
    self.route('DELETE', (':id', 'member'), self.removeFromGroup)
    self.route('DELETE', (':id', 'moderator'), self.demote)
    self.route('DELETE', (':id', 'admin'), self.demote)
    self.route('GET', (), self.find)
    self.route('GET', (':id',), self.getGroup)
    self.route('GET', (':id', 'access'), self.getGroupAccess)
    self.route('GET', (':id', 'invitation'), self.getGroupInvitations)
    self.route('GET', (':id', 'member'), self.listMembers)
    self.route('POST', (), self.createGroup)
    self.route('POST', (':id', 'invitation'), self.inviteToGroup)
    self.route('POST', (':id', 'member'), self.joinGroup)
    self.route('POST', (':id', 'moderator'), self.promoteToModerator)
    self.route('POST', (':id', 'admin'), self.promoteToAdmin)
    self.route('PUT', (':id',), self.updateGroup)
@access.public
@filtermodel(model='group')
@autoDescribeRoute(
    Description('Search for groups or list all groups.')
    .param('text', 'Pass this to perform a full-text search for groups.', required=False)
    .param('exact', 'If true, only return exact name matches. This is '
           'case sensitive.', required=False, dataType='boolean', default=False)
    .pagingParams(defaultSort='name')
    .errorResponse()
)
def find(self, text, exact, limit, offset, sort, params):
    """List groups visible to the current user, optionally filtered by
    an exact name match or a full-text search."""
    groupModel = self.model('group')
    currentUser = self.getCurrentUser()

    if text is None:
        # No filter at all: plain paged listing
        cursor = groupModel.list(
            user=currentUser, offset=offset, limit=limit, sort=sort)
    elif exact:
        # Case-sensitive exact name match
        cursor = groupModel.find(
            {'name': text}, offset=offset, limit=limit, sort=sort)
    else:
        cursor = groupModel.textSearch(
            text, user=currentUser, offset=offset, limit=limit, sort=sort)

    return list(cursor)
@access.user
@filtermodel(model='group')
@autoDescribeRoute(
    Description('Create a new group.')
    .responseClass('Group')
    .notes('Must be logged in.')
    .param('name', 'Unique name for the group.', strip=True)
    .param('description', 'Description of the group.', required=False,
           default='', strip=True)
    .param('public', 'Whether the group should be publicly visible.',
           required=False, dataType='boolean', default=False)
    .errorResponse()
    .errorResponse('Write access was denied on the parent', 403)
)
def createGroup(self, name, description, public, params):
    """Create a group with the current user as its creator."""
    creator = self.getCurrentUser()
    return self.model('group').createGroup(
        name=name,
        creator=creator,
        description=description,
        public=public)
@access.public
@filtermodel(model='group')
@autoDescribeRoute(
    Description('Get a group by ID.')
    .responseClass('Group')
    .modelParam('id', model='group', level=AccessType.READ)
    .errorResponse('ID was invalid.')
    .errorResponse('Read access was denied for the group.', 403)
)
def getGroup(self, group, params):
    """Return the group document, annotated with the server-wide
    add-to-group policy so clients can render the right controls."""
    policy = self.model('setting').get(SettingKey.ADD_TO_GROUP_POLICY)
    group['_addToGroupPolicy'] = policy
    return group
@access.public
@filtermodel(model='group', addFields={'access', 'requests'})
@autoDescribeRoute(
    Description('Get the access control list for a group.')
    .responseClass('Group')
    .modelParam('id', model='group', level=AccessType.READ)
    .errorResponse('ID was invalid.')
    .errorResponse('Read access was denied for the group.', 403)
)
def getGroupAccess(self, group, params):
    """Return the group along with its full ACL and the list of
    pending membership requests."""
    groupModel = self.model('group')
    acl = groupModel.getFullAccessList(group)
    pending = groupModel.getFullRequestList(group)
    group['access'] = acl
    group['requests'] = list(pending)
    return group
@access.public
@filtermodel(model='user')
@autoDescribeRoute(
    Description('Show outstanding invitations for a group.')
    .responseClass('Group')
    .modelParam('id', model='group', level=AccessType.READ)
    .pagingParams(defaultSort='lastName')
    .errorResponse()
    .errorResponse('Read access was denied for the group.', 403)
)
def getGroupInvitations(self, group, limit, offset, sort, params):
    """Return the users who hold outstanding invitations to this group."""
    invites = self.model('group').getInvites(group, limit, offset, sort)
    return list(invites)
@access.user
@filtermodel(model='group')
@autoDescribeRoute(
    Description('Update a group by ID.')
    .modelParam('id', model='group', level=AccessType.WRITE)
    .param('name', 'The name to set on the group.', required=False, strip=True)
    .param('description', 'Description for the group.', required=False, strip=True)
    .param('public', 'Whether the group should be publicly visible', dataType='boolean',
           required=False)
    .param('addAllowed', 'Can admins or moderators directly add members '
           'to this group? Only system administrators are allowed to '
           'set this field', required=False,
           enum=['default', 'no', 'yesmod', 'yesadmin'])
    .errorResponse()
    .errorResponse('Write access was denied for the group.', 403)
)
def updateGroup(self, group, name, description, public, addAllowed, params):
    """Apply each provided (non-None) field to the group and save it."""
    groupModel = self.model('group')

    if public is not None:
        groupModel.setPublic(group, public)

    if name is not None:
        group['name'] = name
    if description is not None:
        group['description'] = description
    if addAllowed is not None:
        # Only site administrators may change the direct-add policy
        self.requireAdmin(self.getCurrentUser())
        group['addAllowed'] = addAllowed

    return groupModel.updateGroup(group)
@access.user
@filtermodel(model='group', addFields={'access', 'requests'})
@autoDescribeRoute(
    Description('Request to join a group, or accept an invitation to join.')
    .responseClass('Group')
    .modelParam('id', model='group', level=AccessType.READ)
    .errorResponse('ID was invalid.')
    .errorResponse('You were not invited to this group, or do not have '
                   'read access to it.', 403)
)
def joinGroup(self, group, params):
    """Join the group (or accept a pending invitation) as the current
    user; the result carries the updated ACL and request list."""
    groupModel = self.model('group')
    joined = groupModel.joinGroup(group, self.getCurrentUser())
    joined['access'] = groupModel.getFullAccessList(joined)
    joined['requests'] = list(groupModel.getFullRequestList(joined))
    return joined
@access.public
@filtermodel(model='user')
@autoDescribeRoute(
    Description('List members of a group.')
    .modelParam('id', model='group', level=AccessType.READ)
    .pagingParams(defaultSort='lastName')
    .errorResponse('ID was invalid.')
    .errorResponse('Read access was denied for the group.', 403)
)
def listMembers(self, group, limit, offset, sort, params):
    """Return the paged list of this group's member users."""
    members = self.model('group').listMembers(
        group, offset=offset, limit=limit, sort=sort)
    return list(members)
@access.user
@filtermodel(model='group', addFields={'access', 'requests'})
@autoDescribeRoute(
    Description("Invite a user to join a group, or accept a user's request to join.")
    .responseClass('Group')
    .notes('The "force" option to this endpoint is only available to '
           'administrators and can be used to bypass the invitation process'
           ' and instead add the user directly to the group.')
    .modelParam('id', model='group', level=AccessType.WRITE)
    .modelParam('userId', 'The ID of the user to invite or accept.',
                destName='userToInvite', level=AccessType.READ, paramType='form')
    .param('level', 'The access level the user will be given when they accept the invitation.',
           required=False, dataType='integer', default=AccessType.READ)
    .param('quiet', 'If you do not want this action to send an email to '
           'the target user, set this to true.', dataType='boolean',
           required=False, default=False)
    .param('force', 'Add user directly rather than sending an invitation '
           '(admin-only option).', dataType='boolean', required=False, default=False)
    .errorResponse()
    .errorResponse('Write access was denied for the group.', 403)
)
def inviteToGroup(self, group, userToInvite, level, quiet, force, params):
    """Invite a user to this group, or (with force) add them directly
    when the server's add-to-group policy permits it."""
    groupModel = self.model('group')
    user = self.getCurrentUser()

    if force:
        if not user['admin']:
            # Non-site-admins may only force-add when both the
            # server-wide policy and this group's addAllowed field
            # permit it at their access level.
            mustBeAdmin = True
            addPolicy = self.model('setting').get(SettingKey.ADD_TO_GROUP_POLICY)
            addGroup = group.get('addAllowed', 'default')
            if addGroup not in ['no', 'yesadmin', 'yesmod']:
                # 'default' (or anything unrecognized) falls back to
                # the server-wide policy value
                addGroup = addPolicy
            if (groupModel.hasAccess(
                    group, user, AccessType.ADMIN) and
                    ('mod' in addPolicy or 'admin' in addPolicy) and
                    addGroup.startswith('yes')):
                # Group admins may add when any direct-add is enabled
                mustBeAdmin = False
            elif (groupModel.hasAccess(
                    group, user, AccessType.WRITE) and
                    'mod' in addPolicy and
                    addGroup == 'yesmod'):
                # Group moderators may add only under 'yesmod'
                mustBeAdmin = False
            if mustBeAdmin:
                self.requireAdmin(user)
        groupModel.addUser(group, userToInvite, level=level)
    else:
        # Can only invite into access levels that you yourself have
        groupModel.requireAccess(group, user, level)
        groupModel.inviteUser(group, userToInvite, level)

        if not quiet:
            html = mail_utils.renderTemplate('groupInvite.mako', {
                'userToInvite': userToInvite,
                'user': user,
                'group': group
            })
            mail_utils.sendEmail(
                to=userToInvite['email'], text=html,
                subject="Girder: You've been invited to a group")

    group['access'] = groupModel.getFullAccessList(group)
    group['requests'] = list(groupModel.getFullRequestList(group))
    return group
@access.user
@filtermodel(model='group', addFields={'access'})
@autoDescribeRoute(
Description('Promote a member to be a moderator of the group.')
.responseClass('Group')
.modelParam('id', model='group', level=AccessType.ADMIN)
.modelParam('userId', 'The ID of the user to promote.',
level=AccessType.READ, paramType='formData')
.errorResponse('ID was invalid.')
.errorResponse("You don't have permission to promote users.", 403)
)
    def promoteToModerator(self, group, user, params):
        """Grant *user* moderator (WRITE) access on *group*; delegates to :meth:`_promote`."""
        return self._promote(group, user, AccessType.WRITE)
@access.user
@filtermodel(model='group', addFields={'access'})
@autoDescribeRoute(
Description('Promote a member to be an administrator of the group.')
.responseClass('Group')
.modelParam('id', model='group', level=AccessType.ADMIN)
.modelParam('userId', 'The ID of the user to promote.',
level=AccessType.READ, paramType='formData')
.errorResponse('ID was invalid.')
.errorResponse("You don't have permission to promote users.", 403)
)
    def promoteToAdmin(self, group, user, params):
        """Grant *user* administrator (ADMIN) access on *group*; delegates to :meth:`_promote`."""
        return self._promote(group, user, AccessType.ADMIN)
def _promote(self, group, user, level):
"""
Promote a user to moderator or administrator.
:param group: The group to promote within.
:param user: The user to promote.
:param level: Either WRITE or ADMIN, for moderator or administrator.
:type level: AccessType
:returns: The updated group document.
"""
if not group['_id'] in user.get('groups', []):
raise AccessException('That user is not a group member.')
group = self.model('group').setUserAccess(group, user, level=level, save=True)
group['access'] = self.model('group').getFullAccessList(group)
return group
@access.user
@filtermodel(model='group', addFields={'access', 'requests'})
@autoDescribeRoute(
Description('Demote a user to a normal group member.')
.responseClass('Group')
.modelParam('id', model='group', level=AccessType.ADMIN)
.modelParam('userId', 'The ID of the user to demote.',
level=AccessType.READ, paramType='formData')
.errorResponse()
.errorResponse("You don't have permission to demote users.", 403)
)
def demote(self, group, user, params):
groupModel = self.model('group')
group = groupModel.setUserAccess(group, user, level=AccessType.READ, save=True)
group['access'] = groupModel.getFullAccessList(group)
group['requests'] = list(groupModel.getFullRequestList(group))
return group
@access.user
@filtermodel(model='group', addFields={'access', 'requests'})
@autoDescribeRoute(
Description('Remove a user from a group, or uninvite them.')
.responseClass('Group')
.notes('If the specified user is not yet a member of the group, this '
'will delete any outstanding invitation or membership request for '
'the user. Passing no userId parameter will assume that the '
'current user is removing themself.')
.modelParam('id', model='group', level=AccessType.READ)
.modelParam('userId', 'The ID of the user to remove. If not passed, will '
'remove yourself from the group.', required=False,
level=AccessType.READ, destName='userToRemove', paramType='formData')
.errorResponse()
.errorResponse("You don't have permission to remove that user.", 403)
)
    def removeFromGroup(self, group, userToRemove, params):
        """Remove a member from a group, or revoke their invitation/request.

        When ``userToRemove`` is None, the current user removes themself.
        Removing a group admin requires group ADMIN access; removing any
        other user (besides yourself) requires group WRITE access.

        :returns: The group with its full access and request lists attached.
        """
        user = self.getCurrentUser()
        groupModel = self.model('group')
        if userToRemove is None:
            # Assume user is removing themself from the group
            userToRemove = user
        # If removing someone else, you must have at least as high an
        # access level as they do, and you must have at least write access
        # to remove any user other than yourself.
        if user['_id'] != userToRemove['_id']:
            if groupModel.hasAccess(group, userToRemove, AccessType.ADMIN):
                groupModel.requireAccess(group, user, AccessType.ADMIN)
            else:
                groupModel.requireAccess(group, user, AccessType.WRITE)
        group = groupModel.removeUser(group, userToRemove)
        group['access'] = groupModel.getFullAccessList(group)
        group['requests'] = list(groupModel.getFullRequestList(group))
        return group
@access.user
@autoDescribeRoute(
Description('Delete a group by ID.')
.modelParam('id', model='group', level=AccessType.ADMIN)
.errorResponse('ID was invalid.')
.errorResponse('Admin access was denied for the group.', 403)
)
    def deleteGroup(self, group, params):
        """Permanently delete *group* and return a confirmation message."""
        self.model('group').remove(group)
        return {'message': 'Deleted the group %s.' % group['name']}
| |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# MIT License. See license.txt
from __future__ import unicode_literals
"""build query for doclistview and return results"""
import webnotes, json
import webnotes.defaults
# Module-level mutable state shared by the query-builder functions below;
# (re)populated per request by prepare_args / load_doctypes.
tables = None   # list of '`tabX`' table names involved in the current query
doctypes = {}   # doctype name -> loaded doctype doclist (DocField/DocPerm rows)
roles = []      # roles held by the current session user
@webnotes.whitelist()
def get():
	"""Whitelisted entry point for doclistview: run the query and compress the rows."""
	return compress(execute(**get_form_params()))
def get_form_params():
	"""Return the request's form_dict prepared as kwargs for :func:`execute`.

	Drops the internal "cmd" routing argument and JSON-decodes the
	``filters``, ``fields`` and ``docstatus`` values when they arrive as
	strings (as they do over HTTP).
	"""
	data = webnotes._dict(webnotes.form_dict)
	# Tolerate a missing "cmd" key (direct/internal calls don't set it);
	# the old `del data["cmd"]` raised KeyError in that case.
	data.pop("cmd", None)
	for key in ("filters", "fields", "docstatus"):
		if isinstance(data.get(key), basestring):
			data[key] = json.loads(data[key])
	return data
def execute(doctype, query=None, filters=None, fields=None, docstatus=None,
	group_by=None, order_by=None, limit_start=0, limit_page_length=None,
	as_list=False, with_childnames=False, debug=False):
	"""Build and run a SELECT for ``doctype`` and return the rows.

	When ``query`` is given it bypasses the builder entirely and is run
	via :func:`run_custom_query`. Otherwise the pieces (fields, tables,
	conditions, grouping, ordering, limit) are assembled by
	:func:`prepare_args` and interpolated into the SQL template below.
	Rows are dicts unless ``as_list`` is True.
	"""
	if query:
		return run_custom_query(query)
	if not filters: filters = []
	if not docstatus: docstatus = []
	args = prepare_args(doctype, filters, fields, docstatus, group_by, order_by, with_childnames)
	args.limit = add_limit(limit_start, limit_page_length)
	query = """select %(fields)s from %(tables)s where %(conditions)s
		%(group_by)s order by %(order_by)s %(limit)s""" % args
	return webnotes.conn.sql(query, as_dict=not as_list, debug=debug)
def prepare_args(doctype, filters, fields, docstatus, group_by, order_by, with_childnames):
	"""Assemble the SELECT components into a webnotes._dict for :func:`execute`.

	Side effects: sets the module-global ``tables`` and loads ``doctypes``
	and ``roles`` (permission checks happen inside load_doctypes).
	"""
	global tables
	tables = get_tables(doctype, fields)
	load_doctypes()
	remove_user_tags(doctype, fields)
	conditions = build_conditions(doctype, fields, filters, docstatus)
	args = webnotes._dict()
	if with_childnames:
		for t in tables:
			if t != "`tab" + doctype + "`":
				# expose each child table's name column as '<ChildDocType>:name'
				fields.append(t + ".name as '%s:name'" % t[4:-1])
	# query dict
	args.tables = ', '.join(tables)
	args.conditions = ' and '.join(conditions)
	args.fields = ', '.join(fields)
	# default sort: newest modified first on the main table
	args.order_by = order_by or tables[0] + '.modified desc'
	args.group_by = group_by and (" group by " + group_by) or ""
	check_sort_by_table(args.order_by)
	return args
def compress(data):
	"""Split a homogeneous list of row dicts into {"keys": ..., "values": ...}.

	The key order of the first row defines the column order; empty/falsy
	input is returned unchanged.
	"""
	if not data:
		return data
	keys = data[0].keys()
	values = [[row[key] for key in keys] for row in data]
	return {
		"keys": keys,
		"values": values
	}
def check_sort_by_table(sort_by):
	"""check atleast 1 column selected from the sort by table """
	if "." in sort_by:
		tbl = sort_by.split('.')[0]
		if tbl not in tables:
			if tbl.startswith('`'):
				# strip the surrounding `tab...` decoration for the message
				tbl = tbl[4:-1]
			# msgprint with raise_exception aborts the request
			webnotes.msgprint("Please select atleast 1 column from '%s' to sort"\
				% tbl, raise_exception=1)
def run_custom_query(query):
	"""run custom query"""
	# legacy placeholder: saved report queries may use %(key)s for the name column
	if '%(key)s' in query:
		query = query.replace('%(key)s', 'name')
	return webnotes.conn.sql(query, as_dict=1)
def load_doctypes():
	"""Load the doctype definition for every query table and cache session roles.

	Populates the module globals ``doctypes`` and ``roles``; raises
	webnotes.PermissionError when the user may not read one of the tables.
	"""
	global doctypes, roles
	import webnotes.model.doctype
	roles = webnotes.get_roles()
	for t in tables:
		if t.startswith('`'):
			# '`tabX`' -> 'X'
			doctype = t[4:-1]
			if not webnotes.has_permission(doctype):
				# call-style raise is Python-3 compatible; identical on Python 2
				# (was: raise webnotes.PermissionError, doctype)
				raise webnotes.PermissionError(doctype)
			doctypes[doctype] = webnotes.model.doctype.get(doctype)
def remove_user_tags(doctype, fields):
	"""Drop the first _user_tags field in-place when the table has no such column."""
	for fld in fields:
		if '_user_tags' not in fld:
			continue
		if '_user_tags' not in get_table_columns(doctype):
			fields.remove(fld)
			break
def add_limit(limit_start, limit_page_length):
	"""Return a MySQL LIMIT clause, or '' when no page length is requested."""
	return 'limit %s, %s' % (limit_start, limit_page_length) if limit_page_length else ''
def build_conditions(doctype, fields, filters, docstatus):
	"""build conditions"""
	if docstatus:
		conditions = [tables[0] + '.docstatus in (' + ','.join(docstatus) + ')']
	else:
		# default condition
		# excludes docstatus 2 rows (cancelled documents, by webnotes convention)
		conditions = [tables[0] + '.docstatus < 2']
	# make conditions from filters
	build_filter_conditions(filters, conditions)
	# join parent, child tables
	for tname in tables[1:]:
		conditions.append(tname + '.parent = ' + tables[0] + '.name')
	# match conditions
	match_conditions = build_match_conditions(doctype, fields)
	if match_conditions:
		conditions.append(match_conditions)
	return conditions
def build_filter_conditions(filters, conditions):
	"""build conditions from user filters"""
	from webnotes.utils import cstr
	global tables
	if not tables: tables = []
	# NOTE(review): values are escaped by hand (quote backslashing), not
	# parameterized — this relies on filters coming from trusted callers.
	for f in filters:
		if isinstance(f, basestring):
			# a plain string filter is a raw SQL fragment, passed through as-is
			conditions.append(f)
		else:
			# list filter: f = [doctype, fieldname, operator, value]
			tname = ('`tab' + f[0] + '`')
			if not tname in tables:
				tables.append(tname)
			# prepare in condition
			if f[2] in ['in', 'not in']:
				# value is a comma-separated string; quote each option
				opts = ["'" + t.strip().replace("'", "\\'") + "'" for t in f[3].split(',')]
				f[3] = "(" + ', '.join(opts) + ")"
				conditions.append(tname + '.' + f[1] + " " + f[2] + " " + f[3])
			else:
				if isinstance(f[3], basestring):
					f[3] = "'" + f[3].replace("'", "\\'") + "'"
					conditions.append(tname + '.' + f[1] + " " + f[2] + " " + f[3])
				else:
					# numeric comparison: treat NULL as 0
					conditions.append('ifnull(' + tname + '.' + f[1] + ",0) " + f[2] \
						+ " " + cstr(f[3]))
def build_match_conditions(doctype, fields=None, as_condition=True):
	"""add match conditions if applicable

	Walks the doctype's DocPerm rows for the user's roles; any ``match``
	restriction limits visible rows to the user's default values for that
	key. Returns a SQL condition string when ``as_condition`` is True,
	otherwise a dict of {document_key: [allowed values]}.
	"""
	global tables, roles
	match_filters = {}
	match_conditions = []
	match = True
	if not tables or not doctypes:
		tables = get_tables(doctype, fields)
		load_doctypes()
	if not roles:
		roles = webnotes.get_roles()
	for d in doctypes[doctype]:
		if d.doctype == 'DocPerm' and d.parent == doctype:
			if d.role in roles:
				if d.match: # role applicable
					# match may be "document_key:default_key" or a single key
					if ':' in d.match:
						document_key, default_key = d.match.split(":")
					else:
						default_key = document_key = d.match
					# "** No Match **" deliberately matches nothing when the
					# user has no defaults for this key
					for v in webnotes.defaults.get_user_default_as_list(default_key, \
						webnotes.session.user) or ["** No Match **"]:
						if as_condition:
							match_conditions.append('`tab%s`.%s="%s"' % (doctype,
								document_key, v))
						else:
							if v:
								match_filters.setdefault(document_key, [])
								if v not in match_filters[document_key]:
									match_filters[document_key].append(v)
				elif d.read == 1 and d.permlevel == 0:
					# don't restrict if another read permission at level 0
					# exists without a match restriction
					match = False
					match_filters = {}
	if as_condition:
		conditions = ""
		if match_conditions and match:
			conditions = '('+ ' or '.join(match_conditions) +')'
		doctype_conditions = get_doctype_conditions(doctype)
		if doctype_conditions:
			conditions += ' and ' + doctype_conditions if conditions else doctype_conditions
		return conditions
	else:
		return match_filters
def get_doctype_conditions(doctype):
	"""Return extra conditions from the doctype module's get_match_conditions hook, if defined."""
	from webnotes.model.code import load_doctype_module
	module = load_doctype_module(doctype)
	if not (module and hasattr(module, 'get_match_conditions')):
		return None
	return module.get_match_conditions()
def get_tables(doctype, fields):
	"""Return the list of '`tabX`' tables referenced by the query.

	The main doctype's table always comes first; additional tables are
	extracted from dotted field references, unwrapping group_concat(...)
	and ifnull(...) and adding backticks where missing.
	"""
	tables = ['`tab' + doctype + '`']
	if fields:
		for f in fields:
			if '.' not in f:
				continue
			table_name = f.split('.')[0]
			if table_name.lower().startswith('group_concat('):
				table_name = table_name[13:]
			if table_name.lower().startswith('ifnull('):
				table_name = table_name[7:]
			if table_name[0] != '`':
				table_name = '`' + table_name + '`'
			if table_name not in tables:
				tables.append(table_name)
	return tables
@webnotes.whitelist()
def save_report():
	"""save report

	Creates or updates a Report Builder 'Report' document from the posted
	name, doctype and json layout; returns the saved document's name.
	"""
	from webnotes.model.doc import Document
	data = webnotes.form_dict
	if webnotes.conn.exists('Report', data['name']):
		# update the existing report of this name
		d = Document('Report', data['name'])
	else:
		d = Document('Report')
		d.report_name = data['name']
		d.ref_doctype = data['doctype']
	d.report_type = "Report Builder"
	d.json = data['json']
	webnotes.bean([d]).save()
	webnotes.msgprint("%s saved." % d.name)
	return d.name
@webnotes.whitelist()
def export_query():
	"""export from report builder

	Runs the report query and writes the rows as a UTF-8 CSV into the
	response, with a serial-number column and human-readable labels.
	"""
	# TODO: validate use is allowed to export
	verify_export_allowed()
	ret = execute(**get_form_params())
	# column names in result order, from the DB driver's cursor description
	columns = [x[0] for x in webnotes.conn.get_description()]
	data = [['Sr'] + get_labels(columns),]
	# flatten dict
	cnt = 1
	for row in ret:
		flat = [cnt,]
		for c in columns:
			flat.append(row.get(c))
		data.append(flat)
		cnt += 1
	# convert to csv
	from cStringIO import StringIO
	import csv
	f = StringIO()
	writer = csv.writer(f)
	for r in data:
		# encode only unicode type strings and not int, floats etc.
		writer.writerow(map(lambda v: isinstance(v, unicode) and v.encode('utf-8') or v, r))
	f.seek(0)
	webnotes.response['result'] = unicode(f.read(), 'utf-8')
	webnotes.response['type'] = 'csv'
	# attribute the download to the query's main (first) table
	webnotes.response['doctype'] = [t[4:-1] for t in tables][0]
def verify_export_allowed():
	"""Raise PermissionError unless the user holds an export-capable role."""
	global roles
	roles = webnotes.get_roles()
	exporting_roles = ('Administrator', 'System Manager', 'Report Manager')
	if not any(r in roles for r in exporting_roles):
		raise webnotes.PermissionError
def get_labels(columns):
	"""Map column fieldnames to their DocField labels; fall back to title-case."""
	label_dict = dict(
		(d.fieldname, d.label)
		for dt in doctypes
		for d in doctypes[dt]
		if d.doctype == 'DocField' and d.fieldname
	)
	# keep map() so the Python 2 list return type is preserved
	return map(lambda x: label_dict.get(x, x.title()), columns)
@webnotes.whitelist()
def delete_items():
	"""Delete the posted list of documents, best-effort.

	Each item gets its on_trash hook (when present) before deletion;
	failures are logged and the loop continues with the remaining items.
	"""
	import json
	from webnotes.model import delete_doc
	from webnotes.model.code import get_obj
	il = json.loads(webnotes.form_dict.get('items'))
	doctype = webnotes.form_dict.get('doctype')
	for d in il:
		try:
			dt_obj = get_obj(doctype, d)
			if hasattr(dt_obj, 'on_trash'):
				dt_obj.on_trash()
			delete_doc(doctype, d)
		# Python-3 compatible form (was `except Exception, e` with e unused);
		# deliberately swallows per-item failures after logging the traceback.
		except Exception:
			webnotes.errprint(webnotes.getTraceback())
@webnotes.whitelist()
def get_stats(stats, doctype):
	"""get tag info

	For each requested tag column that exists on the table, returns the
	value counts; the special _user_tags column is exploded into
	per-tag totals via scrub_user_tags.
	"""
	import json
	tags = json.loads(stats)
	stats = {}
	columns = get_table_columns(doctype)
	for tag in tags:
		# silently skip columns the table doesn't have
		if not tag in columns: continue
		tagcount = execute(doctype, fields=[tag, "count(*)"],
			filters=["ifnull(%s,'')!=''" % tag], group_by=tag, as_list=True)
		if tag=='_user_tags':
			stats[tag] = scrub_user_tags(tagcount)
		else:
			stats[tag] = tagcount
	return stats
def scrub_user_tags(tagcount):
	"""Explode comma-separated tag strings and total the counts per tag.

	``tagcount`` is a list of [tag_string, count] pairs; the result is a
	list of [tag, total] pairs with empty tags dropped.
	"""
	totals = {}
	for tag_string, count in dict(tagcount).items():
		for tag in tag_string.split(','):
			if not tag:
				continue
			totals[tag] = totals.get(tag, 0) + count
	return [[tag, totals[tag]] for tag in totals]
def get_table_columns(table):
	"""Return the column names of `tab<table>` (implicitly None when the table is missing)."""
	res = webnotes.conn.sql("DESC `tab%s`" % table, as_dict=1)
	if res: return [r['Field'] for r in res]
# used in building query in queries.py
def get_match_cond(doctype, searchfield = 'name'):
	"""Return the match-conditions clause prefixed with ' and ', or '' when empty."""
	# searchfield is accepted for caller compatibility but unused here
	cond = build_match_conditions(doctype)
	return (' and ' + cond) if cond else ''
| |
import codecs
import copy
import io
import pickle
from io import BytesIO, StringIO, TextIOBase
import pytest
from pytest import raises
import jprops
from jprops import text_type
@pytest.mark.parametrize('lines', [
  b'a\nb\nc\n', # Unix
  b'a\r\nb\r\nc\r\n', # Windows
  b'a\rb\rc\r', # Mac
])
def test_property_lines_platform_line_endings(lines):
  """_property_lines splits Unix, Windows and classic-Mac endings, bytes or text."""
  expected = [u'a', u'b', u'c']
  property_lines = lambda fp: list(jprops._property_lines(fp))
  assert property_lines(BytesIO(lines)) == expected
  assert property_lines(StringIO(lines.decode('ascii'))) == expected
@pytest.mark.parametrize('lines,expected', [
  # skips blanks
  (b'a\nb\n \t \n\nc\n', [u'a', u'b', u'c']),
  # includes comments
  (b'a\nb\n#foo\n!bar\nc\n', [u'a', u'b', u'#foo', u'!bar', u'c']),
  # continuation
  (b'a\nb\\\nc\nd\n', [u'a', u'bc', u'd']),
  # continuation includes trailing blanks
  (b'a\nb \\\nc\nd\n', [u'a', u'b c', u'd']),
  # continuation skips leading blanks
  (b'a\nb\\\n c\nd\n', [u'a', u'bc', u'd']),
  # escaped backslash is not a continuation
  (b'a\nb\\\\\nc\nd\n', [u'a', u'b\\\\', u'c', u'd']),
  # escaped backslash before continuation
  (b'a\nb\\\\\\\nc\nd\n', [u'a', u'b\\\\c', u'd']),
])
def test_property_lines_splitting(lines, expected):
  """Blank-line skipping, comment passthrough and backslash-continuation joining."""
  property_lines = lambda fp: list(jprops._property_lines(fp))
  assert property_lines(BytesIO(lines)) == expected
  assert property_lines(StringIO(lines.decode('ascii'))) == expected
@pytest.mark.parametrize('line,expected', [
  # with equals separator
  (u'a=b', (u'a', u'b')),
  (u'a= b', (u'a', u'b')),
  (u'a = b', (u'a', u'b')),
  (u'a =b', (u'a', u'b')),
  # with colon separator
  (u'a:b', (u'a', u'b')),
  (u'a: b', (u'a', u'b')),
  (u'a : b', (u'a', u'b')),
  (u'a :b', (u'a', u'b')),
  # only space separator
  (u'a b', (u'a', u'b')),
  # additional key terminator after already terminated
  (u'a : : b', (u'a', u': b')),
  (u'a::b', (u'a', u':b')),
  (u'a = = b', (u'a', u'= b')),
  (u'a==b', (u'a', u'=b')),
  (u'a = : b', (u'a', u': b')),
  (u'a : = b', (u'a', u'= b')),
  # key terminator escaped
  (u'a\\=b = c', (u'a\\=b', u'c')),
  (u'a\\:b\\=c : d', (u'a\\:b\\=c', u'd')),
  # empty value
  (u'a', (u'a', u'')),
  # comment
  (u'#foo', (jprops.COMMENT, u'foo')),
  # non-ascii
  (u'\u00ff=\u00fe', (u'\u00ff', u'\u00fe')),
])
def test_split_key_value(line, expected):
  """Separator parsing: '=', ':' and whitespace, with escapes and comments."""
  assert jprops._split_key_value(line) == expected
@pytest.mark.parametrize('value,expected', [
  # basic whitespace escapes
  (u'\\t', '\t'),
  (u'\\n', '\n'),
  (u'\\f', '\f'),
  (u'\\r', '\r'),
  # unrecognized escape should just return the character
  (u'\\=', '='),
  (u'\\:', ':'),
  (u'\\b', 'b'),
  # unicode \u escapes
  (u'\\u00ff', u'\u00ff'),
  # backslash encoded as \u unicode escape
  (u'\\u005cb', '\\b'),
  # unicode with escaped backslashes
  (u'\\\\u00ff', '\\u00ff'),
  (u'\\\\\\u00ff', u'\\\u00ff'),
  (u'\\\\\\\\u00ff', '\\\\u00ff'),
])
def test_unescape(value, expected):
  """Escape-sequence decoding, including unicode escapes; type is preserved."""
  actual = jprops._unescape(value)
  assert actual == expected
  assert type(actual) == type(expected)
@pytest.mark.parametrize('value,expected', [
  # basic
  (u'\\', u'\\\\'),
  (u'\t', u'\\t'),
  (u'\n', u'\\n'),
  (u'\f', u'\\f'),
  (u'\r', u'\\r'),
  # escape comment markers
  (u'#', u'\\#'),
  (u'!', u'\\!'),
])
def test_escape(value, expected):
  """Basic escaping of backslash, whitespace and comment markers; type preserved."""
  actual = jprops._escape(value)
  assert actual == expected
  assert type(actual) == type(expected)
@pytest.mark.parametrize('value,expected', [
  # leading whitespace in value
  (u' x\ty ', u'\\ \\ x\\ty '),
  # space in middle value does not need escaped
  (u'x y', u'x y'),
  # key terminator in value
  (u'=', u'\\='),
  (u':', u'\\:'),
])
def test_escape_value(value, expected):
  """Value escaping: leading whitespace and key terminators only; type preserved."""
  actual = jprops._escape_value(value)
  assert actual == expected
  assert type(actual) == type(expected)
@pytest.mark.parametrize('key,expected', [
  (u'=', u'\\='),
  (u':', u'\\:'),
  (u' x ', u'\\ x\\ '),
])
def test_escape_keys(key, expected):
  """Key escaping: terminators and surrounding spaces; type preserved."""
  actual = jprops._escape_key(key)
  assert actual == expected
  assert type(actual) == type(expected)
@pytest.mark.parametrize('comment,expected', [
  # newlines in comments should start the next line with a comment
  (u'foo\nbar', u'#foo\n#bar'),
  (u'foo\n\nbar', u'#foo\n#\n#bar'),
  (u'foo\rbar', u'#foo\n#bar'),
  (u'foo\r\rbar', u'#foo\n#\n#bar'),
  (u'foo\r\nbar', u'#foo\n#bar'),
  (u'foo\r\n\r\nbar', u'#foo\n#\n#bar'),
  (u'foo\n', u'#foo\n#'),
  # if the newline is already followed by a comment marker, keep it
  (u'foo\n#bar', u'#foo\n!bar' if False else u'#foo\n#bar'),
])
def test_escape_comment_newline(comment, expected):
  """Every line of a multi-line comment gets a comment marker."""
  assert jprops._escape_comment(comment) == expected
@pytest.mark.parametrize('key,value,expected', [
  (u'\x00', u'', b'\\u0000='),
  (u'\u0000', u'', b'\\u0000='),
  (u'\x19', u'', b'\\u0019='),
  (u'\u0019', u'', b'\\u0019='),
  (u'\x7f', u'', b'\\u007f='),
  (u'\u007f', u'', b'\\u007f='),
  (u'\uffff', u'', b'\\uffff='),
  (jprops.COMMENT, u'\u00ff', b'#\xff'),
  (jprops.COMMENT, u'\u0100', b'#\\u0100'),
])
def test_escape_unicode_in_bytes_output(key, value, expected):
  """Binary output escapes control chars and non-latin-1 characters as \\uXXXX."""
  b = BytesIO()
  jprops.write_property(b, key, value)
  actual = b.getvalue()[:-1] # strip the trailing newline
  assert actual == expected
@pytest.mark.parametrize('key,value,expected', [
  (u'\x00', u'', u'\u0000='),
  (u'\u0000', u'', u'\u0000='),
  (u'\x19', u'', u'\u0019='),
  (u'\u0019', u'', u'\u0019='),
  (u'\x7f', u'', u'\u007f='),
  (u'\u007f', u'', u'\u007f='),
  (u'\uffff', u'', u'\uffff='),
  (jprops.COMMENT, u'\u00ff', u'#\xff'),
  (jprops.COMMENT, u'\u0100', u'#\u0100'),
])
def test_unicode_in_text_output_not_escaped(key, value, expected):
  """Text-mode output keeps unicode characters literal (no \\uXXXX escaping)."""
  b = StringIO()
  jprops.write_property(b, key, value)
  actual = b.getvalue()[:-1] # strip the trailing newline
  assert actual == expected
def test_write_non_string_is_an_error():
  """write_property rejects non-string keys and values with TypeError."""
  with raises(TypeError):
    jprops.write_property(BytesIO(), b'x', 1)
  with raises(TypeError):
    jprops.write_property(BytesIO(), 1, b'x')
def test_iter_properties_ignores_comments_by_default():
  """Comment lines are dropped unless comments=True is passed."""
  fp = BytesIO(b'a\n#foo\nb\n')
  assert list(jprops.iter_properties(fp)) == [('a', ''), ('b', '')]
def test_iter_properties_includes_comments():
  """With comments=True, comment lines appear keyed by the COMMENT sentinel."""
  fp = BytesIO(b'a\n#foo\nb\n')
  assert (list(jprops.iter_properties(fp, comments=True)) ==
          [('a', ''), (jprops.COMMENT, 'foo'), ('b', '')])
def test_write_property_with_comment():
  """Writing with the COMMENT sentinel key emits a comment line."""
  fp = BytesIO()
  jprops.write_property(fp, jprops.COMMENT, 'foo')
  assert fp.getvalue() == b'#foo\n'
def test_read_text():
  """Text-mode input is read as-is, without \\u decoding of literal unicode."""
  fp = StringIO(u'a=\u00ff\n')
  assert list(jprops.iter_properties(fp)) == [(u'a', u'\u00ff')]
def test_read_bytes():
  """Binary-mode input decodes \\uXXXX escapes to unicode values."""
  fp = BytesIO(b'a=\\u00ff\n')
  assert list(jprops.iter_properties(fp)) == [(u'a', u'\u00ff')]
def test_write_text():
  """Text-mode output keeps unicode characters literal."""
  fp = StringIO()
  jprops.write_property(fp, u'a', u'\u00ff')
  assert fp.getvalue() == u'a=\u00ff\n'
def test_write_bytes():
  """Binary-mode output escapes non-ascii characters as \\uXXXX."""
  fp = BytesIO()
  jprops.write_property(fp, u'a', u'\u00ff')
  assert fp.getvalue() == b'a=\\u00ff\n'
def builtin_open(path, mode, encoding, newline):
  """Open *path* with the built-in open(), skipping unsupported combinations.

  pytest.skip() raises Skipped itself, so the previous `raise pytest.skip(...)`
  was misleading (the `raise` never executed); normalized to a plain call,
  consistent with codecs_open.
  """
  if 'w' in mode and newline:
    # jprops handles newline splitting on read, but relies on the underlying
    # file for writing
    pytest.skip("built-in open doesn't support newline for writing")
  if 'r' in mode and not newline:
    # for reading use universal-newlines support if newline is None or ''
    # (the 'U' mode flag is a Python 2 idiom, removed in Python 3.11)
    mode += 'U'
  if encoding is None:
    return open(path, mode)
  elif jprops.PY2:
    pytest.skip("Python 2 built-in open doesn't support encoding")
  return open(path, mode, encoding=encoding)
def codecs_open(path, mode, encoding, newline):
  """Open *path* with codecs.open(), which handles encoding but not newline modes."""
  if 'w' in mode and newline:
    # jprops handles newline splitting on read, but relies on the underlying
    # file for writing
    pytest.skip("codecs.open doesn't support newline modes")
  return codecs.open(path, mode, encoding=encoding)
def io_open(path, mode, encoding, newline):
  """Open *path* with io.open(), which supports encoding and newline directly.

  pytest.skip() raises Skipped itself, so the previous `raise pytest.skip(...)`
  was misleading; normalized to a plain call, consistent with codecs_open.
  """
  if 'b' in mode and newline is not None:
    pytest.skip("io.open binary mode doesn't take a newline argument")
  return io.open(path, mode, encoding=encoding, newline=newline)
@pytest.mark.parametrize('opener', [
  builtin_open,
  codecs_open,
  io_open,
])
@pytest.mark.parametrize('encoding,file_data', [
  (None, b'a=\\u0100\n'),
  ('utf-8', u'a=\u0100\n'.encode('utf-8')),
])
@pytest.mark.parametrize('mode', [
  'r', 'w',
])
@pytest.mark.parametrize('newline', [
  None, '', '\n', '\r', '\r\n',
])
def test_file_modes(tmpdir, opener, encoding, file_data, mode, newline):
  """Cross-product of opener x encoding x mode x newline round-trips cleanly."""
  # check common combinations of various methods of opening files with different
  # encodings and line-endings
  if encoding is None:
    mode += 'b'
  expected_props = {u'a': u'\u0100'}
  if newline:
    file_data = file_data.replace(b'\n', newline.encode('ascii'))
  open_path = lambda path: opener(
    str(path),
    mode,
    encoding=encoding,
    newline=newline,
  )
  if 'r' in mode:
    read_path = tmpdir.join('reading.properties')
    read_path.write_binary(file_data)
    with open_path(read_path) as fp:
      actual_props = jprops.load_properties(fp)
    assert actual_props == expected_props
  else:
    write_path = tmpdir.join('writing.properties')
    with open_path(write_path) as fp:
      jprops.store_properties(fp, expected_props, timestamp=False)
    actual_data = write_path.read_binary()
    assert actual_data == file_data
def test_comment_identity():
  """COMMENT is a sentinel: copy, deepcopy and pickling must preserve identity."""
  assert copy.copy(jprops.COMMENT) is jprops.COMMENT
  assert copy.deepcopy(jprops.COMMENT) is jprops.COMMENT
  assert pickle.loads(pickle.dumps(jprops.COMMENT)) is jprops.COMMENT
| |
#encoding: utf-8
import datetime
from hashlib import sha1
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.db.models.options import get_verbose_name as convert_camelcase
from django.utils import simplejson as json
from django.utils.http import urlquote_plus
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.template import loader, Context, Template, TemplateDoesNotExist
from philo.contrib.sobol.utils import make_tracking_querydict
from philo.utils.registry import Registry
# Optionally use eventlet's green (non-blocking) urllib2; fall back to the
# standard library when eventlet is unavailable.
if getattr(settings, 'SOBOL_USE_EVENTLET', False):
	try:
		from eventlet.green import urllib2
	# Narrowed from a bare `except:` — only a failed import should trigger
	# the fallback; other errors must propagate.
	except ImportError:
		import urllib2
else:
	import urllib2
__all__ = (
	'Result', 'BaseSearch', 'DatabaseSearch', 'URLSearch', 'JSONSearch', 'GoogleSearch', 'registry', 'get_search_instance'
)
# Seed string mixed into every search cache key to namespace sobol's entries.
SEARCH_CACHE_SEED = 'philo_sobol_search_results'
# Set SOBOL_USE_CACHE = False in settings to disable search-result caching.
USE_CACHE = getattr(settings, 'SOBOL_USE_CACHE', True)
#: A registry for :class:`BaseSearch` subclasses that should be available in the admin.
registry = Registry()
def _make_cache_key(search, search_arg):
	# sha1 of seed + slug + query yields a fixed-length, cache-safe key.
	# `search` may be a BaseSearch class or an instance; both carry `slug`.
	return sha1(SEARCH_CACHE_SEED + search.slug + search_arg).hexdigest()
def get_search_instance(slug, search_arg):
	"""Returns a search instance for the given slug, either from the cache or newly-instantiated."""
	search_class = registry[slug]
	normalized_arg = search_arg.lower()
	if USE_CACHE:
		cached = cache.get(_make_cache_key(search_class, normalized_arg))
		if cached:
			return cached
	instance = search_class(normalized_arg)
	instance.slug = slug
	return instance
class Result(object):
	"""
	:class:`Result` is a helper class that, given a search and a result of that search, is able to correctly render itself with a template defined by the search. Every :class:`Result` will pass a ``title``, a ``url`` (if applicable), and the raw ``result`` returned by the search into the template context when rendering.

	:param search: An instance of a :class:`BaseSearch` subclass or an object that implements the same API.
	:param result: An arbitrary result from the ``search``.
	"""
	def __init__(self, search, result):
		self.search = search  # the BaseSearch(-like) object that produced this result
		self.result = result  # the raw result item; interpreted only via self.search
	def get_title(self):
		"""Returns the title of the result by calling :meth:`BaseSearch.get_result_title` on the raw result."""
		return self.search.get_result_title(self.result)
	def get_url(self):
		"""Returns the url of the result or ``None`` by calling :meth:`BaseSearch.get_result_url` on the raw result. This url will contain a querystring which, if used, will track a :class:`.Click` for the actual url."""
		return self.search.get_result_url(self.result)
	def get_actual_url(self):
		"""Returns the actual url of the result by calling :meth:`BaseSearch.get_actual_result_url` on the raw result."""
		return self.search.get_actual_result_url(self.result)
	def get_content(self):
		"""Returns the content of the result by calling :meth:`BaseSearch.get_result_content` on the raw result."""
		return self.search.get_result_content(self.result)
	def get_template(self):
		"""Returns the template which will be used to render the :class:`Result` by calling :meth:`BaseSearch.get_result_template` on the raw result."""
		return self.search.get_result_template(self.result)
	def get_context(self):
		"""
		Returns the context dictionary for the result. This is used both in rendering the result and in the AJAX return value for :meth:`.SearchView.ajax_api_view`. The context will contain the following keys:

		title
			The result of calling :meth:`get_title`
		url
			The result of calling :meth:`get_url`
		content
			The result of calling :meth:`get_content`

		"""
		# Computed once and memoized; BaseSearch.results relies on this to
		# fully populate results before they are pickled into the cache.
		if not hasattr(self, '_context'):
			self._context = {
				'title': self.get_title(),
				'url': self.get_url(),
				'actual_url': self.get_actual_url(),
				'content': self.get_content()
			}
		return self._context
	def render(self):
		"""Returns the template from :meth:`get_template` rendered with the context from :meth:`get_context`."""
		t = self.get_template()
		c = Context(self.get_context())
		return t.render(c)
	def __unicode__(self):
		"""Returns :meth:`render`"""
		return self.render()
class BaseSearchMetaclass(type):
	"""Supplies default ``verbose_name`` and ``slug`` class attributes when absent."""
	def __new__(cls, name, bases, attrs):
		if 'verbose_name' not in attrs:
			# e.g. "GoogleSearch" -> "Google" (drops the trailing camel-case word)
			attrs['verbose_name'] = capfirst(' '.join(convert_camelcase(name).rsplit(' ', 1)[:-1]))
		if 'slug' not in attrs:
			# e.g. "GoogleSearch" -> "google"
			attrs['slug'] = name[:-6].lower() if name.endswith("Search") else name.lower()
		return super(BaseSearchMetaclass, cls).__new__(cls, name, bases, attrs)
class BaseSearch(object):
"""
Defines a generic search api. Accessing :attr:`results` will attempt to retrieve cached results and, if that fails, will initiate a new search and store the results in the cache. Each search has a ``verbose_name`` and a ``slug``. If these are not provided as attributes, they will be automatically generated based on the name of the class.
:param search_arg: The string which is being searched for.
"""
__metaclass__ = BaseSearchMetaclass
#: The number of results to return from the complete list. Default: 5
result_limit = 5
#: How long the items for the search should be cached (in minutes). Default: 48 hours.
_cache_timeout = 60*48
#: The path to the template which will be used to render the :class:`Result`\ s for this search. If this is ``None``, then the framework will try ``sobol/search/<slug>/result.html`` and ``sobol/search/result.html``.
result_template = None
#: The path to the template which will be used to generate the title of the :class:`Result`\ s for this search. If this is ``None``, then the framework will try ``sobol/search/<slug>/title.html`` and ``sobol/search/title.html``.
title_template = None
#: The path to the template which will be used to generate the content of the :class:`Result`\ s for this search. If this is ``None``, then the framework will try ``sobol/search/<slug>/content.html`` and ``sobol/search/content.html``.
content_template = None
def __init__(self, search_arg):
self.search_arg = search_arg
@property
def results(self):
"""Retrieves cached results or initiates a new search via :meth:`get_results` and caches the results."""
if not hasattr(self, '_results'):
try:
# Cache one extra result so we can see if there are
# more results to be had.
limit = self.result_limit
if limit is not None:
limit += 1
results = self.get_results(limit)
except:
if settings.DEBUG:
raise
# On exceptions, don't set any cache; just return.
return []
self._results = results
if USE_CACHE:
for result in results:
result.get_context()
key = _make_cache_key(self, self.search_arg)
cache.set(key, self, self._cache_timeout)
return self._results
def get_results(self, limit=None, result_class=Result):
"""
Calls :meth:`search` and parses the return value into :class:`Result` instances.
:param limit: Passed directly to :meth:`search`.
:param result_class: The class used to represent the results. This will be instantiated with the :class:`BaseSearch` instance and the raw result from the search.
"""
results = self.search(limit)
return [result_class(self, result) for result in results]
def search(self, limit=None):
"""Returns an iterable of up to ``limit`` results. The :meth:`get_result_title`, :meth:`get_result_url`, :meth:`get_result_template`, and :meth:`get_result_extra_context` methods will be used to interpret the individual items that this function returns, so the result can be an object with attributes as easily as a dictionary with keys. However, keep in mind that the raw results will be stored with django's caching mechanisms and will be converted to JSON."""
raise NotImplementedError
def get_actual_result_url(self, result):
"""Returns the actual URL for the ``result`` or ``None`` if there is no URL. Must be implemented by subclasses."""
raise NotImplementedError
def get_result_querydict(self, result):
"""Returns a querydict for tracking selection of the result, or ``None`` if there is no URL for the result."""
url = self.get_actual_result_url(result)
if url is None:
return None
return make_tracking_querydict(self.search_arg, url)
def get_result_url(self, result):
"""Returns ``None`` or a url which, when accessed, will register a :class:`.Click` for that url."""
qd = self.get_result_querydict(result)
if qd is None:
return None
return "?%s" % qd.urlencode()
def get_result_title(self, result):
"""Returns the title of the ``result``. By default, renders ``sobol/search/<slug>/title.html`` or ``sobol/search/title.html`` with the result in the context. This can be overridden by setting :attr:`title_template` or simply overriding :meth:`get_result_title`. If no template can be found, this will raise :exc:`TemplateDoesNotExist`."""
return loader.render_to_string(self.title_template or [
'sobol/search/%s/title.html' % self.slug,
'sobol/search/title.html'
], {'result': result})
def get_result_content(self, result):
    """Render the body/content for ``result``.

    Uses :attr:`content_template` when set; otherwise tries
    ``sobol/search/<slug>/content.html`` then ``sobol/search/content.html``.
    Returns an empty string when no template exists.
    """
    templates = self.content_template or [
        'sobol/search/%s/content.html' % self.slug,
        'sobol/search/content.html',
    ]
    try:
        return loader.render_to_string(templates, {'result': result})
    except TemplateDoesNotExist:
        return ""
def get_result_template(self, result):
    """Return the template used to render ``result``.

    Honors :attr:`result_template` when set; otherwise prefers the
    slug-specific ``sobol/search/<slug>/result.html`` and falls back on
    the generic ``sobol/search/result.html``.
    """
    if self.result_template:
        return loader.get_template(self.result_template)
    candidates = [
        'sobol/search/%s/result.html' % self.slug,
        'sobol/search/result.html',
    ]
    return loader.select_template(candidates)
@property
def has_more_results(self):
    """``True`` when more results exist than :attr:`result_limit` allows."""
    return self.result_limit < len(self.results)
def get_actual_more_results_url(self):
    """Hook returning the real 'more results' URL; ``None`` by default."""
    return None
def get_more_results_querydict(self):
    """Build a :class:`QueryDict` that tracks 'more results' clicks.

    Returns ``None`` when there is no 'more results' URL to track.
    """
    target = self.get_actual_more_results_url()
    if not target:
        return None
    return make_tracking_querydict(self.search_arg, target)
@property
def more_results_url(self):
    """Tracking querystring URL for 'more results', or ``None``.

    Accessing the returned URL logs a :class:`.Click` for the actual URL.
    """
    querydict = self.get_more_results_querydict()
    return None if querydict is None else "?%s" % querydict.urlencode()
def __unicode__(self):
    # Python 2 text representation: a search displays as its verbose name.
    return self.verbose_name
class DatabaseSearch(BaseSearch):
    """Database-backed search: implements :meth:`~BaseSearch.search` on top
    of a :meth:`get_queryset` hook."""

    #: The model which should be searched by the :class:`DatabaseSearch`.
    model = None

    def search(self, limit=None):
        """Lazily build and cache the queryset, sliced to ``limit``."""
        if not hasattr(self, '_qs'):
            self._qs = self.get_queryset()
            if limit is not None:
                self._qs = self._qs[:limit]
        return self._qs

    def get_queryset(self):
        """Hook returning the :class:`QuerySet` to search.

        Defaults to all instances of :attr:`model`; override in subclasses
        to specify how the search is actually implemented for the model.
        """
        return self.model._default_manager.all()
class URLSearch(BaseSearch):
    """Generic interface for searches that fetch their results by accessing
    a URL."""

    #: The base URL which will be accessed to get the search results.
    search_url = ''
    #: URL-encoded query string appended to :attr:`search_url`; must
    #: contain exactly one ``%s`` placeholder for the search argument.
    query_format_str = "%s"

    @property
    def url(self):
        """Full results URL built from :attr:`search_url`,
        :attr:`query_format_str`, and the quoted search argument."""
        query = self.query_format_str % urlquote_plus(self.search_arg)
        return self.search_url + query

    def get_actual_more_results_url(self):
        return self.url

    def parse_response(self, response, limit=None):
        """Interpret the ``response`` fetched from :attr:`url` (via
        :func:`urllib2.urlopen`) and return up to ``limit`` results.
        Abstract hook for subclasses."""
        raise NotImplementedError

    def search(self, limit=None):
        return self.parse_response(urllib2.urlopen(self.url), limit=limit)
class JSONSearch(URLSearch):
    """Fetches the search URL with a GET request and decodes the body as
    JSON; assumes the decoded document is a list of results."""

    def parse_response(self, response, limit=None):
        decoded = json.loads(response.read())
        # A limit of None slices to the full list.
        return decoded[:limit]
class GoogleSearch(JSONSearch):
    """An example implementation of a :class:`JSONSearch`."""
    # Google AJAX web-search endpoint.
    search_url = "http://ajax.googleapis.com/ajax/services/search/web"
    _cache_timeout = 60
    verbose_name = "Google search (current site)"
    # Remembered from the last parse_response; None until results are seen.
    _more_results_url = None
    @property
    def query_format_str(self):
        default_args = self.default_args
        if default_args:
            default_args += " "
        # Double every '%' in the quoted default args so that exactly one
        # '%s' placeholder survives the later %-interpolation of the
        # search argument.
        return "?v=1.0&q=%s%%s" % urlquote_plus(default_args).replace('%', '%%')
    @property
    def default_args(self):
        """Unquoted default arguments for the :class:`GoogleSearch`."""
        return "site:%s" % Site.objects.get_current().domain
    def parse_response(self, response, limit=None):
        responseData = json.loads(response.read())['responseData']
        results, cursor = responseData['results'], responseData['cursor']
        if results:
            self._more_results_url = cursor['moreResultsUrl']
            # NOTE(review): the API appears to return estimatedResultCount
            # as a string while has_more_results compares it numerically
            # against len() — confirm whether an int() cast is needed.
            self._estimated_result_count = cursor['estimatedResultCount']
        return results[:limit]
    @property
    def url(self):
        # Google requires that an ajax request have a proper Referer header.
        return urllib2.Request(
            super(GoogleSearch, self).url,
            None,
            {'Referer': "http://%s" % Site.objects.get_current().domain}
        )
    @property
    def has_more_results(self):
        # _estimated_result_count is only set once parse_response has seen
        # results; the truthiness check on self.results guards that.
        if self.results and len(self.results) < self._estimated_result_count:
            return True
        return False
    def get_actual_more_results_url(self):
        return self._more_results_url
    def get_actual_result_url(self, result):
        return result['unescapedUrl']
    def get_result_title(self, result):
        return mark_safe(result['titleNoFormatting'])
    def get_result_content(self, result):
        return mark_safe(result['content'])
# Make the example Google search available through the sobol search registry.
registry.register(GoogleSearch)
# The scrape-style searches are optional: they are only exported when the
# BeautifulSoup 3 package is importable.
try:
    from BeautifulSoup import BeautifulSoup, SoupStrainer, BeautifulStoneSoup
except ImportError:
    # Narrowed from a bare ``except:`` so that only a missing package is
    # tolerated; real errors (KeyboardInterrupt, a SyntaxError inside the
    # package, etc.) are no longer silently swallowed.
    pass
else:
    __all__ += ('ScrapeSearch', 'XMLSearch',)
class ScrapeSearch(URLSearch):
    """Base class for scrape-style searching, available when
    :mod:`BeautifulSoup` is installed."""

    #: Positional arguments passed to the :class:`SoupStrainer`.
    strainer_args = []
    #: Keyword arguments passed to the :class:`SoupStrainer`.
    strainer_kwargs = {}

    @property
    def strainer(self):
        """Lazily built :class:`SoupStrainer` (cached on the instance) used
        to parse only the interesting part of the fetched document.

        .. seealso:: `BeautifulSoup: Improving Performance by Parsing Only Part of the Document <http://www.crummy.com/software/BeautifulSoup/documentation.html#Improving%20Performance%20by%20Parsing%20Only%20Part%20of%20the%20Document>`_
        """
        try:
            return self._strainer
        except AttributeError:
            self._strainer = SoupStrainer(*self.strainer_args,
                                          **self.strainer_kwargs)
            return self._strainer

    def parse_response(self, response, limit=None):
        soup = BeautifulSoup(response, parseOnlyThese=self.strainer)
        top_level = soup.findAll(recursive=False, limit=limit)
        return self.parse_results(top_level)

    def parse_results(self, results):
        """Hook turning strained tags into result objects.

        No default behavior: subclasses must implement it to extract the
        information they need from the raw tags.

        .. seealso:: `BeautifulSoup: Improving Memory Usage with extract <http://www.crummy.com/software/BeautifulSoup/documentation.html#Improving%20Memory%20Usage%20with%20extract>`_
        """
        raise NotImplementedError
class XMLSearch(ScrapeSearch):
    """Scrape-style search over XML result documents."""

    #: Tag names treated as self-closing when interpreting the XML document.
    #:
    #: .. seealso:: `BeautifulSoup: Parsing XML <http://www.crummy.com/software/BeautifulSoup/documentation.html#Parsing%20XML>`_
    self_closing_tags = []

    def parse_response(self, response, limit=None):
        soup = BeautifulStoneSoup(response,
                                  selfClosingTags=self.self_closing_tags,
                                  parseOnlyThese=self.strainer)
        top_level = soup.findAll(recursive=False, limit=limit)
        return self.parse_results(top_level)
| |
#!/usr/bin/env python3.4
#
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import str
import os
import sys
from mobly import keys
from mobly import utils
# An environment variable defining the base location for Mobly logs.
_ENV_MOBLY_LOGPATH = 'MOBLY_LOGPATH'
class MoblyConfigError(Exception):
    """Raised when there is a problem in test configuration file.

    All validation helpers in this module raise this type, so callers can
    catch configuration problems uniformly.
    """
def _validate_test_config(test_config):
    """Validates the raw configuration loaded from the config file.

    Ensures every reserved/required key is present.

    Args:
        test_config: The loaded configuration mapping.

    Raises:
        MoblyConfigError: a required key is missing.
    """
    missing = [key for key in keys.Config.reserved_keys.value
               if key not in test_config]
    if missing:
        raise MoblyConfigError(
            'Required key %s missing in test config.' % missing[0])
def _validate_testbed_name(name):
    """Validates the name of a test bed.

    Test bed names become part of the test run id, so they must be
    non-empty strings made up of filename-safe characters only.

    Args:
        name: The test bed's name specified in config file.

    Raises:
        MoblyConfigError: the name fails any of the criteria.
    """
    if not name:
        raise MoblyConfigError("Test bed names can't be empty.")
    if not isinstance(name, str):
        raise MoblyConfigError('Test bed names have to be string.')
    for char in name:
        if char not in utils.valid_filename_chars:
            raise MoblyConfigError(
                'Char "%s" is not allowed in test bed names.' % char)
def _validate_testbed_configs(testbed_configs):
    """Validates a list of testbed configurations.

    Checks each test bed's name and enforces that names are unique across
    the list, since concurrent test beds must not collide.

    Args:
        testbed_configs: A list of testbed configuration json objects.

    Raises:
        MoblyConfigError: any part of the configuration is invalid.
    """
    seen_names = set()
    for config in testbed_configs:
        name = config[keys.Config.key_testbed_name.value]
        _validate_testbed_name(name)
        if name in seen_names:
            raise MoblyConfigError('Duplicate testbed name %s found.' % name)
        seen_names.add(name)
def _verify_test_class_name(test_cls_name):
if not test_cls_name.endswith('Test'):
raise MoblyConfigError(
'Requested test class "%s" does not follow the test class naming '
'convention *Test.' % test_cls_name)
def gen_term_signal_handler(test_runners):
    """Creates a termination signal handler for the given runners.

    Args:
        test_runners: iterable of runner objects exposing ``stop()``.

    Returns:
        A callable suitable for ``signal.signal``; when invoked it stops
        every runner and exits the process with status 1.
    """
    def _handler(signal_num, frame):
        for runner in test_runners:
            runner.stop()
        sys.exit(1)
    return _handler
def _parse_one_test_specifier(item):
    """Parses one test specifier from command line input.

    Also verifies naming conventions: a test class name must end with
    "Test" and every test case name must start with "test_".

    Args:
        item: A string specifying a test class, optionally followed by a
            ':' and a comma-separated list of test case names.

    Returns:
        A (class name, case-name list) tuple; the list is None when the
        specifier names only a class.

    Raises:
        MoblyConfigError: the specifier or any contained name is malformed.
    """
    tokens = item.split(':')
    if len(tokens) > 2:
        raise MoblyConfigError("Syntax error in test specifier %s" % item)
    test_cls_name = tokens[0]
    _verify_test_class_name(test_cls_name)
    if len(tokens) == 1:
        # Bare class name: run every test case in the class.
        return (test_cls_name, None)
    clean_names = []
    for elem in tokens[1].split(','):
        test_case_name = elem.strip()
        if not test_case_name.startswith("test_"):
            raise MoblyConfigError(
                'Requested test case "%s" in test class "%s" does not'
                ' follow the test case naming convention test_*.' %
                (test_case_name, test_cls_name))
        clean_names.append(test_case_name)
    return (test_cls_name, clean_names)
def parse_test_list(test_list):
    """Parses user-provided test specifiers into test_runner's format.

    Args:
        test_list: A list of test classes/cases.

    Returns:
        A list of (class name, case-name list) tuples, one per specifier.
    """
    return [_parse_one_test_specifier(elem) for elem in test_list]
def load_test_config_file(test_config_path, tb_filters=None):
    """Processes the test configuration file provided by the user.

    Loads the configuration file into a json object, unpacks each testbed
    config into its own json object, and validates the configuration in
    the process.

    Args:
        test_config_path: Path to the test configuration file.
        tb_filters: A subset of test bed names to be pulled from the config
            file. If None, then all test beds will be selected.

    Returns:
        A list of test configuration json objects to be passed to
        test_runner.TestRunner.

    Raises:
        MoblyConfigError: the configuration is invalid or a requested test
            bed is missing.
    """
    configs = utils.load_config(test_config_path)
    if tb_filters:
        tbs = []
        for tb in configs[keys.Config.key_testbed.value]:
            if tb[keys.Config.key_testbed_name.value] in tb_filters:
                tbs.append(tb)
        if len(tbs) != len(tb_filters):
            raise MoblyConfigError(
                'Expect to find %d test bed configs, found %d. Check if'
                ' you have the correct test bed names.' %
                (len(tb_filters), len(tbs)))
        configs[keys.Config.key_testbed.value] = tbs
    # Fall back to the environment-provided log path when the config file
    # does not specify one.
    if (not keys.Config.key_log_path.value in configs and
            _ENV_MOBLY_LOGPATH in os.environ):
        print('Using environment log path: %s' %
              (os.environ[_ENV_MOBLY_LOGPATH]))
        configs[keys.Config.key_log_path.value] = os.environ[_ENV_MOBLY_LOGPATH]
    _validate_test_config(configs)
    _validate_testbed_configs(configs[keys.Config.key_testbed.value])
    k_log_path = keys.Config.key_log_path.value
    configs[k_log_path] = utils.abs_path(configs[k_log_path])
    # TODO: See if there is a better way to do this: b/29836695
    # (This computation used to appear twice, verbatim; it is now done once.)
    config_path, _ = os.path.split(utils.abs_path(test_config_path))
    configs[keys.Config.key_config_path] = config_path
    # Unpack testbeds into separate json objects.
    beds = configs.pop(keys.Config.key_testbed.value)
    config_jsons = []
    for original_bed_config in beds:
        new_test_config = dict(configs)
        new_test_config[keys.Config.key_testbed.value] = original_bed_config
        # Keys in each test bed config are copied one level up so they are
        # picked up as user_params; a value defined in the test bed config
        # overwrites the general one.
        new_test_config.update(original_bed_config)
        config_jsons.append(new_test_config)
    return config_jsons
def parse_test_file(fpath):
    """Parses a test file that contains test specifiers.

    A line ending in ':' or ',' is treated as continued on the next
    non-empty line, so a single specifier may span several lines.

    Args:
        fpath: A string that is the path to the test file to parse.

    Returns:
        A list of strings, each is a test specifier.
    """
    specifiers = []
    with open(fpath, 'r') as f:
        for raw_line in f:
            line = raw_line.strip()
            if not line:
                continue
            if specifiers and specifiers[-1][-1] in (':', ','):
                # Continuation of the previous (unfinished) specifier.
                specifiers[-1] += line
            else:
                specifiers.append(line)
    return specifiers
| |
import os
import json
import glob
import copy
from xml.etree.ElementTree import Element, SubElement, Comment, tostring
import argparse
import sys
sys.path.insert(0, os.environ['GOFFISH_CONFIG'])
import helper
from confdata import GoffishConf
'''
* client script for gofs
* assumption is that the script will run only in head node
*
*
* @author Neel Choudhury
* @version 1.0
* @see <a href="http://www.dream-lab.in/">DREAM:Lab</a>
*
* Copyright 2014 DREAM:Lab, Indian Institute of Science, 2014
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
'''
class Graph:
    """Value object describing one graph deployment request.

    Attributes mirror the constructor arguments; ``graph_folder`` has any
    environment variables expanded on construction.
    """

    def __init__(self, graph_folder="", graph_id="", number_of_instances=0,
                 subgraph_bin_packing=0, temporal_bin_packing=0,
                 graph_index_file=""):
        # Expand $VARS so config files may reference environment locations.
        self.graph_folder = os.path.expandvars(graph_folder)
        self.graph_id = graph_id
        self.number_of_instances = number_of_instances
        self.subgraph_bin_packing = subgraph_bin_packing
        self.temporal_bin_packing = temporal_bin_packing
        self.graph_index_file = graph_index_file
def gen_gofs_config(head_node, machine_list):
    """Build the contents of the GoFS cluster config file.

    Args:
        head_node: object with an ``address`` attribute; hosts the name node.
        machine_list: iterable of machines with ``address`` and ``path.data``
            attributes; each becomes a data node entry.

    Returns:
        The config file contents as a single string (also echoed to stdout).
    """
    h_add = head_node.address
    lines = [
        "# Config File for formatting a GoFS cluster\n",
        "# -----------------------------------------\n",
        "# the name node uri\n",
        "gofs.namenode.type = edu.usc.goffish.gofs.namenode.RemoteNameNode\n",
        "gofs.namenode.location = http://" + h_add + ":9998\n",
        "# list of data nodes to format and add to the name node\n",
        "# repeat for each data node to include\n",
    ]
    for m in machine_list:
        lines.append("gofs.datanode =file://" + m.address + m.path.data + "\n")
    lines.append("# full class name of the serializer to use at every data node\n\n")
    lines.append("# gofs.serializer = edu.usc.goffish.gofs.slice.JavaSliceSerializer\n")
    lines.append("gofs.serializer = edu.usc.goffish.gofs.slice.KryoSliceSerializer\n")
    gofs_config_data = "".join(lines)
    # print() form replaces the Python-2-only print statement.
    print(gofs_config_data)
    return gofs_config_data
def get_file_name_match(filepattern, folder_path):
    """Return the first file in ``folder_path`` whose name matches the glob
    ``filepattern``. Raises IndexError when nothing matches."""
    pattern = folder_path + "/" + filepattern
    matches = glob.glob(pattern)
    return matches[0]
def gen_list_xml(head_node, graph_data):
    """Build the list.xml document naming a graph's template and instance
    files.

    Args:
        head_node: unused here; kept for interface compatibility.
        graph_data: Graph whose folder is scanned for the ``*template*``
            file and the ``*instance-N.gml`` files (1..number_of_instances).

    Returns:
        The serialized XML document (also echoed to stdout). NOTE(review):
        ``tostring`` yields bytes under Python 3, str under Python 2.
    """
    root = Element('gml')
    template_child = SubElement(root, 'template')
    template_child.text = get_file_name_match("*template*",
                                              graph_data.graph_folder)
    instances_child = SubElement(root, 'instances')
    for i in range(1, int(graph_data.number_of_instances) + 1):
        i_child = SubElement(instances_child, 'instance')
        i_child.text = get_file_name_match(
            "*instance-" + str(i) + ".gml", graph_data.graph_folder)
    # print() form replaces the Python-2-only print statement.
    print(tostring(root))
    return tostring(root)
def load(head_node, graph_data, list_xml_path, machine_list):
    """Deploy a graph into GoFS by running the GoFSDeployGraph script.

    Args:
        head_node: machine descriptor with ``address`` and ``path.bin``.
        graph_data: Graph carrying the deployment parameters.
        list_xml_path: path to the list.xml describing the graph files.
        machine_list: machines in the deployment; only its length is used
            (number of partitions).
    """
    h_add = head_node.address
    deploy_script = head_node.path.bin + '/gofs-bin/bin/GoFSDeployGraph'
    command_string = deploy_script + ' '
    # Optional serializer tuning flags are only emitted when non-zero.
    if graph_data.subgraph_bin_packing != 0:
        command_string += ('-serializer:numsubgraphbins ' +
                           str(graph_data.subgraph_bin_packing) + ' ')
    if graph_data.temporal_bin_packing != 0:
        command_string += ('-serializer:instancegroupingsize ' +
                           str(graph_data.temporal_bin_packing) + ' ')
    command_string += "edu.usc.goffish.gofs.namenode.RemoteNameNode "
    command_string += "http://" + h_add + ":9998 "
    command_string += graph_data.graph_id + " "
    command_string += str(len(machine_list)) + " "
    command_string += list_xml_path
    # print() form replaces the Python-2-only print statements.
    print(command_string)
    print(os.system(command_string))
    return
def format(head_node):
    """Format the GoFS cluster by running the GoFSFormat script.

    NOTE: shadows the builtin ``format``; the name is kept so existing
    callers keep working.

    Args:
        head_node: machine descriptor with a ``path.bin`` attribute.
    """
    format_script = head_node.path.bin + '/gofs-bin/bin/GoFSFormat'
    os.system(format_script)
    # print() form replaces the Python-2-only print statement.
    print(format_script)
    return
def start(head_node, graph_index_file, port):
    """Start the GoFS name node on the head node at the given port.

    Args:
        head_node: machine descriptor with ``address`` and ``path.bin``.
        graph_index_file: optional path to a graph index file; appended to
            the command only when non-empty.
        port: TCP port for the name node's HTTP endpoint.
    """
    h_add = head_node.address
    namenode_script = head_node.path.bin + '/gofs-bin/bin/GoFSNameNode'
    namenode_address = "http://" + h_add + ":" + str(port)
    command_string = namenode_script + " " + namenode_address + " "
    if graph_index_file != "":
        command_string += graph_index_file
    # print() form replaces the Python-2-only print statement.
    print(command_string)
    os.system(command_string)
    return
def validate_arg_load(arg_file_available=None, args=None):
    """Placeholder validation for the LOAD option's CLI arguments.

    Defaults were added because ``parse_arguments`` invokes this with no
    arguments; the original two-positional signature raised TypeError.
    """
    #if not arg_file_available:
    return
def validate_file_argument(argument, file_name, default_file_name):
    """Resolve a config-file argument to an existing file path.

    Args:
        argument: name of the CLI option (unused; kept for compatibility).
        file_name: explicit path supplied by the user, or None.
        default_file_name: file name to look for in the search paths when
            no explicit path was given.

    Returns:
        (available, path): whether a file was found, and its path (path is
        None when nothing was found and no explicit name was given).
    """
    if file_name is not None:
        return os.path.isfile(file_name), file_name
    # Assumes this script normally runs inside a GoFFish deployment; fall
    # back to the current directory when GOFFISH_CONFIG is unset instead
    # of crashing with ``NoneType + str``.
    config_path = os.environ.get('GOFFISH_CONFIG', '.') + '/'
    default_search_paths = ['./', config_path]  # search space may change
    available = False
    real_path = None
    for path in default_search_paths:
        candidate = str(path) + default_file_name
        if os.path.isfile(candidate):
            # NOTE(review): later search paths override earlier matches;
            # preserved as-is — confirm whether first-match was intended.
            available = True
            real_path = candidate
    return available, real_path
def parse_gofs_command_json(file_name):
    # Thin wrapper: delegates loading of the JSON command-arguments file to
    # the project-level helper module.
    cmd_arg = helper.read_json(file_name)
    return cmd_arg
def parse_arguments():
    """Parse the command line into (option, conf, graph_data).

    Returns:
        Tuple of the chosen option string, the GoffishConf built from the
        conf file, and a Graph built either from the command json file or
        from individual CLI flags.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("option",
        choices=['START', 'FORMAT', 'LOAD', 'UNLOAD', 'STOP', 'STAT'],
        help="Choose START to load just to download and compile source\n DEPLOY when deploying for the first time (include both creating folder structure and moving jars)\n REDEPLOY for fine tunig")
    parser.add_argument("--conf-file", dest='conf_file',
        help="json file containg configuration of goffish, By default tries to find in search path")
    parser.add_argument("--cmd-file", dest='cmd_file',
        help="json file containg arguments for commands, By default tries to find in search path")
    parser.add_argument("--graph-folder", dest='graph_folder',
        help="Name of the folder containg the gml file")
    parser.add_argument("--graph-id", dest='graph_id',
        help=" Id of the graph to be deployed")
    parser.add_argument("--number-of-instance", dest='number_of_instance', type=int,
        help="Number of time steps in graph")
    parser.add_argument("--subgraph-bin-packing", dest='subgraph_bin_packing', type=int,
        help="Number of subgraph bins in each parameter")
    parser.add_argument("--temporal-bin-packing", dest='temporal_bin_packing', type=int,
        help="Number of graph along time steps backed into bin")
    args = parser.parse_args()
    (conf_file_avilable, conf_file) = validate_file_argument(
        'conf-file', args.conf_file, 'goffish-conf.json')
    (arg_file_available, arg_file) = validate_file_argument(
        'cmd-file', args.cmd_file, 'graph_data.json')
    if not conf_file_avilable:
        # print() form replaces the Python-2-only print statement.
        print("Provide correct path of goffish-conf.json file in --conf-file option or keep it in search path")
        sys.exit()
    if not arg_file_available:
        # NOTE(review): validate_arg_start/validate_arg_format are not
        # defined anywhere in this module; only the LOAD branch can work.
        if args.option == 'START':
            validate_arg_start()
        elif args.option == 'FORMAT':
            validate_arg_format()
        elif args.option == 'LOAD':
            validate_arg_load()
    conf = GoffishConf(conf_file)
    if arg_file_available:
        cmd_args = parse_gofs_command_json(arg_file)
    else:
        # Build the command dict from the individual CLI flags instead.
        cmd_dic = {}  # was referenced without ever being initialized
        cmd_dic['graph_folder'] = args.graph_folder
        cmd_dic['graph_id'] = args.graph_id
        # was args.number_of_instances (nonexistent attribute) and the key
        # must match Graph.__init__'s 'number_of_instances' parameter.
        cmd_dic['number_of_instances'] = args.number_of_instance
        cmd_dic['subgraph_bin_packing'] = args.subgraph_bin_packing
        cmd_dic['temporal_bin_packing'] = args.temporal_bin_packing
        # Drop unset flags; was assigned to 'cmd_arg', leaving 'cmd_args'
        # as None and crashing Graph(**cmd_args) below.
        cmd_args = dict((k, v) for k, v in cmd_dic.items() if v is not None)
    graph_data = Graph(**cmd_args)
    return (args.option, conf, graph_data)
# ---- script entry: drive the requested GoFS operation ----------------------
(option, conf, graph_data) = parse_arguments()
# The cluster config is (re)written on every invocation.
gofs_config_string = gen_gofs_config(conf.head_node, conf.machine_list)
gofs_config_path = conf.head_node.path.bin + '/gofs-bin/conf/gofs.config'
helper.write_file(gofs_config_path, gofs_config_string)
list_xml_path = conf.head_node.path.bin + '/gofs-bin/conf/list.xml'
if option == 'LOAD':
    list_xml_string = gen_list_xml(conf.head_node, graph_data)
    helper.write_file(list_xml_path, list_xml_string)
if option == 'START':
    start(conf.head_node, graph_data.graph_index_file, 9998)
elif option == 'FORMAT':
    format(conf.head_node)
elif option == 'LOAD':
    load(conf.head_node, graph_data, list_xml_path, conf.machine_list)
else:
    # print() form replaces the Python-2-only print statement.
    print("Still Not Supported UNLOAD, STOP, STAT")
| |
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
""" The file describes the parameter tree view
QParameterView
"""
from PyQt4 import QtCore, QtGui
from vistrails.core.inspector import PipelineInspector
from vistrails.core.modules.module_registry import get_module_registry
from vistrails.core.modules.basic_modules import Constant
from vistrails.gui.common_widgets import QSearchTreeWindow, QSearchTreeWidget
from vistrails.gui.paramexplore.pe_pipeline import QAnnotatedPipelineView
from vistrails.gui.vistrails_palette import QVistrailsPaletteInterface
from vistrails.core.utils import InstanceObject
from vistrails.core.debug import debug
################################################################################
class ParameterInfo(InstanceObject):
    # Ad-hoc record describing one pipeline parameter shown in the tree
    # (InstanceObject presumably turns keyword args into attributes —
    # confirm against core.utils).
    #
    # Legacy construction:
    #   ParameterInfo(type=, identifier=, namespace=, value=, id=, dbtype=,
    #                 parent_dbtype=, parent_id=, is_alias=)
    # Current construction:
    #   ParameterInfo(module_id=, name=, pos=, value=, spec=, is_alias=)
    pass
################################################################################
class QParameterView(QtGui.QWidget, QVistrailsPaletteInterface):
    """
    QParameterView contains the parameter exploration properties and the
    parameter palette
    """
    def __init__(self, controller=None, parent=None):
        """Build the layout: an unset-parameter toggle, the parameter tree
        widget, and a read-only annotated pipeline view, then attach to the
        given controller."""
        QtGui.QWidget.__init__(self, parent)
        self.set_title('Pipeline Methods')
        # Start with no controller so set_controller() below sees a change
        # (it early-outs when the controller is unchanged).
        self.controller = None
        vLayout = QtGui.QVBoxLayout()
        vLayout.setMargin(0)
        vLayout.setSpacing(5)
        self.setLayout(vLayout)
        self.toggleUnsetParameters = QtGui.QCheckBox('Show Unset Parameters')
        vLayout.addWidget(self.toggleUnsetParameters, 0, QtCore.Qt.AlignRight)
        self.parameterWidget = QParameterWidget()
        vLayout.addWidget(self.parameterWidget)
        self.treeWidget = self.parameterWidget.treeWidget
        self.pipeline_view = QAnnotatedPipelineView()
        self.pipeline_view.setReadOnlyMode(True)
        vLayout.addWidget(self.pipeline_view)
        # Only the parameter widget (index 1) stretches with the window.
        vLayout.setStretch(0,0)
        vLayout.setStretch(1,1)
        vLayout.setStretch(2,0)
        self.connect(self.toggleUnsetParameters, QtCore.SIGNAL("toggled(bool)"),
                     self.parameterWidget.treeWidget.toggleUnsetParameters)
        self.set_controller(controller)
    def set_controller(self, controller):
        """Switch to another vistrail controller and refresh the views from
        its current pipeline (or clear them when controller is None)."""
        if self.controller == controller:
            return
        self.controller = controller
        self.pipeline_view.set_controller(controller)
        if self.controller is not None:
            self.set_pipeline(controller.current_pipeline)
        else:
            self.set_pipeline(None)
    def set_pipeline(self, pipeline):
        """Display ``pipeline`` in both the parameter tree and the annotated
        pipeline scene; a falsy pipeline clears the scene."""
        if self.controller is None:
            return
        self.pipeline = pipeline
        self.parameterWidget.set_pipeline(pipeline, self.controller)
        if pipeline:
            self.pipeline_view.scene().setupScene(pipeline)
        else:
            self.pipeline_view.scene().clear()
        self.pipeline_view.updateAnnotatedIds(pipeline)
class QParameterWidget(QSearchTreeWindow):
    """
    Search-tree window specialized for displaying the aliases and
    parameters inside a pipeline.
    """
    def createTreeWidget(self):
        """ createTreeWidget() -> QParameterTreeWidget
        Build and return the parameter tree widget hosted by this window.
        """
        return QParameterTreeWidget(self)

    def set_pipeline(self, pipeline, controller):
        """Remember the pipeline and refresh the tree from it."""
        self.pipeline = pipeline
        self.treeWidget.updateFromPipeline(pipeline, controller)
class QParameterTreeWidget(QSearchTreeWidget):
    """
    QParameterTreeWidget is a subclass of QSearchTreeWidget to display all
    Vistrails Module
    """
    def __init__(self, parent=None):
        """ QParameterTreeWidget(parent: QWidget) -> QParameterTreeWidget
        Set up size policy and header
        """
        QSearchTreeWidget.__init__(self, parent)
        self.header().hide()
        self.setRootIsDecorated(False)
        self.delegate = QParameterTreeWidgetItemDelegate(self, self)
        self.setItemDelegate(self.delegate)
        # Whether not-yet-set (available) parameters are shown.
        self.showUnsetParameters = False
    def updateFromPipeline(self, pipeline, controller):
        """ updateFromPipeline(pipeline: Pipeline) -> None
        Read the list of aliases and parameters from the pipeline

        Rebuilds the whole tree: an 'Aliases' root, a 'Vistrail Variables'
        root, then one top-level item per module containing its set
        functions and (when the module is valid) its available-but-unset
        constant input ports.
        """
        self.clear()
        if not pipeline:
            return
        # Update the aliases
        if len(pipeline.aliases)>0:
            aliasRoot = QParameterTreeWidgetItem(None, self, ['Aliases'])
            aliasRoot.setFlags(QtCore.Qt.ItemIsEnabled)
            for (alias, info) in pipeline.aliases.iteritems():
                # info is a (param type, param id, parent type, parent id,
                # module id) tuple.
                ptype, pId, parentType, parentId, mId = info
                parameter = pipeline.db_get_object(ptype, pId)
                function = pipeline.db_get_object(parentType, parentId)
                v = parameter.strValue
                port_spec = function.get_spec('input')
                port_spec_item = port_spec.port_spec_items[parameter.pos]
                label = ['%s = %s' % (alias, v)]
                pInfo = ParameterInfo(module_id=mId,
                                      name=function.name,
                                      pos=parameter.pos,
                                      value=v,
                                      spec=port_spec_item,
                                      is_alias=True)
                aliasItem = QParameterTreeWidgetItem((alias, [pInfo]),
                                                     aliasRoot, label)
            aliasRoot.setExpanded(True)
        # Hidden until a vistrail-variable module is actually encountered.
        vistrailVarsRoot = QParameterTreeWidgetItem(None, self,
                                                    ['Vistrail Variables'])
        vistrailVarsRoot.setHidden(True)
        # Now go through all modules and functions
        inspector = PipelineInspector()
        inspector.inspect_ambiguous_modules(pipeline)
        sortedModules = sorted(pipeline.modules.iteritems(),
                               key=lambda item: item[1].name)
        reg = get_module_registry()
        for mId, module in sortedModules:
            if module.is_vistrail_var():
                # Vistrail-variable modules are listed under their own root
                # rather than as regular modules.
                vistrailVarsRoot.setHidden(False)
                vistrailVarsRoot.setExpanded(True)
                port_spec = module.get_port_spec('value', 'input')
                if not port_spec:
                    debug.critical("Not port_spec for value in module %s" % module)
                    continue
                port_spec_items = port_spec.port_spec_items
                if not controller.has_vistrail_variable_with_uuid(
                        module.get_vistrail_var()):
                    continue
                vv = controller.get_vistrail_variable_by_uuid(
                    module.get_vistrail_var())
                label = ['%s = %s' % (vv.name, vv.value)]
                pList = [ParameterInfo(module_id=mId,
                                       name=port_spec.name,
                                       pos=port_spec.port_spec_items[pId].pos,
                                       value="",
                                       spec=port_spec.port_spec_items[pId],
                                       is_alias=False)
                         for pId in xrange(len(port_spec.port_spec_items))]
                mItem = QParameterTreeWidgetItem((vv.name, pList),
                                                 vistrailVarsRoot,
                                                 label)
                continue
            function_names = {}
            # Add existing parameters
            mLabel = [module.name]
            # The module's top-level item is created lazily, only when it
            # actually has something to show.
            moduleItem = None
            if len(module.functions)>0:
                for fId in xrange(len(module.functions)):
                    function = module.functions[fId]
                    function_names[function.name] = function
                    if len(function.params)==0: continue
                    if moduleItem==None:
                        # Ambiguous (duplicated) modules get an annotated id
                        # badge so the user can tell them apart.
                        if inspector.annotated_modules.has_key(mId):
                            annotatedId = inspector.annotated_modules[mId]
                            moduleItem = QParameterTreeWidgetItem(annotatedId,
                                                                  self, mLabel)
                        else:
                            moduleItem = QParameterTreeWidgetItem(None,
                                                                  self, mLabel)
                    v = ', '.join([p.strValue for p in function.params])
                    label = ['%s(%s)' % (function.name, v)]
                    try:
                        port_spec = function.get_spec('input')
                    except Exception, e:
                        debug.critical("get_spec failed: %s %s %s" % \
                            (module, function, function.sigstring))
                        continue
                    port_spec_items = port_spec.port_spec_items
                    pList = [ParameterInfo(module_id=mId,
                                           name=function.name,
                                           pos=function.params[pId].pos,
                                           value=function.params[pId].strValue,
                                           spec=port_spec_items[pId],
                                           is_alias=False)
                             for pId in xrange(len(function.params))]
                    mName = module.name
                    if moduleItem.parameter!=None:
                        mName += '(%d)' % moduleItem.parameter
                    fName = '%s :: %s' % (mName, function.name)
                    mItem = QParameterTreeWidgetItem((fName, pList),
                                                     moduleItem,
                                                     label)
            # Add available parameters
            if module.is_valid:
                for port_spec in module.destinationPorts():
                    if (port_spec.name in function_names or
                            not port_spec.is_valid or
                            not len(port_spec.port_spec_items) or
                            not reg.is_constant(port_spec)):
                        # The function already exists or is empty
                        # or contains non-constant modules
                        continue
                    if moduleItem==None:
                        if inspector.annotated_modules.has_key(mId):
                            annotatedId = inspector.annotated_modules[mId]
                            moduleItem = QParameterTreeWidgetItem(annotatedId,
                                                                  self,
                                                                  mLabel,
                                                                  False)
                        else:
                            moduleItem = QParameterTreeWidgetItem(None, self,
                                                                  mLabel, False)
                    v = ', '.join([p.module for p in port_spec.port_spec_items])
                    label = ['%s(%s)' % (port_spec.name, v)]
                    pList = [ParameterInfo(module_id=mId,
                                           name=port_spec.name,
                                           pos=port_spec.port_spec_items[pId].pos,
                                           value="",
                                           spec=port_spec.port_spec_items[pId],
                                           is_alias=False)
                             for pId in xrange(len(port_spec.port_spec_items))]
                    mName = module.name
                    if moduleItem.parameter!=None:
                        mName += '(%d)' % moduleItem.parameter
                    fName = '%s :: %s' % (mName, port_spec.name)
                    mItem = QParameterTreeWidgetItem((fName, pList),
                                                     moduleItem,
                                                     label, False)
            if moduleItem:
                moduleItem.setExpanded(True)
        # Re-apply the current unset-parameter visibility to the new items.
        self.toggleUnsetParameters(self.showUnsetParameters)
    def toggleUnsetParameters(self, state):
        """Show or hide all items flagged as not-set (isSet == False)."""
        self.showUnsetParameters = state
        for item in self.findItems("*", QtCore.Qt.MatchWildcard | QtCore.Qt.MatchRecursive):
            if not item.isSet:
                item.setHidden(not state)
class QParameterTreeWidgetItemDelegate(QtGui.QItemDelegate):
    """
    QParameterTreeWidgetItemDelegate will override the original
    QTreeWidget paint function to draw buttons for top-level item
    similar to QtDesigner. This mimics
    Qt/tools/designer/src/lib/shared/sheet_delegate, which is only a
    private class of Qt Designer.
    """
    def __init__(self, view, parent):
        """ QParameterTreeWidgetItemDelegate(view: QTreeView,
                                             parent: QWidget)
                                             -> QParameterTreeWidgetItemDelegate
        Create the item delegate given the tree view
        """
        QtGui.QItemDelegate.__init__(self, parent)
        self.treeView = view
    def paint(self, painter, option, index):
        """ paint(painter: QPainter, option: QStyleOptionViewItem,
                  index: QModelIndex) -> None
        Repaint the top-level item to have a button-look style
        """
        model = index.model()
        # Only top-level items (those whose parent index is invalid) get
        # the custom button-like rendering; everything else falls through
        # to the default delegate painting.
        if not model.parent(index).isValid():
            style = self.treeView.style()
            r = option.rect
            # Leave a 10-pixel left margin for the button look.
            textrect = QtCore.QRect(r.left() + 10,
                                    r.top(),
                                    r.width() - 10,
                                    r.height())
            font = painter.font()
            font.setBold(True)
            painter.setFont(font)
            text = option.fontMetrics.elidedText(
                model.data(index, QtCore.Qt.DisplayRole),
                QtCore.Qt.ElideMiddle,
                textrect.width() - 10)
            style.drawItemText(painter,
                               textrect,
                               QtCore.Qt.AlignLeft,
                               option.palette,
                               self.treeView.isEnabled(),
                               text)
            # Underline the label text.
            painter.setPen(QtGui.QPen(QtCore.Qt.black))
            fm = QtGui.QFontMetrics(font)
            size = fm.size(QtCore.Qt.TextSingleLine, text)
            painter.drawLine(textrect.left() - 5,
                             textrect.bottom() - 1,
                             textrect.left() + size.width() + 5,
                             textrect.bottom() - 1)
            # If the item carries an annotated module id, draw it to the
            # right of the label.
            annotatedId = model.data(index, QtCore.Qt.UserRole + 1)
            if annotatedId:
                idRect = QtCore.QRect(
                    QtCore.QPoint(textrect.left() + size.width() + 5,
                                  textrect.top()),
                    textrect.bottomRight())
                QAnnotatedPipelineView.drawId(painter, idRect,
                                              annotatedId,
                                              QtCore.Qt.AlignLeft |
                                              QtCore.Qt.AlignVCenter)
        else:
            QtGui.QItemDelegate.paint(self, painter, option, index)
    def sizeHint(self, option, index):
        """ sizeHint(option: QStyleOptionViewItem, index: QModelIndex) -> QSize
        Take into account the size of the top-level button
        """
        return (QtGui.QItemDelegate.sizeHint(self, option, index) +
                QtCore.QSize(2, 2))
class QParameterTreeWidgetItem(QtGui.QTreeWidgetItem):
    """
    QParameterTreeWidgetItem represents a single module (or parameter
    set) entry on a QParameterTreeWidget.
    """
    def __init__(self, info, parent, labelList, isSet=True):
        """ QParameterTreeWidgetItem(info: (str, []),
                                     parent: QTreeWidgetItem
                                     labelList: string,
                                     isSet: bool)
                                     -> QParameterTreeWidget
        Create a new tree widget item with a specific parent and
        labels.  For a parameter-set item, info is a pair
        (name, [ParameterInfo]) where name is the alias or function
        name.  For a top-level module item, info is either None or an
        integer holding the annotated id of the module.  isSet tells
        whether the item represents a set or an unset parameter.
        """
        # Keep the raw info around: the tree widget inspects .parameter
        # to distinguish module items (None / int) from parameter sets.
        self.parameter = info
        QtGui.QTreeWidgetItem.__init__(self, parent, labelList)
        if isinstance(info, int):
            # Expose the annotated id so the item delegate can paint it.
            self.setData(0, QtCore.Qt.UserRole + 1, info)
        self.isSet = isSet
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""An apiproxy stub that calls a remote handler via HTTP.
This allows easy remote access to the App Engine datastore, and potentially any
of the other App Engine APIs, using the same interface you use when accessing
the service locally.
An example Python script:
---
from google.appengine.ext import db
from google.appengine.ext.remote_api import remote_api_stub
from myapp import models
import getpass
def auth_func():
return (raw_input('Username:'), getpass.getpass('Password:'))
remote_api_stub.ConfigureRemoteApi(None, '/_ah/remote_api', auth_func,
'my-app.appspot.com')
# Now you can access the remote datastore just as if your code was running on
# App Engine!
houses = models.House.all().fetch(100)
for a_house in houses:
a_house.doors += 1
db.put(houses)
---
A few caveats:
- Where possible, avoid iterating over queries. Fetching as many results as you
will need is faster and more efficient. If you don't know how many results
you need, or you need 'all of them', iterating is fine.
- Likewise, it's a good idea to put entities in batches. Instead of calling put
for each individual entity, accumulate them and put them in batches using
db.put(), if you can.
- Requests and responses are still limited to 1MB each, so if you have large
entities or try and fetch or put many of them at once, your requests may fail.
"""
import google
import os
import pickle
import random
import sys
import thread
import threading
import yaml
import hashlib
if os.environ.get('APPENGINE_RUNTIME') == 'python27':
from google.appengine.api import apiproxy_rpc
from google.appengine.api import apiproxy_stub_map
from google.appengine.datastore import datastore_pb
from google.appengine.ext.remote_api import remote_api_pb
from google.appengine.ext.remote_api import remote_api_services
from google.appengine.runtime import apiproxy_errors
else:
from google.appengine.api import apiproxy_rpc
from google.appengine.api import apiproxy_stub_map
from google.appengine.datastore import datastore_pb
from google.appengine.ext.remote_api import remote_api_pb
from google.appengine.ext.remote_api import remote_api_services
from google.appengine.runtime import apiproxy_errors
from google.appengine.tools import appengine_rpc
_REQUEST_ID_HEADER = 'HTTP_X_APPENGINE_REQUEST_ID'
class Error(Exception):
  """Base class for all exceptions raised by this module."""
class ConfigurationError(Error):
  """Raised when remote_api is misconfigured or a server response is invalid."""
class UnknownJavaServerError(Error):
  """Raised when a Java remote_api handler reports an unidentified error."""
def GetUserAgent():
  """Determines the value of the 'User-agent' header to use for HTTP requests.
  Returns:
    String containing the 'user-agent' header value, which includes the SDK
    version, the platform information, and the version of Python;
    e.g., "remote_api/1.0.1 Darwin/9.2.0 Python/2.5.2".
  """
  python_version = ".".join(str(part) for part in sys.version_info)
  tokens = ["Google-remote_api/1.0",
            appengine_rpc.GetPlatformToken(),
            "Python/%s" % python_version]
  return " ".join(tokens)
def GetSourceName():
  """Returns the fixed source identifier this library reports to the server."""
  return "Google-remote_api-1.0"
def HashEntity(entity):
  """Return a very-likely-unique hash (SHA-1 digest) of an entity's encoding."""
  encoded = entity.Encode()
  return hashlib.sha1(encoded).digest()
class TransactionData(object):
  """Encapsulates data about an individual transaction."""
  def __init__(self, thread_id, is_xg):
    # Thread that opened the transaction; all further calls must come
    # from the same thread.
    self.thread_id = thread_id
    # Whether this is a cross-group (XG) transaction.
    self.is_xg = is_xg
    # Entity-group preconditions: encoded key -> (key, entity hash).
    self.preconditions = {}
    # Buffered writes: encoded key -> (key, entity or None for a delete).
    self.entities = {}
class RemoteStub(object):
  """A stub for calling services on a remote server over HTTP.
  You can use this to stub out any service that the remote server supports.
  """
  def __init__(self, server, path, _test_stub_map=None):
    """Constructs a new RemoteStub that communicates with the specified server.
    Args:
      server: An instance of a subclass of
        google.appengine.tools.appengine_rpc.AbstractRpcServer.
      path: The path to the handler this stub should send requests to.
      _test_stub_map: Optional map of service name -> local test stub; when
        a service is found here the call is handled locally instead.
    """
    self._server = server
    self._path = path
    self._test_stub_map = _test_stub_map
  def _PreHookHandler(self, service, call, request, response):
    # Subclass hook run before every call; no-op by default.
    pass
  def _PostHookHandler(self, service, call, request, response):
    # Subclass hook run after every call (even on error); no-op by default.
    pass
  def MakeSyncCall(self, service, call, request, response):
    """Dispatches a call either to a registered test stub or to the server."""
    self._PreHookHandler(service, call, request, response)
    try:
      local_stub = self._test_stub_map and self._test_stub_map.GetStub(service)
      if local_stub:
        # A local test stub is registered for this service; let it handle
        # the call instead of going over the wire.
        local_stub.MakeSyncCall(service, call, request, response)
      else:
        self._MakeRealSyncCall(service, call, request, response)
    finally:
      self._PostHookHandler(service, call, request, response)
  def _MakeRealSyncCall(self, service, call, request, response):
    """Wraps the request in a remote_api envelope and sends it over HTTP."""
    wrapped_request = remote_api_pb.Request()
    wrapped_request.set_service_name(service)
    wrapped_request.set_method(call)
    wrapped_request.set_request(request.Encode())
    if _REQUEST_ID_HEADER in os.environ:
      wrapped_request.set_request_id(os.environ[_REQUEST_ID_HEADER])
    wrapped_response = remote_api_pb.Response()
    raw_reply = self._server.Send(self._path, wrapped_request.Encode())
    wrapped_response.ParseFromString(raw_reply)
    # Surface errors in the order the server reports them.
    if wrapped_response.has_application_error():
      error_pb = wrapped_response.application_error()
      raise apiproxy_errors.ApplicationError(error_pb.code(),
                                             error_pb.detail())
    elif wrapped_response.has_exception():
      raise pickle.loads(wrapped_response.exception())
    elif wrapped_response.has_java_exception():
      raise UnknownJavaServerError("An unknown error has occured in the "
                                   "Java remote_api handler for this call.")
    else:
      response.ParseFromString(wrapped_response.response())
  def CreateRPC(self):
    """Returns an RPC object bound to this stub."""
    return apiproxy_rpc.RPC(stub=self)
class RemoteDatastoreStub(RemoteStub):
  """A specialised stub for accessing the App Engine datastore remotely.
  A specialised stub is required because there are some datastore operations
  that preserve state between calls. This stub makes queries possible.
  Transactions on the remote datastore are unfortunately still impossible.
  """
  def __init__(self, server, path, default_result_count=20,
               _test_stub_map=None):
    """Constructor.
    Args:
      server: The server name to connect to.
      path: The URI path on the server.
      default_result_count: The number of items to fetch, by default, in a
        datastore Query or Next operation. This affects the batch size of
        query iterators.
    """
    super(RemoteDatastoreStub, self).__init__(server, path, _test_stub_map)
    self.default_result_count = default_result_count
    # Open query cursors: local cursor id -> query to resume with, or None
    # once the query has been exhausted.
    self.__queries = {}
    # Active transactions: local transaction handle -> TransactionData.
    self.__transactions = {}
    # Counter (and guard lock) used to mint locally-issued cursor ids.
    self.__next_local_cursor = 1
    self.__local_cursor_lock = threading.Lock()
    # Counter (and guard lock) used to mint local transaction handles.
    self.__next_local_tx = 1
    self.__local_tx_lock = threading.Lock()
  def MakeSyncCall(self, service, call, request, response):
    """Routes a datastore_v3 call to a local _Dynamic_<call> handler, if any.
    Calls without a local override are forwarded verbatim to the server.
    """
    assert service == 'datastore_v3'
    explanation = []
    assert request.IsInitialized(explanation), explanation
    handler = getattr(self, '_Dynamic_' + call, None)
    if handler:
      handler(request, response)
    else:
      super(RemoteDatastoreStub, self).MakeSyncCall(service, call, request,
                                                    response)
    assert response.IsInitialized(explanation), explanation
  def _Dynamic_RunQuery(self, query, query_result, cursor_id = None):
    # Runs a query and registers a local cursor so _Dynamic_Next can fetch
    # further batches by re-running the (offset-adjusted) query.
    # Transactional queries also record an entity-group precondition that
    # is verified at commit time.
    if query.has_transaction():
      txdata = self.__transactions[query.transaction().handle()]
      tx_result = remote_api_pb.TransactionQueryResult()
      super(RemoteDatastoreStub, self).MakeSyncCall(
          'remote_datastore', 'TransactionQuery', query, tx_result)
      query_result.CopyFrom(tx_result.result())
      eg_key = tx_result.entity_group_key()
      encoded_eg_key = eg_key.Encode()
      eg_hash = None
      if tx_result.has_entity_group():
        eg_hash = HashEntity(tx_result.entity_group())
      old_key, old_hash = txdata.preconditions.get(encoded_eg_key, (None, None))
      if old_key is None:
        # First read of this entity group inside the transaction: remember
        # its current state for the commit-time consistency check.
        txdata.preconditions[encoded_eg_key] = (eg_key, eg_hash)
      elif old_hash != eg_hash:
        # The entity group changed between two reads in this transaction.
        raise apiproxy_errors.ApplicationError(
            datastore_pb.Error.CONCURRENT_TRANSACTION,
            'Transaction precondition failed.')
    else:
      super(RemoteDatastoreStub, self).MakeSyncCall(
          'datastore_v3', 'RunQuery', query, query_result)
    if cursor_id is None:
      # Allocate a fresh local cursor id for this query.
      self.__local_cursor_lock.acquire()
      try:
        cursor_id = self.__next_local_cursor
        self.__next_local_cursor += 1
      finally:
        self.__local_cursor_lock.release()
    if query_result.more_results():
      # Advance the stored query past the results already returned so
      # that the next batch resumes at the right place.
      query.set_offset(query.offset() + query_result.result_size())
      if query.has_limit():
        query.set_limit(query.limit() - query_result.result_size())
      self.__queries[cursor_id] = query
    else:
      # Query exhausted; keep the id so Next reports no more results.
      self.__queries[cursor_id] = None
    query_result.mutable_cursor().set_cursor(cursor_id)
  def _Dynamic_Next(self, next_request, query_result):
    """Fetches the next batch for a cursor issued by _Dynamic_RunQuery."""
    assert next_request.offset() == 0
    cursor_id = next_request.cursor().cursor()
    if cursor_id not in self.__queries:
      raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
                                             'Cursor %d not found' % cursor_id)
    query = self.__queries[cursor_id]
    if query is None:
      # The query was exhausted by an earlier call.
      query_result.set_more_results(False)
      return
    else:
      if next_request.has_count():
        query.set_count(next_request.count())
      else:
        query.clear_count()
      self._Dynamic_RunQuery(query, query_result, cursor_id)
    query_result.set_skipped_results(0)
  def _Dynamic_Get(self, get_request, get_response):
    """Fetches entities, consulting the transaction-local cache first."""
    txid = None
    if get_request.has_transaction():
      # Only fetch entities the transaction has not already read or
      # written; buffered results are served from txdata.entities below.
      txid = get_request.transaction().handle()
      txdata = self.__transactions[txid]
      assert (txdata.thread_id ==
              thread.get_ident()), "Transactions are single-threaded."
      keys = [(k, k.Encode()) for k in get_request.key_list()]
      new_request = datastore_pb.GetRequest()
      for key, enckey in keys:
        if enckey not in txdata.entities:
          new_request.add_key().CopyFrom(key)
    else:
      new_request = get_request
    if new_request.key_size() > 0:
      super(RemoteDatastoreStub, self).MakeSyncCall(
          'datastore_v3', 'Get', new_request, get_response)
    if txid is not None:
      # Record preconditions for the freshly-fetched entities, then merge
      # the remote results with the transaction-local cache, preserving
      # the order of the original request.
      newkeys = new_request.key_list()
      entities = get_response.entity_list()
      for key, entity in zip(newkeys, entities):
        entity_hash = None
        if entity.has_entity():
          entity_hash = HashEntity(entity.entity())
        txdata.preconditions[key.Encode()] = (key, entity_hash)
      new_response = datastore_pb.GetResponse()
      it = iter(get_response.entity_list())
      for key, enckey in keys:
        if enckey in txdata.entities:
          cached_entity = txdata.entities[enckey][1]
          if cached_entity:
            new_response.add_entity().mutable_entity().CopyFrom(cached_entity)
          else:
            # The entity was deleted earlier in this transaction.
            new_response.add_entity()
        else:
          new_entity = it.next()
          if new_entity.has_entity():
            assert new_entity.entity().key() == key
            new_response.add_entity().CopyFrom(new_entity)
          else:
            new_response.add_entity()
      get_response.CopyFrom(new_response)
  def _Dynamic_Put(self, put_request, put_response):
    """Buffers transactional puts locally; forwards others unchanged."""
    if put_request.has_transaction():
      entities = put_request.entity_list()
      # Entities with an incomplete key (no id and no name) need ids
      # allocated by the server before they can be cached locally.
      requires_id = lambda x: x.id() == 0 and not x.has_name()
      new_ents = [e for e in entities
                  if requires_id(e.key().path().element_list()[-1])]
      id_request = datastore_pb.PutRequest()
      txid = put_request.transaction().handle()
      txdata = self.__transactions[txid]
      assert (txdata.thread_id ==
              thread.get_ident()), "Transactions are single-threaded."
      if new_ents:
        for ent in new_ents:
          e = id_request.add_entity()
          e.mutable_key().CopyFrom(ent.key())
          e.mutable_entity_group()
        id_response = datastore_pb.PutResponse()
        # Cross-group transactions allocate ids from multiple groups.
        if txdata.is_xg:
          rpc_name = 'GetIDsXG'
        else:
          rpc_name = 'GetIDs'
        super(RemoteDatastoreStub, self).MakeSyncCall(
            'remote_datastore', rpc_name, id_request, id_response)
        assert id_request.entity_size() == id_response.key_size()
        for key, ent in zip(id_response.key_list(), new_ents):
          ent.mutable_key().CopyFrom(key)
          ent.mutable_entity_group().add_element().CopyFrom(
              key.path().element(0))
      for entity in entities:
        # Cache the write; it is only sent to the server at commit time.
        txdata.entities[entity.key().Encode()] = (entity.key(), entity)
        put_response.add_key().CopyFrom(entity.key())
    else:
      super(RemoteDatastoreStub, self).MakeSyncCall(
          'datastore_v3', 'Put', put_request, put_response)
  def _Dynamic_Delete(self, delete_request, response):
    """Buffers transactional deletes locally; forwards others unchanged."""
    if delete_request.has_transaction():
      txid = delete_request.transaction().handle()
      txdata = self.__transactions[txid]
      assert (txdata.thread_id ==
              thread.get_ident()), "Transactions are single-threaded."
      for key in delete_request.key_list():
        # A None entity marks a pending delete for commit time.
        txdata.entities[key.Encode()] = (key, None)
    else:
      super(RemoteDatastoreStub, self).MakeSyncCall(
          'datastore_v3', 'Delete', delete_request, response)
  def _Dynamic_BeginTransaction(self, request, transaction):
    """Starts a purely local transaction; nothing is sent until Commit."""
    self.__local_tx_lock.acquire()
    try:
      txid = self.__next_local_tx
      self.__transactions[txid] = TransactionData(thread.get_ident(),
                                                  request.allow_multiple_eg())
      self.__next_local_tx += 1
    finally:
      self.__local_tx_lock.release()
    transaction.set_handle(txid)
    transaction.set_app(request.app())
  def _Dynamic_Commit(self, transaction, transaction_response):
    """Sends buffered preconditions and writes to the server atomically."""
    txid = transaction.handle()
    if txid not in self.__transactions:
      raise apiproxy_errors.ApplicationError(
          datastore_pb.Error.BAD_REQUEST,
          'Transaction %d not found.' % (txid,))
    txdata = self.__transactions[txid]
    assert (txdata.thread_id ==
            thread.get_ident()), "Transactions are single-threaded."
    del self.__transactions[txid]
    tx = remote_api_pb.TransactionRequest()
    tx.set_allow_multiple_eg(txdata.is_xg)
    for key, hash in txdata.preconditions.values():
      precond = tx.add_precondition()
      precond.mutable_key().CopyFrom(key)
      if hash:
        precond.set_hash(hash)
    puts = tx.mutable_puts()
    deletes = tx.mutable_deletes()
    for key, entity in txdata.entities.values():
      if entity:
        puts.add_entity().CopyFrom(entity)
      else:
        deletes.add_key().CopyFrom(key)
    # The remote handler checks all preconditions and applies all
    # mutations in a single server-side transaction.
    super(RemoteDatastoreStub, self).MakeSyncCall(
        'remote_datastore', 'Transaction',
        tx, datastore_pb.PutResponse())
  def _Dynamic_Rollback(self, transaction, transaction_response):
    """Discards locally-buffered transaction state; nothing is sent."""
    txid = transaction.handle()
    self.__local_tx_lock.acquire()
    try:
      if txid not in self.__transactions:
        raise apiproxy_errors.ApplicationError(
            datastore_pb.Error.BAD_REQUEST,
            'Transaction %d not found.' % (txid,))
      txdata = self.__transactions[txid]
      assert (txdata.thread_id ==
              thread.get_ident()), "Transactions are single-threaded."
      del self.__transactions[txid]
    finally:
      self.__local_tx_lock.release()
  def _Dynamic_CreateIndex(self, index, id_response):
    # Index manipulation is intentionally unsupported over remote_api.
    raise apiproxy_errors.CapabilityDisabledError(
        'The remote datastore does not support index manipulation.')
  def _Dynamic_UpdateIndex(self, index, void):
    raise apiproxy_errors.CapabilityDisabledError(
        'The remote datastore does not support index manipulation.')
  def _Dynamic_DeleteIndex(self, index, void):
    raise apiproxy_errors.CapabilityDisabledError(
        'The remote datastore does not support index manipulation.')
ALL_SERVICES = set(remote_api_services.SERVICE_PB_MAP)
def GetRemoteAppIdFromServer(server, path, remote_token=None):
  """Return the app id from a connection to an existing server.
  Args:
    server: An appengine_rpc.AbstractRpcServer
    path: The path to the remote_api handler for your app
      (for example, '/_ah/remote_api').
    remote_token: Token to validate that the response was to this request.
  Returns:
    App ID as reported by the remote server.
  Raises:
    ConfigurationError: The server returned an invalid response.
  """
  if not remote_token:
    random.seed()
    remote_token = str(random.random())[2:]
  remote_token = str(remote_token)
  urlargs = {'rtok': remote_token}
  response = server.Send(path, payload=None, **urlargs)
  if not response.startswith('{'):
    raise ConfigurationError(
        'Invalid response received from server: %s' % response)
  # NOTE(review): yaml.load can construct arbitrary objects on untrusted
  # input.  The response comes from the app's own remote_api handler, but
  # consider yaml.safe_load if that trust assumption ever changes.
  app_info = yaml.load(response)
  if not app_info or 'rtok' not in app_info or 'app_id' not in app_info:
    raise ConfigurationError('Error parsing app_id lookup response')
  # Echoed token must match what we sent, to guard against stale or
  # misrouted responses.
  if str(app_info['rtok']) != remote_token:
    raise ConfigurationError('Token validation failed during app_id lookup. '
                             '(sent %s, got %s)' % (repr(remote_token),
                                                    repr(app_info['rtok'])))
  return app_info['app_id']
def ConfigureRemoteApiFromServer(server, path, app_id, services=None,
                                 default_auth_domain=None,
                                 use_remote_datastore=True):
  """Does necessary setup to allow easy remote access to App Engine APIs.
  Args:
    server: An AbstractRpcServer
    path: The path to the remote_api handler for your app
      (for example, '/_ah/remote_api').
    app_id: The app_id of your app, as declared in app.yaml.
    services: A list of services to set up stubs for. If specified, only
      those services are configured; by default all supported services are
      configured.
    default_auth_domain: The authentication domain to use by default.
    use_remote_datastore: Whether to use RemoteDatastoreStub instead of
      passing through datastore requests. RemoteDatastoreStub batches
      transactional datastore requests since, in production, datastore
      requests are scoped to a single request.
  Raises:
    ConfigurationError: if a requested service is not supported.
  """
  if services is None:
    requested = set(ALL_SERVICES)
  else:
    requested = set(services)
    unsupported = requested.difference(ALL_SERVICES)
    if unsupported:
      raise ConfigurationError('Unsupported service(s): %s'
                               % (', '.join(unsupported),))
  os.environ['APPLICATION_ID'] = app_id
  os.environ.setdefault('AUTH_DOMAIN', default_auth_domain or 'gmail.com')
  # Replace the process-wide stub map so every API call is proxied.
  apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
  if 'datastore_v3' in requested and use_remote_datastore:
    # The datastore gets a stateful stub that supports cursors and
    # client-buffered transactions.
    requested.remove('datastore_v3')
    apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3',
                                            RemoteDatastoreStub(server, path))
  generic_stub = RemoteStub(server, path)
  for service in requested:
    apiproxy_stub_map.apiproxy.RegisterStub(service, generic_stub)
def GetRemoteAppId(servername,
                   path,
                   auth_func,
                   rpc_server_factory=appengine_rpc.HttpRpcServer,
                   rtok=None,
                   secure=False,
                   save_cookies=False):
  """Get the remote appid as reported at servername/path.
  This will also return an AbstractRpcServer server, which can be used with
  ConfigureRemoteApiFromServer.
  Args:
    servername: The hostname your app is deployed on.
    path: The path to the remote_api handler for your app
      (for example, '/_ah/remote_api').
    auth_func: A function that takes no arguments and returns a
      (username, password) tuple. This will be called if your application
      requires authentication to access the remote_api handler (it should!)
      and you do not already have a valid auth cookie.
    rpc_server_factory: A factory to construct the rpc server for the datastore.
    rtok: The validation token to send with app_id lookups. If None, a random
      token is used.
    secure: Use SSL when communicating with the server.
    save_cookies: Forwarded to rpc_server_factory function.
  Returns:
    (app_id, server): The application ID and an AbstractRpcServer.
  """
  server = rpc_server_factory(servername, auth_func, GetUserAgent(),
                              GetSourceName(), save_cookies=save_cookies,
                              debug_data=False, secure=secure)
  app_id = GetRemoteAppIdFromServer(server, path, rtok)
  return app_id, server
def ConfigureRemoteApi(app_id,
                       path,
                       auth_func,
                       servername=None,
                       rpc_server_factory=appengine_rpc.HttpRpcServer,
                       rtok=None,
                       secure=False,
                       services=None,
                       default_auth_domain=None,
                       save_cookies=False,
                       use_remote_datastore=True):
  """Does necessary setup to allow easy remote access to App Engine APIs.
  Either servername must be provided or app_id must not be None.  If app_id
  is None and a servername is provided, this function will send a request
  to the server to retrieve the app_id.
  Note that if the app_id is specified, the internal appid must be used;
  this may include a partition and a domain. It is often easier to let
  remote_api_stub retrieve the app_id automatically.
  Args:
    app_id: The app_id of your app, as declared in app.yaml, or None.
    path: The path to the remote_api handler for your app
      (for example, '/_ah/remote_api').
    auth_func: A function that takes no arguments and returns a
      (username, password) tuple. This will be called if your application
      requires authentication to access the remote_api handler (it should!)
      and you do not already have a valid auth cookie.
    servername: The hostname your app is deployed on. Defaults to
      <app_id>.appspot.com.
    rpc_server_factory: A factory to construct the rpc server for the datastore.
    rtok: The validation token to send with app_id lookups. If None, a random
      token is used.
    secure: Use SSL when communicating with the server.
    services: A list of services to set up stubs for. If specified, only those
      services are configured; by default all supported services are configured.
    default_auth_domain: The authentication domain to use by default.
    save_cookies: Forwarded to rpc_server_factory function.
    use_remote_datastore: Whether to use RemoteDatastoreStub instead of passing
      through datastore requests. RemoteDatastoreStub batches transactional
      datastore requests since, in production, datastore requests are scoped to
      a single request.
  Returns:
    server, the server created by rpc_server_factory, which may be useful for
      calling the application directly.
  Raises:
    urllib2.HTTPError: if app_id is not provided and there is an error while
      retrieving it.
    ConfigurationError: if there is an error configuring the remote API.
  """
  if not servername and not app_id:
    raise ConfigurationError('app_id or servername required')
  if not servername:
    servername = '%s.appspot.com' % (app_id,)
  server = rpc_server_factory(servername, auth_func, GetUserAgent(),
                              GetSourceName(), save_cookies=save_cookies,
                              debug_data=False, secure=secure)
  if not app_id:
    app_id = GetRemoteAppIdFromServer(server, path, rtok)
  ConfigureRemoteApiFromServer(server, path, app_id, services,
                               default_auth_domain, use_remote_datastore)
  return server
def MaybeInvokeAuthentication():
  """Sends an empty request through to the configured end-point.
  If authentication is necessary, this will cause the rpc_server to invoke
  interactive authentication.
  """
  datastore_stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
  if not isinstance(datastore_stub, RemoteStub):
    raise ConfigurationError('remote_api is not configured.')
  # An empty payload is enough to trigger the auth handshake if needed.
  datastore_stub._server.Send(datastore_stub._path, payload=None)
ConfigureRemoteDatastore = ConfigureRemoteApi
| |
"""
module for generating C, C++, Fortran77, Fortran90 and python routines that
evaluate sympy expressions. This module is work in progress. Only the
milestones with a '+' character in the list below have been completed.
--- How is sympy.utilities.codegen different from sympy.printing.ccode? ---
We considered the idea to extend the printing routines for sympy functions in
such a way that it prints complete compilable code, but this leads to a few
insurmountable issues that can only be tackled with a dedicated code generator:
- For C, one needs both a code and a header file, while the printing routines
generate just one string. This code generator can be extended to support
.pyf files for f2py.
- SymPy functions are not concerned with programming-technical issues, such
as input, output and input-output arguments. Other examples are contiguous
or non-contiguous arrays, including headers of other libraries such as gsl
or others.
- It is highly interesting to evaluate several sympy functions in one C
routine, eventually sharing common intermediate results with the help
of the cse routine. This is more than just printing.
- From the programming perspective, expressions with constants should be
evaluated in the code generator as much as possible. This is different
for printing.
--- Basic assumptions ---
* A generic Routine data structure describes the routine that must be
translated into C/Fortran/... code. This data structure covers all
features present in one or more of the supported languages.
* Descendants from the CodeGen class transform multiple Routine instances
into compilable code. Each derived class translates into a specific
language.
* In many cases, one wants a simple workflow. The friendly functions in the
last part are a simple api on top of the Routine/CodeGen stuff. They are
easier to use, but are less powerful.
--- Milestones ---
+ First working version with scalar input arguments, generating C code,
tests
+ Friendly functions that are easier to use than the rigorous
Routine/CodeGen workflow.
+ Integer and Real numbers as input and output
+ Output arguments
+ InputOutput arguments
+ Sort input/output arguments properly
+ Contiguous array arguments (numpy matrices)
+ Also generate .pyf code for f2py (in autowrap module)
+ Isolate constants and evaluate them beforehand in double precision
+ Fortran 90
- Common Subexpression Elimination
- User defined comments in the generated code
- Optional extra include lines for libraries/objects that can eval special
functions
- Test other C compilers and libraries: gcc, tcc, libtcc, gcc+gsl, ...
- Contiguous array arguments (sympy matrices)
- Non-contiguous array arguments (sympy matrices)
- ccode must raise an error when it encounters something that can not be
translated into c. ccode(integrate(sin(x)/x, x)) does not make sense.
- Complex numbers as input and output
- A default complex datatype
- Include extra information in the header: date, user, hostname, sha1
hash, ...
- Fortran 77
- C++
- Python
- ...
"""
from __future__ import with_statement
import os
from StringIO import StringIO
from sympy import __version__ as sympy_version
from sympy.core import Symbol, S, Expr, Tuple, Equality, Function
from sympy.core.compatibility import is_sequence
from sympy.printing.codeprinter import AssignmentError
from sympy.printing.ccode import ccode, CCodePrinter
from sympy.printing.fcode import fcode, FCodePrinter
from sympy.tensor import Idx, Indexed, IndexedBase
# Public API of this module, grouped by purpose.
__all__ = [
    # description of routines
    "Routine", "DataType", "default_datatypes", "get_default_datatype",
    "Argument", "InputArgument", "Result",
    # routines -> code
    "CodeGen", "CCodeGen", "FCodeGen",
    # friendly functions
    "codegen",
]
#
# Description of routines
#
class Routine(object):
    """Generic description of an evaluation routine for a set of sympy expressions.
    A CodeGen class can translate instances of this class into C/Fortran/...
    code. The routine specification covers all the features present in these
    languages. The CodeGen part must raise an exception when certain features
    are not present in the target language. For example, multiple return
    values are possible in Python, but not in C or Fortran. Another example:
    Fortran and Python support complex numbers, while C does not.
    """
    def __init__(self, name, expr, argument_sequence=None):
        """Initialize a Routine instance.
        ``name``
          A string with the name of this routine in the generated code
        ``expr``
          The sympy expression that the Routine instance will represent. If
          given a list or tuple of expressions, the routine will be
          considered to have multiple return values.
        ``argument_sequence``
          Optional list/tuple containing arguments for the routine in a
          preferred order. If omitted, arguments will be ordered
          alphabetically, but with all input arguments first, and then output
          or in-out arguments.
        A decision about whether to use output arguments or return values,
        is made depending on the mathematical expressions. For an expression
        of type Equality, the left hand side is made into an OutputArgument
        (or an InOutArgument if appropriate). Else, the calculated
        expression is the return values of the routine.
        A tuple of expressions can be used to create a routine with both
        return value(s) and output argument(s).
        """
        arg_list = []
        if is_sequence(expr):
            if not expr:
                raise ValueError("No expression given")
            expressions = Tuple(*expr)
        else:
            expressions = Tuple(expr)
        # local variables (loop indices are local, never arguments)
        local_vars = set([i.label for i in expressions.atoms(Idx)])
        # symbols that should be arguments
        symbols = expressions.atoms(Symbol) - local_vars
        # Decide whether to use output argument or return value
        return_val = []
        output_args = []
        for expr in expressions:
            if isinstance(expr, Equality):
                # An Equality assigns to its lhs: that Symbol (or Indexed)
                # becomes an output (or in-out) argument of the routine.
                out_arg = expr.lhs
                expr = expr.rhs
                if isinstance(out_arg, Indexed):
                    dims = tuple([ (S.Zero, dim-1) for dim in out_arg.shape])
                    symbol = out_arg.base.label
                elif isinstance(out_arg, Symbol):
                    dims = []
                    symbol = out_arg
                else:
                    raise CodeGenError("Only Indexed or Symbol can define output arguments")
                if expr.has(symbol):
                    # The rhs also reads the output symbol: in-out argument.
                    output_args.append(InOutArgument(symbol, out_arg, expr, dimensions=dims))
                else:
                    output_args.append(OutputArgument(symbol, out_arg, expr, dimensions=dims))
                # avoid duplicate arguments
                symbols.remove(symbol)
            else:
                # Plain expression: its value is returned by the routine.
                return_val.append(Result(expr))
        # setup input argument list
        array_symbols = {}
        for array in expressions.atoms(Indexed):
            array_symbols[array.base.label] = array
        for symbol in sorted(symbols, key=str):
            if symbol in array_symbols:
                # Indexed inputs carry their (0, dim-1) bounds as metadata.
                dims = []
                array = array_symbols[symbol]
                for dim in array.shape:
                    dims.append((S.Zero, dim - 1))
                metadata = {'dimensions': dims}
            else:
                metadata = {}
            arg_list.append(InputArgument(symbol, **metadata))
        output_args.sort(key=lambda x:str(x.name))
        arg_list.extend(output_args)
        if argument_sequence is not None:
            # if the user has supplied IndexedBase instances, we'll accept that
            new_sequence = []
            for arg in argument_sequence:
                if isinstance(arg, IndexedBase):
                    new_sequence.append(arg.label)
                else:
                    new_sequence.append(arg)
            argument_sequence = new_sequence
            missing = filter(lambda x: x.name not in argument_sequence, arg_list)
            if missing:
                raise CodeGenArgumentListError("Argument list didn't specify: %s" %
                        ", ".join([str(m.name) for m in missing]), missing)
            # create redundant arguments to produce the requested sequence
            name_arg_dict = dict([(x.name, x) for x in arg_list])
            new_args = []
            for symbol in argument_sequence:
                try:
                    new_args.append(name_arg_dict[symbol])
                except KeyError:
                    new_args.append(InputArgument(symbol))
            arg_list = new_args
        self.name = name
        self.arguments = arg_list
        self.results = return_val
        self.local_vars = local_vars
    @property
    def variables(self):
        """Returns a set containing all variables possibly used in this routine.
        For routines with unnamed return values, the dummies that may or may
        not be used will be included in the set.
        """
        v = set(self.local_vars)
        for arg in self.arguments:
            v.add(arg.name)
        for res in self.results:
            v.add(res.result_var)
        return v
    @property
    def result_variables(self):
        """Returns a list of OutputArgument, InOutArgument and Result.
        If return values are present, they are at the end of the list.
        """
        args = [arg for arg in self.arguments if isinstance(arg, (OutputArgument, InOutArgument))]
        args.extend(self.results)
        return args
class DataType(object):
    """Holds the spelling of one abstract datatype in each target language.

    cname  -- type name used in generated C code
    fname  -- type name used in generated Fortran code
    pyname -- type name used in generated Python code
    """
    def __init__(self, cname, fname, pyname):
        self.cname, self.fname, self.pyname = cname, fname, pyname
# Mapping from abstract type names to their per-language DataType spellings;
# consulted by get_default_datatype() below.
default_datatypes = {
    "int": DataType("int", "INTEGER*4", "int"),
    "float": DataType("double", "REAL*8", "float")
}
def get_default_datatype(expr):
    """Guess a decent data type from the assumptions on *expr*.

    Integer expressions map to the "int" datatype; everything else
    (including expressions whose type cannot be determined) maps to "float".
    """
    key = "int" if expr.is_integer else "float"
    return default_datatypes[key]
class Variable(object):
    """A typed variable, optionally carrying array dimensions."""

    def __init__(self, name, datatype=None, dimensions=None, precision=None):
        """Initializes a Variable instance

        name -- must be of class Symbol
        datatype -- When not given, the data type will be guessed based
            on the assumptions on the symbol argument.
        dimensions -- If present, the argument is interpreted as an array.
            Must be a sequence of (lower, upper) bound tuples, one per
            index of the array.
        precision -- FIXME
        """
        if not isinstance(name, Symbol):
            raise TypeError("The first argument must be a sympy symbol.")
        if datatype is None:
            datatype = get_default_datatype(name)
        elif not isinstance(datatype, DataType):
            raise TypeError("The (optional) `datatype' argument must be an instance of the DataType class.")
        if dimensions and not isinstance(dimensions, (tuple, list)):
            raise TypeError("The dimension argument must be a sequence of tuples")
        self._name = name
        # Resolve the per-language type names once, up front.
        self._datatype = {
            'C': datatype.cname,
            'FORTRAN': datatype.fname,
            'PYTHON': datatype.pyname,
        }
        self.dimensions = dimensions
        self.precision = precision

    @property
    def name(self):
        return self._name

    def get_datatype(self, language):
        """Returns the datatype string for the requested language.

        >>> from sympy import Symbol
        >>> from sympy.utilities.codegen import Variable
        >>> x = Variable(Symbol('x'))
        >>> x.get_datatype('c')
        'double'
        >>> x.get_datatype('fortran')
        'REAL*8'
        """
        try:
            return self._datatype[language.upper()]
        except KeyError:
            raise CodeGenError("Has datatypes for languages: %s" %
                    ", ".join(self._datatype))
class Argument(Variable):
    """An abstract Argument data structure: a name and a data type.

    This structure is refined in the descendants below.
    """

    def __init__(self, name, datatype=None, dimensions=None, precision=None):
        """See docstring of Variable.__init__"""
        super(Argument, self).__init__(name, datatype, dimensions, precision)
class InputArgument(Argument):
    """Argument whose value is only read by the routine (Fortran intent(in))."""
    pass
class ResultBase(object):
    """Base class for all ``outgoing'' information from a routine.

    Stores the sympy expression to be computed together with a sympy object
    representing the result variable, which is used in the generated code
    only when necessary.
    """

    def __init__(self, expr, result_var):
        self.expr, self.result_var = expr, result_var
class OutputArgument(Argument, ResultBase):
    """An output-only argument: always initialized inside the routine
    (Fortran intent(out); passed by address in the C prototype).
    """
    def __init__(self, name, result_var, expr, datatype=None, dimensions=None, precision=None):
        """ See docstring of Variable.__init__
        """
        Argument.__init__(self, name, datatype, dimensions, precision)
        ResultBase.__init__(self, expr, result_var)
class InOutArgument(Argument, ResultBase):
    """An argument both read and written by the routine: never initialized
    inside it (Fortran intent(inout)).
    """
    def __init__(self, name, result_var, expr, datatype=None, dimensions=None, precision=None):
        """ See docstring of Variable.__init__
        """
        Argument.__init__(self, name, datatype, dimensions, precision)
        ResultBase.__init__(self, expr, result_var)
class Result(ResultBase):
    """An expression for a scalar return value.

    The name result is used to avoid conflicts with the reserved word
    'return' in the python language. It is also shorter than ReturnValue.
    """
    def __init__(self, expr, datatype=None, precision=None):
        """Initialize a (scalar) return value.

        The second argument is optional. When not given, the data type will
        be guessed based on the assumptions on the expression argument.
        """
        if not isinstance(expr, Expr):
            raise TypeError("The first argument must be a sympy expression.")
        # NOTE(review): the generated name depends on hash(expr), which may
        # not be stable across interpreter runs -- confirm this name never
        # leaks into persisted or user-facing output.
        temp_var = Variable(Symbol('result_%s'%hash(expr)),
                datatype=datatype, dimensions=None, precision=precision)
        ResultBase.__init__(self, expr, temp_var.name)
        self._temp_variable = temp_var
    def get_datatype(self, language):
        # Delegate to the underlying Variable for the per-language type name.
        return self._temp_variable.get_datatype(language)
#
# Transformation of routine objects into code
#
class CodeGen(object):
    """Abstract class for the code generators.

    Subclasses supply the language-specific hooks called by ``dump_code``
    (``_get_header``, ``_preprosessor_statements``, ``_get_routine_opening``,
    ``_declare_arguments``, ``_declare_locals``, ``_call_printer``,
    ``_indent_code``, ``_get_routine_ending``) and a ``dump_fns`` list of
    dump methods used by ``write``.
    """
    def __init__(self, project="project"):
        """Initialize a code generator.

        Derived classes will offer more options that affect the generated
        code.
        """
        self.project = project
    def write(self, routines, prefix, to_files=False, header=True, empty=True):
        """Writes all the source code files for the given routines.

        The generate source is returned as a list of (filename, contents)
        tuples, or is written to files (see options). Each filename consists
        of the given prefix, appended with an appropriate extension.

        ``routines``
            A list of Routine instances to be written
        ``prefix``
            The prefix for the output files
        ``to_files``
            When True, the output is effectively written to files.
            [DEFAULT=False] Otherwise, a list of (filename, contents)
            tuples is returned.
        ``header``
            When True, a header comment is included on top of each source
            file. [DEFAULT=True]
        ``empty``
            When True, empty lines are included to structure the source
            files. [DEFAULT=True]
        """
        if to_files:
            for dump_fn in self.dump_fns:
                filename = "%s.%s" % (prefix, dump_fn.extension)
                with open(filename, "w") as f:
                    dump_fn(self, routines, f, prefix, header, empty)
        else:
            # Collect the generated sources in memory instead of on disk.
            result = []
            for dump_fn in self.dump_fns:
                filename = "%s.%s" % (prefix, dump_fn.extension)
                contents = StringIO()
                dump_fn(self, routines, contents, prefix, header, empty)
                result.append((filename, contents.getvalue()))
            return result
    def dump_code(self, routines, f, prefix, header=True, empty=True):
        """Write the code file by calling language specific methods in correct order

        The generated file contains all the definitions of the routines in
        low-level code and refers to the header file if appropriate.

        :Arguments:

        routines
            A list of Routine instances
        f
            A file-like object to write the file to
        prefix
            The filename prefix, used to refer to the proper header file. Only
            the basename of the prefix is used.

        :Optional arguments:

        header
            When True, a header comment is included on top of each source file.
            [DEFAULT=True]
        empty
            When True, empty lines are included to structure the source files.
            [DEFAULT=True]
        """
        code_lines = self._preprosessor_statements(prefix)
        for routine in routines:
            if empty: code_lines.append("\n")
            code_lines.extend(self._get_routine_opening(routine))
            code_lines.extend(self._declare_arguments(routine))
            code_lines.extend(self._declare_locals(routine))
            if empty: code_lines.append("\n")
            code_lines.extend(self._call_printer(routine))
            if empty: code_lines.append("\n")
            code_lines.extend(self._get_routine_ending(routine))
        # Let the language-specific printer re-indent the assembled body.
        code_lines = self._indent_code(''.join(code_lines))
        if header:
            code_lines = ''.join(self._get_header() + [code_lines])
        if code_lines:
            f.write(code_lines)
class CodeGenError(Exception):
    """Raised for general errors during code generation."""
    pass
class CodeGenArgumentListError(Exception):
    """Raised when a user-supplied argument sequence omits required arguments.

    Constructed as ``CodeGenArgumentListError(message, missing)``; the
    omitted arguments are recoverable through :attr:`missing_args`.
    """

    @property
    def missing_args(self):
        # args == (message, missing_argument_list); expose the latter.
        missing = self.args[1]
        return missing
# Template interpolated with %(version)s and %(project)s when emitting the
# top-of-file comment in generated sources (see the _get_header methods).
header_comment = """Code generated with sympy %(version)s
See http://www.sympy.org/ for more information.
This file is part of '%(project)s'
"""
class CCodeGen(CodeGen):
    """
    Generator for C code

    The .write() method inherited from CodeGen will output a code file and an
    interface file, <prefix>.c and <prefix>.h respectively.
    """
    code_extension = "c"
    interface_extension = "h"
    def _get_header(self):
        """Writes a common header for the generated files."""
        code_lines = []
        # Frame the header text inside an 80-column C block comment.
        code_lines.append("/" + "*"*78 + '\n')
        tmp = header_comment % {"version": sympy_version, "project": self.project}
        for line in tmp.splitlines():
            code_lines.append(" *%s*\n" % line.center(76))
        code_lines.append(" " + "*"*78 + "/\n")
        return code_lines
    def get_prototype(self, routine):
        """Returns a string for the function prototype for the given routine.

        If the routine has multiple result objects, a CodeGenError is
        raised.

        See: http://en.wikipedia.org/wiki/Function_prototype
        """
        if len(routine.results) > 1:
            raise CodeGenError("C only supports a single or no return value.")
        elif len(routine.results) == 1:
            ctype = routine.results[0].get_datatype('C')
        else:
            ctype = "void"
        type_args = []
        for arg in routine.arguments:
            name = ccode(arg.name)
            if arg.dimensions:
                # Array arguments are passed as pointers.
                type_args.append((arg.get_datatype('C'), "*%s" % name))
            elif isinstance(arg, ResultBase):
                # Scalar output arguments are passed by address.
                type_args.append((arg.get_datatype('C'), "&%s" % name))
            else:
                type_args.append((arg.get_datatype('C'), name))
        arguments = ", ".join([ "%s %s" % t for t in type_args])
        return "%s %s(%s)" % (ctype, routine.name, arguments)
    def _preprosessor_statements(self, prefix):
        code_lines = []
        code_lines.append("#include \"%s.h\"\n" % os.path.basename(prefix))
        code_lines.append("#include <math.h>\n")
        return code_lines
    def _get_routine_opening(self, routine):
        prototype = self.get_prototype(routine)
        return ["%s {\n" % prototype]
    def _declare_arguments(self, routine):
        # arguments are declared in prototype
        return []
    def _declare_locals(self, routine):
        # loop variables are declared in loop statement
        return []
    def _call_printer(self, routine):
        code_lines = []
        for result in routine.result_variables:
            if isinstance(result, Result):
                assign_to = None
            elif isinstance(result, (OutputArgument, InOutArgument)):
                assign_to = result.result_var
            try:
                constants, not_c, c_expr = ccode(result.expr, assign_to=assign_to, human=False)
            except AssignmentError:
                # ccode could not assign directly; declare a temporary of the
                # result's type and retry with an explicit assignment target.
                assign_to = result.result_var
                code_lines.append("%s %s;\n" % (result.get_datatype('c'), str(assign_to)))
                constants, not_c, c_expr = ccode(result.expr, assign_to=assign_to, human=False)
            # Emit constants that ccode pulled out of the expression.
            for name, value in sorted(constants, key=str):
                code_lines.append("double const %s = %s;\n" % (name, value))
            if assign_to:
                code_lines.append("%s\n" % c_expr)
            else:
                code_lines.append("   return %s;\n" % c_expr)
        return code_lines
    def _indent_code(self, codelines):
        p = CCodePrinter()
        return p.indent_code(codelines)
    def _get_routine_ending(self, routine):
        return ["}\n"]
    def dump_c(self, routines, f, prefix, header=True, empty=True):
        self.dump_code(routines, f, prefix, header, empty)
    dump_c.extension = code_extension
    dump_c.__doc__ = CodeGen.dump_code.__doc__
    def dump_h(self, routines, f, prefix, header=True, empty=True):
        """Writes the C header file.

        This file contains all the function declarations.

        :Arguments:

        routines
            A list of Routine instances
        f
            A file-like object to write the file to
        prefix
            The filename prefix, used to construct the include guards.

        :Optional arguments:

        header
            When True, a header comment is included on top of each source
            file. [DEFAULT=True]
        empty
            When True, empty lines are included to structure the source
            files. [DEFAULT=True]
        """
        if header:
            print >> f, ''.join(self._get_header())
        guard_name = "%s__%s__H" % (self.project.replace(" ", "_").upper(), prefix.replace("/", "_").upper())
        # include guards
        if empty: print >> f
        print >> f, "#ifndef %s" % guard_name
        print >> f, "#define %s" % guard_name
        if empty: print >> f
        # declaration of the function prototypes
        for routine in routines:
            prototype = self.get_prototype(routine)
            print >> f, "%s;" % prototype
        # end if include guards
        if empty: print >> f
        print >> f, "#endif"
        if empty: print >> f
    dump_h.extension = interface_extension
    # This list of dump functions is used by CodeGen.write to know which dump
    # functions it has to call.
    dump_fns = [dump_c, dump_h]
class FCodeGen(CodeGen):
    """
    Generator for Fortran 95 code

    The .write() method inherited from CodeGen will output a code file and an
    interface file, <prefix>.f90 and <prefix>.h respectively.
    """
    code_extension = "f90"
    interface_extension = "h"
    def __init__(self, project='project'):
        CodeGen.__init__(self, project)
    def _get_symbol(self, s):
        """Returns the symbol as fcode prints it."""
        return fcode(s).strip()
    def _get_header(self):
        """Writes a common header for the generated files."""
        code_lines = []
        # Frame the header text in Fortran '!' comment lines.
        code_lines.append("!" + "*"*78 + '\n')
        tmp = header_comment % {"version": sympy_version, "project": self.project}
        for line in tmp.splitlines():
            code_lines.append("!*%s*\n" % line.center(76))
        code_lines.append("!" + "*"*78 + '\n')
        return code_lines
    def _preprosessor_statements(self, prefix):
        # Fortran needs no preprocessor statements.
        return []
    def _get_routine_opening(self, routine):
        """
        Returns the opening statements of the fortran routine
        """
        code_list = []
        if len(routine.results) > 1:
            raise CodeGenError("Fortran only supports a single or no return value.")
        elif len(routine.results) == 1:
            # A single result maps to a typed 'function'.
            result = routine.results[0]
            code_list.append(result.get_datatype('fortran'))
            code_list.append("function")
        else:
            # No return value: emit a 'subroutine' instead.
            code_list.append("subroutine")
        args = ", ".join("%s" % self._get_symbol(arg.name)
                for arg in routine.arguments)
        # name of the routine + arguments
        code_list.append("%s(%s)\n" % (routine.name, args))
        code_list = [ " ".join(code_list) ]
        code_list.append('implicit none\n')
        return code_list
    def _declare_arguments(self, routine):
        # argument type declarations
        code_list = []
        array_list = []
        scalar_list = []
        for arg in routine.arguments:
            if isinstance(arg, InputArgument):
                typeinfo = "%s, intent(in)" % arg.get_datatype('fortran')
            elif isinstance(arg, InOutArgument):
                typeinfo = "%s, intent(inout)" % arg.get_datatype('fortran')
            elif isinstance(arg, OutputArgument):
                typeinfo = "%s, intent(out)" % arg.get_datatype('fortran')
            else:
                # NOTE(review): "Unkown" typo is in the user-visible message;
                # left untouched here to preserve behavior.
                raise CodeGenError("Unkown Argument type: %s"%type(arg))
            fprint = self._get_symbol
            if arg.dimensions:
                # fortran arrays start at 1
                dimstr = ", ".join(["%s:%s"%(
                    fprint(dim[0]+1), fprint(dim[1]+1))
                    for dim in arg.dimensions])
                typeinfo += ", dimension(%s)" % dimstr
                array_list.append("%s :: %s\n" % (typeinfo, fprint(arg.name)))
            else:
                scalar_list.append("%s :: %s\n" % (typeinfo, fprint(arg.name)))
        # scalars first, because they can be used in array declarations
        code_list.extend(scalar_list)
        code_list.extend(array_list)
        return code_list
    def _declare_locals(self, routine):
        code_list = []
        for var in sorted(routine.local_vars, key=str):
            typeinfo = get_default_datatype(var)
            code_list.append("%s :: %s\n" % (
                typeinfo.fname, self._get_symbol(var)))
        return code_list
    def _get_routine_ending(self, routine):
        """
        Returns the closing statements of the fortran routine
        """
        if len(routine.results) == 1:
            return ["end function\n"]
        else:
            return ["end subroutine\n"]
    def get_interface(self, routine):
        """Returns a string for the function interface for the given routine and
        a single result object, which can be None.

        If the routine has multiple result objects, a CodeGenError is
        raised.

        See: http://en.wikipedia.org/wiki/Function_prototype
        """
        prototype = [ "interface\n" ]
        prototype.extend(self._get_routine_opening(routine))
        prototype.extend(self._declare_arguments(routine))
        prototype.extend(self._get_routine_ending(routine))
        prototype.append("end interface\n")
        return "".join(prototype)
    def _call_printer(self, routine):
        declarations = []
        code_lines = []
        for result in routine.result_variables:
            if isinstance(result, Result):
                # A function's return value is assigned to the routine name.
                assign_to = routine.name
            elif isinstance(result, (OutputArgument, InOutArgument)):
                assign_to = result.result_var
            constants, not_fortran, f_expr = fcode(result.expr,
                assign_to=assign_to, source_format='free', human=False)
            # Declare numeric constants that fcode pulled out of the expression.
            for obj, v in sorted(constants, key=str):
                t = get_default_datatype(obj)
                declarations.append("%s, parameter :: %s = %s\n" % (t.fname, obj, v))
            # Declare symbols that fcode could not translate.
            for obj in sorted(not_fortran, key=str):
                t = get_default_datatype(obj)
                if isinstance(obj, Function):
                    name = obj.func
                else:
                    name = obj
                declarations.append("%s :: %s\n" % (t.fname, name))
            code_lines.append("%s\n" % f_expr)
        return declarations + code_lines
    def _indent_code(self, codelines):
        p = FCodePrinter({'source_format': 'free', 'human': False})
        return p.indent_code(codelines)
    def dump_f95(self, routines, f, prefix, header=True, empty=True):
        # check that symbols are unique with ignorecase
        for r in routines:
            lowercase = set(map(lambda x: str(x).lower(), r.variables))
            orig_case = set(map(lambda x: str(x), r.variables))
            if len(lowercase) < len(orig_case):
                raise CodeGenError("Fortran ignores case. Got symbols: %s"%
                        (", ".join([str(var) for var in r.variables])))
        self.dump_code(routines, f, prefix, header, empty)
    dump_f95.extension = code_extension
    dump_f95.__doc__ = CodeGen.dump_code.__doc__
    def dump_h(self, routines, f, prefix, header=True, empty=True):
        """Writes the interface to a header file.

        This file contains all the function declarations.

        :Arguments:

        routines
            A list of Routine instances
        f
            A file-like object to write the file to
        prefix
            The filename prefix

        :Optional arguments:

        header
            When True, a header comment is included on top of each source
            file. [DEFAULT=True]
        empty
            When True, empty lines are included to structure the source
            files. [DEFAULT=True]
        """
        if header:
            print >> f, ''.join(self._get_header())
        if empty: print >> f
        # declaration of the function prototypes
        for routine in routines:
            prototype = self.get_interface(routine)
            f.write(prototype)
        if empty: print >> f
    dump_h.extension = interface_extension
    # This list of dump functions is used by CodeGen.write to know which dump
    # functions it has to call.
    dump_fns = [dump_f95, dump_h]
def get_code_generator(language, project):
    """Return a CodeGen subclass instance for *language* (case-insensitive)."""
    try:
        generator_cls = {"C": CCodeGen, "F95": FCodeGen}[language.upper()]
    except KeyError:
        raise ValueError("Language '%s' is not supported." % language)
    return generator_cls(project)
#
# Friendly functions
#
def codegen(name_expr, language, prefix, project="project", to_files=False, header=True, empty=True,
    argument_sequence=None):
    """Write source code for the given expressions in the given language.

    :Mandatory Arguments:

    ``name_expr``
        A single (name, expression) tuple or a list of (name, expression)
        tuples. Each tuple corresponds to a routine. If the expression is an
        equality (an instance of class Equality) the left hand side is
        considered an output argument.
    ``language``
        A string that indicates the source code language. This is case
        insensitive. For the moment, only 'C' and 'F95' is supported.
    ``prefix``
        A prefix for the names of the files that contain the source code.
        Proper (language dependent) suffixes will be appended.

    :Optional Arguments:

    ``project``
        A project name, used for making unique preprocessor instructions.
        [DEFAULT="project"]
    ``to_files``
        When True, the code will be written to one or more files with the given
        prefix, otherwise strings with the names and contents of these files
        are returned. [DEFAULT=False]
    ``header``
        When True, a header is written on top of each source file.
        [DEFAULT=True]
    ``empty``
        When True, empty lines are used to structure the code. [DEFAULT=True]
    ``argument_sequence``
        sequence of arguments for the routine in a preferred order. A
        CodeGenError is raised if required arguments are missing. Redundant
        arguments are used without warning.
        If omitted, arguments will be ordered alphabetically, but with all
        input arguments first, and then output or in-out arguments.

    >>> from sympy import symbols
    >>> from sympy.utilities.codegen import codegen
    >>> from sympy.abc import x, y, z
    >>> [(c_name, c_code), (h_name, c_header)] = codegen(
    ...     ("f", x+y*z), "C", "test", header=False, empty=False)
    >>> print c_name
    test.c
    >>> print c_code,
    #include "test.h"
    #include <math.h>
    double f(double x, double y, double z) {
       return x + y*z;
    }
    >>> print h_name
    test.h
    >>> print c_header,
    #ifndef PROJECT__TEST__H
    #define PROJECT__TEST__H
    double f(double x, double y, double z);
    #endif
    """
    # Initialize the code generator.
    code_gen = get_code_generator(language, project)
    # Normalize a single (name, expr) tuple into a singleton list.
    if isinstance(name_expr[0], basestring):
        name_expr = [name_expr]
    # One Routine per pair; argument discovery/ordering happens in Routine.
    routines = [Routine(name, expr, argument_sequence)
                for name, expr in name_expr]
    # Write the code.
    return code_gen.write(routines, prefix, to_files, header, empty)
| |
# coding: utf-8
"""Expose data to different interface
ZMQStream explose to a ZeroMQ socket in a REQ/REP pattern.
Copyright (c) 2017, European X-Ray Free-Electron Laser Facility GmbH
All rights reserved.
You should have received a copy of the 3-Clause BSD License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>
"""
from argparse import ArgumentParser
import os.path as osp
from queue import Queue
from threading import Event, Thread
from time import time
import msgpack
import numpy as np
import zmq
from .reader import RunDirectory, H5File
__all__ = ['ZMQStreamer', 'serve_files']
class REPInterface(Thread):
    """Background thread answering ZMQ REP requests from a message buffer.

    Each b'next' request is answered with the next multipart message taken
    from *buffer*; any other request aborts the thread with a RuntimeError.
    """

    def __init__(self, context, port, buffer):
        super(REPInterface, self).__init__()
        self.context = context
        self.port = port
        self.buffer = buffer
        self._stop_event = Event()

    def run(self):
        sock = self.context.socket(zmq.REP)
        try:
            sock.bind('tcp://*:{}'.format(self.port))
            while not self.stopped():
                request = sock.recv()
                if request != b'next':
                    raise RuntimeError('Unknown request:', request)
                # Blocks until the producer puts the next train in the queue.
                sock.send_multipart(self.buffer.get())
        finally:
            # Drop any pending messages so close() cannot hang.
            sock.setsockopt(zmq.LINGER, 0)
            sock.close()

    def stop(self):
        """Ask the serving loop to exit after the current request."""
        self._stop_event.set()

    def stopped(self):
        return self._stop_event.is_set()
class ZMQStreamer:
    """ZeroMQ interface sending data over a TCP socket.

    ::

        # Server:
        serve = ZMQStreamer(1234)
        serve.start()
        for tid, data in run.trains():
            result = important_processing(data)
            serve.feed(result)

        # Client:
        from karabo_bridge import Client
        client = Client('tcp://server.hostname:1234')
        data = client.next()

    Parameters
    ----------
    port: int
        Local TCP port to bind socket to
    maxlen: int, optional
        How many trains to cache before sending (default: 10)
    protocol_version: ('1.0' | '2.2')
        Which version of the bridge protocol to use. Defaults to the latest
        version implemented.
    dummy_timestamps: bool
        Some tools (such as OnDA) expect the timestamp information to be in the
        messages. We can't give accurate timestamps where these are not in the
        file, so this option generates fake timestamps from the time the data
        is fed in.
    """
    def __init__(self, port, maxlen=10, protocol_version='2.2', dummy_timestamps=False):
        self._context = zmq.Context()
        self.port = port
        if protocol_version not in {'1.0', '2.2'}:
            raise ValueError("Unknown protocol version %r" % protocol_version)
        elif protocol_version == '1.0':
            # Protocol 1.0 packs whole trains as single msgpack messages, so
            # numpy arrays need the msgpack_numpy encoder.
            import msgpack_numpy
            self.pack = msgpack.Packer(
                use_bin_type=True, default=msgpack_numpy.encode
            ).pack
        else:
            self.pack = msgpack.Packer(use_bin_type=True).pack
        self.protocol_version = protocol_version
        self.dummy_timestamps = dummy_timestamps
        self._buffer = Queue(maxsize=maxlen)
        self._interface = None
    def start(self):
        """Start a zmq.REP socket.
        """
        self._interface = REPInterface(self._context, self.port, self._buffer)
        # Daemon thread: don't block interpreter exit on a waiting server.
        self._interface.daemon = True
        self._interface.start()
    def stop(self):
        # Stop and join the server thread, if one was started.
        if self._interface:
            self._interface.stop()
            self._interface.join()
            self._interface = None
    def _serialize(self, data, metadata=None):
        # Build the multipart message for one train following the chosen
        # protocol version.
        if not metadata:
            # Fall back to per-source metadata embedded in the data itself.
            metadata = {src: v.get('metadata', {}) for src, v in data.items()}
        if self.dummy_timestamps:
            # Fabricate timestamps from the wall clock for sources that
            # don't already carry one.
            ts = time()
            sec, frac = str(ts).split('.')
            frac = frac.ljust(18, '0')
            update_dummy = {
                'timestamp': ts,
                'timestamp.sec': sec,
                'timestamp.frac': frac,
            }
            for src in data.keys():
                if 'timestamp' not in metadata[src]:
                    metadata[src].update(update_dummy)
        if self.protocol_version == '1.0':
            # Legacy protocol: one single msgpack blob for the whole train.
            return [self.pack(data)]
        msg = []
        for src, props in sorted(data.items()):
            main_data = {}
            arrays = []
            for key, value in props.items():
                if isinstance(value, np.ndarray):
                    # Arrays are sent separately as raw buffers (below).
                    arrays.append((key, value))
                elif isinstance(value, np.number):
                    # Convert numpy type to native Python type
                    main_data[key] = value.item()
                else:
                    main_data[key] = value
            msg.extend([
                self.pack({
                    'source': src, 'content': 'msgpack',
                    'metadata': metadata[src]
                }),
                self.pack(main_data)
            ])
            for key, array in arrays:
                if not array.flags['C_CONTIGUOUS']:
                    array = np.ascontiguousarray(array)
                msg.extend([
                    self.pack({
                        'source': src, 'content': 'array', 'path': key,
                        'dtype': str(array.dtype), 'shape': array.shape
                    }),
                    array.data,
                ])
        return msg
    def feed(self, data, metadata=None):
        """Push data to the sending queue.

        This blocks if the queue already has *maxlen* items waiting to be sent.

        Parameters
        ----------
        data : dict
            Contains train data. The dictionary has to follow the karabo_bridge
            protocol structure:

            - keys are source names
            - values are dict, where the keys are the parameter names and
              values must be python built-in types or numpy.ndarray.

        metadata : dict, optional
            Contains train metadata. The dictionary has to follow the
            karabo_bridge protocol structure:

            - keys are (str) source names
            - values (dict) should contain the following items:

              - 'timestamp' Unix time with subsecond resolution
              - 'timestamp.sec' Unix time with second resolution
              - 'timestamp.frac' fractional part with attosecond resolution
              - 'timestamp.tid' is European XFEL train unique ID

            ::

              {
                  'source': 'sourceName'  # str
                  'timestamp': 1234.567890  # float
                  'timestamp.sec': '1234'  # str
                  'timestamp.frac': '567890000000000000'  # str
                  'timestamp.tid': 1234567890  # int
              }

            If the metadata dict is not provided it will be extracted from
            'data' or an empty dict if 'metadata' key is missing from a data
            source.
        """
        self._buffer.put(self._serialize(data, metadata))
def serve_files(path, port, source_glob='*', key_glob='*', **kwargs):
    """Stream data from files through a TCP socket.

    Parameters
    ----------
    path: str
        Path to the HDF5 file or file folder.
    port: int
        Local TCP port to bind socket to.
    source_glob: str
        Only stream sources matching this glob pattern.
        Streaming data selectively is more efficient than streaming everything.
    key_glob: str
        Only stream keys matching this glob pattern in the selected sources.
    **kwargs
        Remaining keyword arguments are forwarded to :class:`ZMQStreamer`.
    """
    if osp.isdir(path):
        data = RunDirectory(path)
    else:
        data = H5File(path)
    data = data.select(source_glob, key_glob)
    streamer = ZMQStreamer(port, **kwargs)
    streamer.start()
    try:
        for tid, train_data in data.trains():
            if train_data:
                streamer.feed(train_data)
    finally:
        # Always shut the serving thread down, even if reading a train fails;
        # previously an exception here leaked the daemon thread and socket.
        streamer.stop()
def main(argv=None):
    """Command-line entry point: serve a file or run directory over ZMQ."""
    ap = ArgumentParser(prog="karabo-bridge-serve-files")
    ap.add_argument("path", help="Path of a file or run directory to serve")
    ap.add_argument("port", help="TCP port to run server on")
    ap.add_argument(
        "--source", help="Stream only matching sources ('*' is a wildcard)",
        default='*',
    )
    ap.add_argument(
        "--key", help="Stream only matching keys ('*' is a wildcard)",
        default='*',
    )
    args = ap.parse_args(argv)
    # Bug fix: --source/--key were parsed but never forwarded to serve_files,
    # so the selection options silently had no effect.
    serve_files(args.path, args.port,
                source_glob=args.source, key_glob=args.key)
| |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018 by Exopy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Definition of the base tasks.
The base tasks define how task interact between them and with the database, how
ressources can be shared and how preferences are handled.
"""
from atom.api import Atom, Dict, Bool, Value, Signal, List, Typed, ForwardTyped
from threading import Lock
class DatabaseNode(Atom):
    """Helper class to differentiate nodes and dict in database.

    A node stores the entries of one level of the hierarchical database,
    plus metadata such as access exceptions.
    """
    #: Reference to the parent node.
    parent = ForwardTyped(lambda: DatabaseNode)
    #: Actual data hold by this node.
    data = Dict()
    #: Metadata associated with this node such as access exceptions.
    meta = Dict()
class TaskDatabase(Atom):
    """ A database for inter tasks communication.

    The database has two modes:

    - an edition mode in which the number of entries and their hierarchy
      can change. In this mode the database is represented by a nested dict.

    - a running mode in which the entries are fixed (only their values can
      change). In this mode the database is represented as a flat list.
      In running mode the database is thread safe but the object it contains
      may not be so (dict, list, etc)

    """
    #: Signal used to notify a value changed in the database.
    #: In edition mode the update is passed as a tuple ('added', path, value)
    #: for creation, as ('renamed', old, new, value) in case of renaming,
    #: ('removed', old) in case of deletion or as a list of such tuples.
    #: In running mode, a 2-tuple (path, value) is sent as entries cannot be
    #: renamed or removed.
    notifier = Signal()
    #: Signal emitted to notify that access exceptions has changed. The update
    #: is passed as a tuple ('added', path, relative, entry) for creation or as
    #: ('renamed', path, relative, old, new) in case of renaming of the related
    #: entry, ('removed', path, relative, old) in case of deletion (if old is
    #: None all exceptions have been removed) or as a list of such tuples.
    #: Path indicate the node where the exception is located, relative the
    #: relative path from the 'path' node to the real location of the entry.
    access_notifier = Signal()
    #: Signal emitted to notify that the nodes were modified. The update
    #: is passed as a tuple ('added', path, name, node) for creation or as
    #: ('renamed', path, old, new) in case of renaming of the related node,
    #: ('removed', path, old) in case of deletion or as a list of such tuples.
    nodes_notifier = Signal()
    #: List of root entries which should not be listed.
    excluded = List(default=['threads', 'instrs'])
    #: Flag indicating whether or not the database entered the running mode. In
    #: running mode the database is flattened into a list for faster access.
    running = Bool(False)
    def set_value(self, node_path, value_name, value):
        """Method used to set the value of the entry at the specified path

        This method can be used both in edition and running mode.

        Parameters
        ----------
        node_path : unicode
            Path to the node holding the value to be set

        value_name : unicode
            Public key associated with the value to be set, internally
            converted so that we do not mix value and nodes

        value : any
            Actual value to be stored

        Returns
        -------
        new_val : bool
            Boolean indicating whether or not a new entry has been created in
            the database (always False in running mode, where the set of
            entries is fixed)
        """
        new_val = False
        if self.running:
            # Running mode: write into the flat list under the lock.
            full_path = node_path + '/' + value_name
            index = self._entry_index_map[full_path]
            with self._lock:
                self._flat_database[index] = value
            # NOTE(review): the notifier fires outside the lock --
            # presumably deliberate to avoid holding it during callbacks;
            # confirm observers tolerate this ordering.
            self.notifier((node_path + '/' + value_name, value))
        else:
            # Edition mode: write into the nested-node representation.
            node = self.go_to_path(node_path)
            if value_name not in node.data:
                new_val = True
            node.data[value_name] = value
            if new_val:
                self.notifier(('added', node_path + '/' + value_name, value))
        return new_val
    def get_value(self, assumed_path, value_name):
        """Method to get a value from the database from its name and a path

        This method returns the value stored under the specified name. It
        starts looking at the specified path and if necessary goes up in the
        hierarchy.

        Parameters
        ----------
        assumed_path : unicode
            Path where we start looking for the entry

        value_name : unicode
            Name of the value we are looking for

        Returns
        -------
        value : object
            Value stored under the entry value_name

        Raises
        ------
        KeyError
            If the entry is found nowhere up to the root of the hierarchy.
        """
        if self.running:
            # Running mode: resolve to an index into the flat list.
            index = self._find_index(assumed_path, value_name)
            return self._flat_database[index]
        else:
            node = self.go_to_path(assumed_path)
            # First check if the entry is in the current node.
            if value_name in node.data:
                value = node.data[value_name]
                return value
            # Second check if there is a special rule about this entry.
            elif 'access' in node.meta and value_name in node.meta['access']:
                # Follow the access exception's relative path downwards.
                path = assumed_path + '/' + node.meta['access'][value_name]
                return self.get_value(path, value_name)
            # Finally go one step up in the node hierarchy.
            else:
                new_assumed_path = assumed_path.rpartition('/')[0]
                if assumed_path == new_assumed_path:
                    # Reached the root without finding the entry.
                    mes = "Can't find database entry : {}".format(value_name)
                    raise KeyError(mes)
                return self.get_value(new_assumed_path, value_name)
def rename_values(self, node_path, old, new, access_exs=None):
"""Rename database entries.
This method can update the access exceptions attached to them.
This method cannot be used in running mode.
Parameters
----------
node_path : unicode
Path to the node holding the value.
old : iterable
Old names of the values.
new : iterable
New names of the values.
access_exs : iterable, optional
Dict mapping old entries names to how far the access exception is
located.
"""
if self.running:
raise RuntimeError('Cannot delete an entry in running mode')
node = self.go_to_path(node_path)
notif = []
acc_notif = []
access_exs = access_exs if access_exs else {}
for i, old_name in enumerate(old):
if old_name in node.data:
val = node.data.pop(old_name)
node.data[new[i]] = val
notif.append(('renamed',
node_path + '/' + old_name,
node_path + '/' + new[i],
val))
if old_name in access_exs:
count = access_exs[old_name]
n = node
p = node_path
while count:
n = n.parent if n.parent else n
p, _ = p.rsplit('/', 1)
count -= 1
path = n.meta['access'].pop(old_name)
n.meta['access'][new[i]] = path
acc_notif.append(('renamed', p, path, old_name, new[i]))
else:
err_str = 'No entry {} in node {}'.format(old_name,
node_path)
raise KeyError(err_str)
# Avoid sending spurious notifications
if notif:
self.notifier(notif)
if acc_notif:
self.access_notifier(acc_notif)
    def delete_value(self, node_path, value_name):
        """Remove an entry from the specified node.

        This method removes the specified entry from the specified node. It
        does not handle removing the access exceptions attached to it. This
        method cannot be used in running mode.

        Parameters
        ----------
        node_path : unicode
            Path to the node holding the entry.
        value_name : unicode
            Name of the entry to remove.

        Raises
        ------
        RuntimeError
            If the database is in running mode.
        KeyError
            If the entry does not exist in the node.
        """
        if self.running:
            raise RuntimeError('Cannot delete an entry in running mode')
        else:
            node = self.go_to_path(node_path)
            if value_name in node.data:
                del node.data[value_name]
                # Observers are notified with the full path of the entry.
                self.notifier(('removed', node_path + '/' + value_name))
            else:
                err_str = 'No entry {} in node {}'.format(value_name,
                                                          node_path)
                raise KeyError(err_str)
def get_values_by_index(self, indexes, prefix=None):
"""Access to a list of values using the flat database.
Parameters
----------
indexes : list(int)
List of index for which values should be returned.
prefix : unicode, optional
If provided return the values in dict with key of the form :
prefix + index.
Returns
-------
values : list or dict
List of requested values in the same order as indexes or dict if
prefix was not None.
"""
if prefix is None:
return [self._flat_database[i] for i in indexes]
else:
return {prefix + str(i): self._flat_database[i] for i in indexes}
def get_entries_indexes(self, assumed_path, entries):
""" Access to the index in the flattened database for some entries.
Parameters
----------
assumed_path : unicode
Path to the node in which the values are assumed to be stored.
entries : iterable(unicode)
Names of the entries for which the indexes should be returned.
Returns
-------
indexes : dict
Dict mapping the entries names to their index in the flattened
database.
"""
return {name: self._find_index(assumed_path, name)
for name in entries}
def list_accessible_entries(self, node_path):
"""Method used to get a list of all entries accessible from a node.
DO NOT USE THIS METHOD IN RUNNING MODE (ie never in the check method
of a task, use a try except clause instead and get_value or
get_entries_indexes).
Parameters
----------
node_path : unicode
Path to the node from which accessible entries should be listed.
Returns
-------
entries_list : list(unicode)
List of entries accessible from the specified node
"""
entries = []
while True:
node = self.go_to_path(node_path)
keys = node.data.keys()
# Looking for the entries in the node.
for key in keys:
if not isinstance(node.data[key], DatabaseNode):
entries.append(key)
# Adding the special access if they are not already in the list.
for entry in node.meta.get('access', []):
if entry not in entries:
entries.append(entry)
if node_path != 'root':
# Going to the next node.
node_path = node_path.rpartition('/')[0]
else:
break
for entry in self.excluded:
if entry in entries:
entries.remove(entry)
return sorted(entries)
def list_all_entries(self, path='root', values=False):
"""List all entries in the database.
Parameters
----------
path : unicode, optional
Starting node. This parameters is for internal use only.
values : bool, optional
Whether or not to return the values associated with the entries.
Returns
-------
paths : list(unicode) or dict if values
List of all accessible entries with their full path.
"""
entries = [] if not values else {}
node = self.go_to_path(path)
for entry in node.data.keys():
if isinstance(node.data[entry], DatabaseNode):
aux = self.list_all_entries(path=path + '/' + entry,
values=values)
if not values:
entries.extend(aux)
else:
entries.update(aux)
else:
if not values:
entries.append(path + '/' + entry)
else:
entries[path + '/' + entry] = node.data[entry]
if path == 'root':
for entry in self.excluded:
aux = path + '/' + entry
if aux in entries:
if not values:
entries.remove(aux)
else:
del entries[aux]
return sorted(entries) if not values else entries
def add_access_exception(self, node_path, entry_node, entry):
"""Add an access exception in a node for an entry located in a node
below.
Parameters
----------
node_path : unicode
Path to the node which should hold the exception.
entry_node : unicode
Absolute path to the node holding the entry.
entry : unicode
Name of the entry for which to create an exception.
"""
node = self.go_to_path(node_path)
rel_path = entry_node[len(node_path)+1:]
if 'access' in node.meta:
access_exceptions = node.meta['access']
access_exceptions[entry] = rel_path
else:
node.meta['access'] = {entry: rel_path}
self.access_notifier(('added', node_path, rel_path, entry))
def remove_access_exception(self, node_path, entry=None):
"""Remove an access exception from a node for a given entry.
Parameters
----------
node_path : unicode
Path to the node holding the exception.
entry : unicode, optional
Name of the entry for which to remove the exception, if not
provided all access exceptions will be removed.
"""
node = self.go_to_path(node_path)
if entry:
access_exceptions = node.meta['access']
relative_path = access_exceptions[entry]
del access_exceptions[entry]
else:
relative_path = ''
del node.meta['access']
self.access_notifier(('removed', node_path, relative_path, entry))
def create_node(self, parent_path, node_name):
"""Method used to create a new node in the database
This method creates a new node in the database at the specified path.
This method is not thread safe safe as the hierarchy of the tasks'
database is not supposed to change during a measurement but only during
the configuration phase
Parameters
----------
parent_path : unicode
Path to the node parent of the new one
node_name : unicode
Name of the new node to create
"""
if self.running:
raise RuntimeError('Cannot create a node in running mode')
parent_node = self.go_to_path(parent_path)
node = DatabaseNode(parent=parent_node)
parent_node.data[node_name] = node
self.nodes_notifier(('added', parent_path, node_name, node))
    def rename_node(self, parent_path, old_name, new_name):
        """Method used to rename a node in the database

        Parameters
        ----------
        parent_path : unicode
            Path to the parent of the node being renamed
        old_name : unicode
            Old name of the node.
        new_name : unicode
            New name of the node.

        Raises
        ------
        RuntimeError
            If the database is in running mode.
        """
        if self.running:
            raise RuntimeError('Cannot rename a node in running mode')
        parent_node = self.go_to_path(parent_path)
        parent_node.data[new_name] = parent_node.data[old_name]
        del parent_node.data[old_name]
        # Walk up the hierarchy and rewrite any access-exception relative
        # path that goes through the renamed node.
        while parent_node:
            if 'access' not in parent_node.meta:
                parent_node = parent_node.parent
                continue
            # Copy so the dict can be mutated while iterating.
            access = parent_node.meta['access'].copy()
            for k, v in access.items():
                # NOTE(review): substring test — a path merely containing
                # old_name as a fragment of another component would also be
                # rewritten; confirm node names cannot be substrings of one
                # another.
                if old_name in v:
                    new_path = v.replace(old_name, new_name)
                    parent_node.meta['access'][k] = new_path
            parent_node = parent_node.parent
        self.nodes_notifier(('renamed', parent_path, old_name, new_name))
    def delete_node(self, parent_path, node_name):
        """Method used to delete an existing node from the database

        Parameters
        ----------
        parent_path : unicode
            Path to the parent of the node to delete.
        node_name : unicode
            Name of the node to delete.

        Raises
        ------
        RuntimeError
            If the database is in running mode.
        KeyError
            If no such node exists under the parent.
        """
        if self.running:
            raise RuntimeError('Cannot delete a node in running mode')
        parent_node = self.go_to_path(parent_path)
        if node_name in parent_node.data:
            del parent_node.data[node_name]
        else:
            err_str = 'No node {} at the path {}'.format(node_name,
                                                         parent_path)
            raise KeyError(err_str)
        self.nodes_notifier(('removed', parent_path, node_name))
def copy_node_values(self, node='root'):
"""Copy the values (ie not subnodes) found in a node.
Parameters
----------
node : unicode, optional
Path to the node to copy.
Returns
-------
copy : dict
Copy of the node values.
"""
node = self.go_to_path(node)
return {k: v for k, v in node.data.items()
if not isinstance(v, DatabaseNode)}
    def prepare_to_run(self):
        """Enter a thread safe, flat database state.

        This is used when tasks are executed: the hierarchical database is
        flattened into a list of values plus a path -> index mapping, and a
        lock is created so that accesses become thread safe. The
        hierarchical container is dropped afterwards.
        """
        self._lock = Lock()
        self.running = True
        # Flattening the database by walking all the nodes.
        # NOTE: 'nodes' is deliberately extended while being iterated,
        # which turns the for loop into a breadth-first traversal.
        index = 0
        nodes = [('root', self._database)]
        mapping = {}
        datas = []
        for (node_path, node) in nodes:
            for key, val in node.data.items():
                path = node_path + '/' + key
                if isinstance(val, DatabaseNode):
                    nodes.append((path, val))
                else:
                    # Each plain value gets the next flat index.
                    mapping[path] = index
                    index += 1
                    datas.append(val)
        # Walking a second time to add the exception to the _entry_index_map,
        # in reverse order in case an entry has multiple exceptions.
        for (node_path, node) in nodes[::-1]:
            access = node.meta.get('access', [])
            for entry in access:
                # Alias the short (exception) path to the index of the
                # entry's real location.
                short_path = node_path + '/' + entry
                full_path = node_path + '/' + access[entry] + '/' + entry
                mapping[short_path] = mapping[full_path]
        self._flat_database = datas
        self._entry_index_map = mapping
        # The tree is unused (and must not be mutated) while running.
        self._database = None
def list_nodes(self):
"""List all the nodes present in the database.
Returns
-------
nodes : dict
Dictionary storing the nodes by path
"""
nodes = [('root', self._database)]
for (node_path, node) in nodes:
for key, val in node.data.items():
if isinstance(val, DatabaseNode):
path = node_path + '/' + key
nodes.append((path, val))
return dict(nodes)
def go_to_path(self, path):
"""Method used to reach a node specified by a path.
"""
node = self._database
if path == 'root':
return node
# Decompose the path in database keys
keys = path.split('/')
# Remove first key (ie 'root' as we are not trying to access it)
del keys[0]
for key in keys:
if key in node.data:
node = node.data[key]
else:
ind = keys.index(key)
if ind == 0:
err_str = \
'Path {} is invalid, no node {} in root'.format(path,
key)
else:
err_str = 'Path {} is invalid, no node {} in node\
{}'.format(path, key, keys[ind-1])
raise KeyError(err_str)
return node
# =========================================================================
# --- Private API ---------------------------------------------------------
# =========================================================================
#: Main container for the database.
_database = Typed(DatabaseNode, ())
#: Flat version of the database only used in running mode for perfomances
#: issues.
_flat_database = List()
#: Dict mapping full paths to flat database indexes.
_entry_index_map = Dict()
#: Lock to make the database thread safe in running mode.
_lock = Value()
def _find_index(self, assumed_path, entry):
"""Find the index associated with a path.
Only to be used in running mode.
"""
path = assumed_path
while path != 'root':
full_path = path + '/' + entry
if full_path in self._entry_index_map:
return self._entry_index_map[full_path]
path = path.rpartition('/')[0]
full_path = path + '/' + entry
if full_path in self._entry_index_map:
return self._entry_index_map[full_path]
raise KeyError("Can't find entry matching {}, {}".format(assumed_path,
entry))
| |
#!/usr/bin/env python
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import re
import sys
import shlex
import string
import ipaddress
# WANT_JSON
# Regular expressions to identify comments and blank lines.
# Raw strings avoid the invalid '\s' escape warning raised by
# non-raw regex literals on Python 3.6+.
comment = re.compile(r"^\s*#")
blank = re.compile(r"^\s*$")
#####
# Parsers
#
# Parses are methods that take the form 'parse_<keyword>', where the keyword
# is the first word on a line in file. The purpose of the parser is to
# evaluate the line tna update the interface model accordingly.
####
# Compares the current and desired network configuration to see if there
# is a change and returns:
# 0 if no change
# -1 if the change has no semantic value (i.e. comments differ)
# 1 if there is a semantic change (i.e. its meaningful)
# the highest priority of change is returned, i.e. if there is both
# a semantic and non-semantic change a 1 is returned indicating a
# semantic change.
# Compare two values, falling back to their string forms when the types
# differ (option values arrive as strings from the task arguments).
def value_equal(left, right):
    if type(left) is type(right):
        return left == right
    return str(left) == str(right)


# Compare the current and desired configuration of one interface.
# Returns 0 for no change, -1 for a non-semantic change (only the
# description/comments differ) and 1 for a semantic change. A semantic
# change wins over a non-semantic one.
def compare(have, want):
    result = 0
    for key in set(have) | set(want):
        in_both_and_equal = (key in have and key in want and
                             value_equal(have[key], want[key]))
        if in_both_and_equal:
            continue
        if key in ["description"]:
            # Comment-only difference: remember it, keep scanning.
            result = -1
        else:
            return 1
    return result
# Record an 'auto <iface>' line: create the interface entry if needed and
# flag it as auto-configured.
def parse_auto(data, current, words, description):
    iface = data.get(words[1], {})
    if description:
        iface["description"] = description
    iface["auto"] = True
    data[words[1]] = iface
    return words[1]
# Record an 'iface <name> <type> <config>' line: create the interface
# entry if needed and store its address family and configuration method.
def parse_iface(data, current, words, description):
    iface = data.get(words[1], {})
    if description:
        iface["description"] = description
    iface["type"] = words[2]
    iface["config"] = words[3]
    data[words[1]] = iface
    return words[1]
# Attributes that may appear several times on one interface and therefore
# accumulate into a list.
allow_lists = ["pre-up", "post-up", "pre-down", "post-down"]


# Generic fallback parser: record a name / value pair on the interface
# currently being parsed. Raises SyntaxError when no interface is open.
def parse_add_attr(data, current, words, description):
    global allow_lists
    if current == "":
        raise SyntaxError("Attempt to add attribute '%s' without an interface" % words[0])
    iface = data.get(current, {})
    if description:
        iface["description"] = description
    attr = words[0]
    value = " ".join(words[1:])
    if attr in iface and attr in allow_lists:
        # Repeatable attribute seen again: promote to / extend a list.
        existing = iface[attr]
        if isinstance(existing, list):
            existing.append(value)
        else:
            iface[attr] = [existing, value]
    else:
        iface[attr] = value
    data[current] = iface
    return current
#####
# Writers
#
# Writers take the form of 'write_<keyword>` where keyword is an interface
# attribute. The role of the writer is to output the attribute to the
# output stream, i.e. the new interface file.
#####
# Write a generic name / value pair, indented under its interface; list
# values produce one line per element.
def write_attr(out, name, value):
    items = value if isinstance(value, list) else [value]
    for item in items:
        out.write(" %s %s\n" % (name, item))
# Writes an interface definition to the output stream: description
# comment first, then 'auto', then the 'iface' line, then the attributes
# in the configured order.
def write_iface(out, name, iface):
    if "description" in iface.keys():
        # Make sure the description is emitted as a comment.
        val = iface["description"]
        if len(val) > 0 and val[0] != "#":
            val = "# " + val
        out.write("%s\n" % (val))
    if "auto" in iface.keys() and iface["auto"]:
        out.write("auto %s\n" % (name))
    out.write("iface %s %s %s\n" % (name, iface["type"], iface["config"]))
    # dict.get replaces the fragile 'cond and a or b' short-circuit idiom;
    # unknown attributes sort after the explicitly ordered ones (rank 100).
    for attr in sorted(iface.keys(), key=lambda x: write_sort_order.get(x, 100)):
        if attr in write_ignore:
            continue
        # Dispatch to a dedicated write_<attr> writer when one exists.
        writer = "write_%s" % (attr)
        if writer in all_methods:
            globals()[writer](out, attr, iface[attr])
        else:
            write_attr(out, attr, iface[attr])
    out.write("\n")
# Write the new interface file: loopback interfaces first, then the
# remaining interfaces in the configured interface sort order.
def write(out, data):
    for name, iface in data.items():
        if iface["config"] == "loopback":
            write_iface(out, name, iface)
    ordering = lambda x: (x in write_iface_sort_order.keys() and
                          write_iface_sort_order[x] or x)
    for name in sorted(data.keys(), key=ordering):
        iface = data[name]
        if iface["config"] != "loopback":
            write_iface(out, name, iface)
# The defaults for the netfile task
src_file = "/etc/network/interfaces"   # input file parsed for interfaces
dest_file = None                       # defaults to src_file when unset
merge_comments = False
state = "present"
name = ""
force = False
# Default per-interface attribute values applied when creating an entry.
values = {
    "config": "manual",
    "type": "inet"
}
# read the argument string from the arguments file
args_file = sys.argv[1]
# NOTE(review): file() and dict.iteritems() below (and the 'print'
# statements further down) are Python 2 only — this module targets a
# Python 2 Ansible runtime.
args_data = file(args_file).read()

# parse the task options
arguments = json.loads(args_data)
for key, value in arguments.iteritems():
    if key == "src":
        src_file = value
    elif key == "dest":
        dest_file = value
    elif key == "name":
        name = value
    elif key == "state":
        state = value
    elif key == "force":
        force = value.lower() in ['true', 't', 'yes', 'y']
    elif key == "description":
        values["description"] = value
    elif key == "merge-comments":
        merge_comments = value.lower() in ['true', 't', 'yes', 'y']
    elif key == "address":
        # CIDR notation also derives network, netmask and broadcast.
        if string.find(value, "/") != -1:
            parts = value.split('/')
            addr = ipaddress.ip_network(value, strict=False)
            values["address"] = parts[0]
            values["network"] = addr.network_address.exploded.encode('ascii','ignore')
            values["netmask"] = addr.netmask.exploded.encode('ascii','ignore')
            values["broadcast"] = addr.broadcast_address.exploded.encode('ascii','ignore')
        else:
            values["address"] = value
    # Any other non-internal option becomes an interface attribute.
    elif key[0] != '_':
        values[key] = value
# If name is not set we need to error out
if name == "":
result = {
"changed": False,
"failed": True,
"msg": "Name is a mansitory parameter",
}
print json.dumps(result)
sys.stdout.flush()
exit(1)
# If no destination file was specified, write it back to the same file
if not dest_file:
dest_file = src_file
# all_methods is used to check if parser or writer methods exist
all_methods = dir()

# which attributes should be ignored and not be written as single
# attribute values against an interface
write_ignore = ["auto", "type", "config", "description", "source"]

# specifies the order in which attributes are written against an
# interface. Any attribute not in this list is sorted in default
# order after the attributes specified.
write_sort_order = {
    "address" : 1,
    "network" : 2,
    "netmask" : 3,
    "broadcast" : 4,
    "gateway" : 5,
    "pre-up" : 10,
    "post-up" : 11,
    "pre-down" : 12,
    "post-down" : 13
}

# Forces the 'fabric' and 'mgmtbr' interfaces to sort after any
# alphabetically-named interface.
write_iface_sort_order = {
    "fabric" : "y",
    "mgmtbr" : "z"
}
# Read and parse the specified interface file
# NOTE(review): 'file' here shadows the Python 2 builtin used earlier to
# read the args file; harmless at this point, but worth renaming.
file = open(src_file, "r")
ifaces = {}
current = "" # The current interface being parsed
description = ""
for line in file.readlines():
    line = line.rstrip('\n')
    # Consecutive comment lines accumulate into the pending description.
    if comment.match(line):
        if len(description) > 0:
            description = description + '\n' + line
        else:
            description = line
    # A blank line inside a description is kept as a paragraph break.
    if len(description) > 0 and blank.match(line):
        description = description + '\n'
    # Drop any comment or blank line
    if comment.match(line) or blank.match(line):
        continue
    # Parse the line: dispatch to a parse_<keyword> handler when one
    # exists, otherwise treat it as a generic attribute of the current
    # interface.
    words = line.split()
    parser = "parse_" + words[0].replace("-", "_")
    if parser in all_methods:
        current = globals()[parser](ifaces, current, words, description)
    else:
        current = parse_add_attr(ifaces, current, words, description)
    # The pending description is consumed by the parsed line.
    description = ""
file.close()
# Assume no change unless we discover otherwise
result = {
    "changed" : False
}
change_type = 0

# if the interface specified and state is present then either add
# it to the model or replace it if it already exists.
if state == "query":
    # Query only reports whether the interface exists, never modifies.
    if name in ifaces.keys():
        result["interface"] = ifaces[name]
        result["found"] = True
    else:
        result["found"] = False
elif state == "present":
    if name in ifaces.keys():
        have = ifaces[name]
        change_type = compare(have, values)
        result["change_type"] = change_type
        if change_type != 0:
            ifaces[name] = values
            # Optionally keep the existing comments, prepending new ones.
            if merge_comments and "description" in have.keys() and len(have["description"]) > 0:
                result["merge_comments"] = True
                if "description" in values.keys() and len(values["description"]) > 0:
                    ifaces[name]["description"] = values["description"] + "\n" + have["description"]
                else:
                    ifaces[name]["description"] = have["description"]
        # Only a semantic difference (1) marks the task as changed.
        result["changed"] = (change_type == 1)
    else:
        ifaces[name] = values
        result["changed"] = True
# if state is absent then remove it from the model
elif state == "absent" and name in ifaces.keys():
    del ifaces[name]
    result["changed"] = True

# Only write the output file if something has changed or if the
# task requests a forced write.
if force or result["changed"] or change_type != 0:
    file = open(dest_file, "w+")
    write(file, ifaces)
    file.close()

# Output the task result
print json.dumps(result)
| |
#!/usr/bin/python
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest import mock
from google.cloud import datacatalog
from google.datacatalog_connectors.commons import prepare
from google.datacatalog_connectors.qlik import sync
class MetadataSynchronizerTest(unittest.TestCase):
__SYNC_PACKAGE = 'google.datacatalog_connectors.qlik.sync'
__SYNCR_MODULE = f'{__SYNC_PACKAGE}.metadata_synchronizer'
    @mock.patch(f'{__SYNCR_MODULE}.prepare.AssembledEntryFactory',
                lambda *args, **kwargs: mock.MagicMock())
    @mock.patch(f'{__SYNCR_MODULE}.scrape.MetadataScraper',
                lambda *args, **kwargs: mock.MagicMock())
    def setUp(self):
        """Build a synchronizer whose scraper and entry factory are mocks."""
        self.__synchronizer = sync.MetadataSynchronizer(
            qlik_server_address='test-server',
            qlik_ad_domain='test-domain',
            qlik_username='test-username',
            qlik_password='test-password',
            datacatalog_project_id='test-project-id',
            datacatalog_location_id='test-location-id')
def test_constructor_should_set_instance_attributes(self):
attrs = self.__synchronizer.__dict__
self.assertEqual('test-project-id',
attrs['_MetadataSynchronizer__project_id'])
self.assertEqual('test-location-id',
attrs['_MetadataSynchronizer__location_id'])
self.assertIsNotNone(attrs['_MetadataSynchronizer__metadata_scraper'])
self.assertIsNotNone(
attrs['_MetadataSynchronizer__tag_template_factory'])
self.assertEqual('test-server',
attrs['_MetadataSynchronizer__site_url'])
self.assertIsNotNone(
attrs['_MetadataSynchronizer__assembled_entry_factory'])
    @mock.patch(f'{__SYNCR_MODULE}.ingest.DataCatalogMetadataIngestor')
    @mock.patch(f'{__SYNCR_MODULE}.cleanup.DataCatalogMetadataCleaner')
    @mock.patch(f'{__SYNCR_MODULE}.prepare.EntryRelationshipMapper',
                lambda *args: mock.MagicMock())
    def test_run_no_metadata_should_clean_but_not_ingest_metadata(
            self, mock_cleaner, mock_ingestor):
        """run() with no scraped metadata cleans up but ingests nothing."""
        scraper = self.__synchronizer.__dict__[
            '_MetadataSynchronizer__metadata_scraper']

        self.__synchronizer.run()

        # Both scrape steps must have been attempted exactly once.
        scraper.scrape_all_custom_property_definitions.assert_called_once()
        scraper.scrape_all_streams.assert_called_once()

        cleaner = mock_cleaner.return_value
        cleaner.delete_obsolete_metadata.assert_called_once()

        # Nothing was assembled, so nothing may reach the ingestor.
        ingestor = mock_ingestor.return_value
        ingestor.ingest_metadata.assert_not_called()
    @mock.patch(f'{__SYNCR_MODULE}.ingest.DataCatalogMetadataIngestor')
    @mock.patch(f'{__SYNCR_MODULE}.cleanup.DataCatalogMetadataCleaner')
    @mock.patch(f'{__SYNCR_MODULE}.prepare.EntryRelationshipMapper')
    def test_run_custom_property_def_should_traverse_main_workflow_steps(
            self, mock_mapper, mock_cleaner, mock_ingestor):
        """A scraped custom property definition is assembled, mapped,
        cleaned and ingested."""
        attrs = self.__synchronizer.__dict__

        scraper = attrs['_MetadataSynchronizer__metadata_scraper']
        assembled_entry_factory = attrs[
            '_MetadataSynchronizer__assembled_entry_factory']

        scraper.scrape_all_custom_property_definitions.return_value = [{
            'id': 'test-def',
        }]

        assembled_entry_factory.make_assembled_entry_for_custom_property_def\
            .return_value = prepare.AssembledEntryData(
                'test-def',
                self.__make_fake_entry('custom_property_definition'),
                [])

        self.__synchronizer.run()

        # The factory must receive the scraped definition unchanged.
        expected_make_assembled_entries_call_arg = {
            'id': 'test-def',
        }

        actual_call_args = assembled_entry_factory\
            .make_assembled_entry_for_custom_property_def.call_args[0]
        self.assertEqual(expected_make_assembled_entries_call_arg,
                         actual_call_args[0])

        mapper = mock_mapper.return_value
        mapper.fulfill_tag_fields.assert_called_once()

        cleaner = mock_cleaner.return_value
        cleaner.delete_obsolete_metadata.assert_called_once()

        ingestor = mock_ingestor.return_value
        ingestor.ingest_metadata.assert_called_once()
    @mock.patch(f'{__SYNCR_MODULE}.prepare.DataCatalogTagTemplateFactory'
                f'.make_tag_template_for_custom_property_value')
    @mock.patch(f'{__SYNCR_MODULE}.ingest.DataCatalogMetadataIngestor',
                lambda *args: mock.MagicMock())
    @mock.patch(f'{__SYNCR_MODULE}.cleanup.DataCatalogMetadataCleaner',
                lambda *args: mock.MagicMock())
    @mock.patch(f'{__SYNCR_MODULE}.prepare.EntryRelationshipMapper',
                lambda *args: mock.MagicMock())
    def test_run_custom_property_def_should_make_template_for_choice_value(
            self, mock_make_tag_template_for_custom_property_value):
        """Each choiceValue of a definition yields its own tag template."""
        # The template factory must return an object with a valid name.
        mock_make_tag_template_for_custom_property_value.return_value.name = \
            'parent/tagTemplates/def__value_1'

        attrs = self.__synchronizer.__dict__
        scraper = attrs['_MetadataSynchronizer__metadata_scraper']

        scraper.scrape_all_custom_property_definitions.return_value = [{
            'id': 'test-def',
            'choiceValues': ['Value 1']
        }]

        self.__synchronizer.run()

        mock_make_tag_template_for_custom_property_value.assert_called_once()
    @mock.patch(f'{__SYNCR_MODULE}.ingest.DataCatalogMetadataIngestor')
    @mock.patch(f'{__SYNCR_MODULE}.cleanup.DataCatalogMetadataCleaner')
    @mock.patch(f'{__SYNCR_MODULE}.prepare.EntryRelationshipMapper')
    def test_run_stream_metadata_should_traverse_main_workflow_steps(
            self, mock_mapper, mock_cleaner, mock_ingestor):
        """A scraped stream is assembled, mapped, cleaned and ingested."""
        attrs = self.__synchronizer.__dict__

        scraper = attrs['_MetadataSynchronizer__metadata_scraper']
        assembled_entry_factory = attrs[
            '_MetadataSynchronizer__assembled_entry_factory']

        scraper.scrape_all_streams.return_value = [self.__make_fake_stream()]

        assembled_entry_factory.make_assembled_entries_for_stream\
            .return_value = [prepare.AssembledEntryData(
                'test-stream', self.__make_fake_entry('stream'), [])]

        self.__synchronizer.run()

        # The factory must receive the scraped stream unchanged.
        expected_make_assembled_entries_call_arg = {
            'id': 'test-stream',
        }

        actual_call_args = assembled_entry_factory\
            .make_assembled_entries_for_stream.call_args[0]
        self.assertEqual(expected_make_assembled_entries_call_arg,
                         actual_call_args[0])

        mapper = mock_mapper.return_value
        mapper.fulfill_tag_fields.assert_called_once()

        cleaner = mock_cleaner.return_value
        cleaner.delete_obsolete_metadata.assert_called_once()

        ingestor = mock_ingestor.return_value
        ingestor.ingest_metadata.assert_called_once()
    @mock.patch(f'{__SYNCR_MODULE}.ingest.DataCatalogMetadataIngestor')
    @mock.patch(f'{__SYNCR_MODULE}.cleanup.DataCatalogMetadataCleaner',
                lambda *args: mock.MagicMock())
    @mock.patch(f'{__SYNCR_MODULE}.prepare.EntryRelationshipMapper',
                lambda *args: mock.MagicMock())
    def test_run_stream_metadata_should_process_only_required_template(
            self, mock_ingestor):
        """Only the templates referenced by the assembled tags are passed
        on to the ingestor."""
        attrs = self.__synchronizer.__dict__

        scraper = attrs['_MetadataSynchronizer__metadata_scraper']
        assembled_entry_factory = attrs[
            '_MetadataSynchronizer__assembled_entry_factory']

        scraper.scrape_all_streams.return_value = [self.__make_fake_stream()]

        fake_entry = self.__make_fake_entry('stream')
        # Tag referencing exactly one template of this project/location.
        fake_tag = self.__make_fake_tag('projects/test-project-id'
                                        '/locations/test-location-id'
                                        '/tagTemplates/qlik_stream_metadata')
        assembled_entry_factory.make_assembled_entries_for_stream\
            .return_value = [prepare.AssembledEntryData(
                'test-stream', fake_entry, [fake_tag])]

        self.__synchronizer.run()

        # Second positional arg of ingest_metadata is the templates dict.
        ingest_metadata_call_args = \
            mock_ingestor.return_value.ingest_metadata.call_args[0]

        templates_dict_call_arg = ingest_metadata_call_args[1]
        self.assertEqual(1, len(templates_dict_call_arg))
        self.assertTrue('qlik_stream_metadata' in templates_dict_call_arg)
    @mock.patch(f'{__SYNCR_MODULE}.ingest.DataCatalogMetadataIngestor',
                lambda *args: mock.MagicMock())
    @mock.patch(f'{__SYNCR_MODULE}.cleanup.DataCatalogMetadataCleaner',
                lambda *args: mock.MagicMock())
    @mock.patch(f'{__SYNCR_MODULE}.prepare.EntryRelationshipMapper',
                lambda *args: mock.MagicMock())
    def test_run_published_app_should_properly_ask_assembled_entries(self):
        """A published app is attached to its stream, with empty sub-asset
        lists when the scraper returns none."""
        attrs = self.__synchronizer.__dict__

        scraper = attrs['_MetadataSynchronizer__metadata_scraper']
        assembled_entry_factory = attrs[
            '_MetadataSynchronizer__assembled_entry_factory']

        scraper.scrape_all_streams.return_value = [self.__make_fake_stream()]
        scraper.scrape_all_apps.return_value = \
            [self.__make_fake_published_app()]
        # No sub-assets scraped for the app.
        scraper.scrape_dimensions.return_value = []
        scraper.scrape_measures.return_value = []
        scraper.scrape_visualizations.return_value = []
        scraper.scrape_sheets.return_value = []

        self.__synchronizer.run()

        # The stream passed to the factory must embed the published app
        # with its (empty) sub-asset lists.
        expected_make_assembled_entries_call_arg = {
            'id':
                'test-stream',
            'apps': [{
                'id': 'test-app',
                'published': True,
                'stream': {
                    'id': 'test-stream'
                },
                'dimensions': [],
                'measures': [],
                'visualizations': [],
                'sheets': [],
            }]
        }

        actual_call_args = assembled_entry_factory\
            .make_assembled_entries_for_stream.call_args[0]
        self.assertEqual(expected_make_assembled_entries_call_arg,
                         actual_call_args[0])
    @mock.patch(f'{__SYNCR_MODULE}.ingest.DataCatalogMetadataIngestor',
                lambda *args: mock.MagicMock())
    @mock.patch(f'{__SYNCR_MODULE}.cleanup.DataCatalogMetadataCleaner',
                lambda *args: mock.MagicMock())
    @mock.patch(f'{__SYNCR_MODULE}.prepare.EntryRelationshipMapper',
                lambda *args: mock.MagicMock())
    def test_run_not_published_app_should_properly_ask_assembled_entries(self):
        """An unpublished (work-in-progress) app is left out of the stream
        metadata."""
        attrs = self.__synchronizer.__dict__

        scraper = attrs['_MetadataSynchronizer__metadata_scraper']
        assembled_entry_factory = attrs[
            '_MetadataSynchronizer__assembled_entry_factory']

        scraper.scrape_all_streams.return_value = [self.__make_fake_stream()]
        scraper.scrape_all_apps.return_value = [self.__make_fake_wip_app()]

        assembled_entry_factory.make_assembled_entries_for_stream\
            .return_value = [
                prepare.AssembledEntryData(
                    'test-stream', self.__make_fake_entry('stream'), [])
            ]

        self.__synchronizer.run()

        # No 'apps' key expected: the WIP app must not be attached.
        expected_make_assembled_entries_call_arg = {
            'id': 'test-stream',
        }

        actual_call_args = assembled_entry_factory\
            .make_assembled_entries_for_stream.call_args[0]
        self.assertEqual(expected_make_assembled_entries_call_arg,
                         actual_call_args[0])
    @mock.patch(f'{__SYNCR_MODULE}.ingest.DataCatalogMetadataIngestor',
                lambda *args: mock.MagicMock())
    @mock.patch(f'{__SYNCR_MODULE}.cleanup.DataCatalogMetadataCleaner',
                lambda *args: mock.MagicMock())
    @mock.patch(f'{__SYNCR_MODULE}.prepare.EntryRelationshipMapper',
                lambda *args: mock.MagicMock())
    def test_run_dimension_should_properly_ask_assembled_entries(self):
        """A scraped dimension is nested under its app, enriched with a
        back-reference to the app."""
        attrs = self.__synchronizer.__dict__

        scraper = attrs['_MetadataSynchronizer__metadata_scraper']
        assembled_entry_factory = attrs[
            '_MetadataSynchronizer__assembled_entry_factory']

        scraper.scrape_all_streams.return_value = [self.__make_fake_stream()]
        scraper.scrape_all_apps.return_value = \
            [self.__make_fake_published_app()]
        scraper.scrape_dimensions.return_value = [self.__make_fake_dimension()]
        scraper.scrape_measures.return_value = []
        scraper.scrape_visualizations.return_value = []
        scraper.scrape_sheets.return_value = []

        self.__synchronizer.run()

        # The dimension must carry an 'app' back-reference injected by the
        # synchronizer.
        expected_make_assembled_entries_call_arg = {
            'id':
                'test-stream',
            'apps': [{
                'id': 'test-app',
                'published': True,
                'stream': {
                    'id': 'test-stream'
                },
                'dimensions': [{
                    'qInfo': {
                        'qId': 'test-dimension',
                    },
                    'qDim': {},
                    'qMetaDef': {},
                    'app': {
                        'id': 'test-app',
                        'name': None
                    },
                }],
                'measures': [],
                'visualizations': [],
                'sheets': [],
            }]
        }

        actual_call_args = assembled_entry_factory\
            .make_assembled_entries_for_stream.call_args[0]
        self.assertEqual(expected_make_assembled_entries_call_arg,
                         actual_call_args[0])
@mock.patch(f'{__SYNCR_MODULE}.ingest.DataCatalogMetadataIngestor',
            lambda *args: mock.MagicMock())
@mock.patch(f'{__SYNCR_MODULE}.cleanup.DataCatalogMetadataCleaner',
            lambda *args: mock.MagicMock())
@mock.patch(f'{__SYNCR_MODULE}.prepare.EntryRelationshipMapper',
            lambda *args: mock.MagicMock())
def test_run_measure_should_properly_ask_assembled_entries(self):
    """A scraped measure must be nested under its app in the payload."""
    # Reach through Python name mangling to grab the mocked collaborators.
    attrs = self.__synchronizer.__dict__
    scraper = attrs['_MetadataSynchronizer__metadata_scraper']
    assembled_entry_factory = attrs[
        '_MetadataSynchronizer__assembled_entry_factory']
    # One published app whose only child object is a single measure.
    scraper.scrape_all_streams.return_value = [self.__make_fake_stream()]
    scraper.scrape_all_apps.return_value = \
        [self.__make_fake_published_app()]
    scraper.scrape_dimensions.return_value = []
    scraper.scrape_measures.return_value = [self.__make_fake_measure()]
    scraper.scrape_visualizations.return_value = []
    scraper.scrape_sheets.return_value = []
    self.__synchronizer.run()
    # The measure is expected to be enriched with an 'app' back-reference
    # ('name' is None because the fake app metadata carries no name field).
    expected_make_assembled_entries_call_arg = {
        'id': 'test-stream',
        'apps': [{
            'id': 'test-app',
            'published': True,
            'stream': {
                'id': 'test-stream'
            },
            'dimensions': [],
            'measures': [{
                'qInfo': {
                    'qId': 'test-measure',
                },
                'qMeasure': {},
                'qMetaDef': {},
                'app': {
                    'id': 'test-app',
                    'name': None
                },
            }],
            'visualizations': [],
            'sheets': [],
        }]
    }
    actual_call_args = assembled_entry_factory\
        .make_assembled_entries_for_stream.call_args[0]
    self.assertEqual(expected_make_assembled_entries_call_arg,
                     actual_call_args[0])
@mock.patch(f'{__SYNCR_MODULE}.ingest.DataCatalogMetadataIngestor',
            lambda *args: mock.MagicMock())
@mock.patch(f'{__SYNCR_MODULE}.cleanup.DataCatalogMetadataCleaner',
            lambda *args: mock.MagicMock())
@mock.patch(f'{__SYNCR_MODULE}.prepare.EntryRelationshipMapper',
            lambda *args: mock.MagicMock())
def test_run_visualization_should_properly_ask_assembled_entries(self):
    """A scraped visualization must be nested under its app in the payload."""
    # Reach through Python name mangling to grab the mocked collaborators.
    attrs = self.__synchronizer.__dict__
    scraper = attrs['_MetadataSynchronizer__metadata_scraper']
    assembled_entry_factory = attrs[
        '_MetadataSynchronizer__assembled_entry_factory']
    # One published app whose only child object is a single visualization.
    scraper.scrape_all_streams.return_value = [self.__make_fake_stream()]
    scraper.scrape_all_apps.return_value = \
        [self.__make_fake_published_app()]
    scraper.scrape_dimensions.return_value = []
    scraper.scrape_measures.return_value = []
    scraper.scrape_visualizations.return_value = [
        self.__make_fake_visualization()
    ]
    scraper.scrape_sheets.return_value = []
    self.__synchronizer.run()
    # The visualization is expected to be enriched with an 'app'
    # back-reference ('name' is None because the fake app has no name).
    expected_make_assembled_entries_call_arg = {
        'id': 'test-stream',
        'apps': [{
            'id': 'test-app',
            'published': True,
            'stream': {
                'id': 'test-stream'
            },
            'dimensions': [],
            'measures': [],
            'visualizations': [{
                'qInfo': {
                    'qId': 'test-visualization',
                },
                'qMetaDef': {},
                'app': {
                    'id': 'test-app',
                    'name': None
                },
            }],
            'sheets': [],
        }]
    }
    actual_call_args = assembled_entry_factory\
        .make_assembled_entries_for_stream.call_args[0]
    self.assertEqual(expected_make_assembled_entries_call_arg,
                     actual_call_args[0])
@mock.patch(f'{__SYNCR_MODULE}.ingest.DataCatalogMetadataIngestor',
            lambda *args: mock.MagicMock())
@mock.patch(f'{__SYNCR_MODULE}.cleanup.DataCatalogMetadataCleaner',
            lambda *args: mock.MagicMock())
@mock.patch(f'{__SYNCR_MODULE}.prepare.EntryRelationshipMapper',
            lambda *args: mock.MagicMock())
def test_run_published_sheet_should_properly_ask_assembled_entries(self):
    """A published sheet must be nested under its app in the payload."""
    # Reach through Python name mangling to grab the mocked collaborators.
    attrs = self.__synchronizer.__dict__
    scraper = attrs['_MetadataSynchronizer__metadata_scraper']
    assembled_entry_factory = attrs[
        '_MetadataSynchronizer__assembled_entry_factory']
    # One published app whose only child object is a published sheet.
    scraper.scrape_all_streams.return_value = [self.__make_fake_stream()]
    scraper.scrape_all_apps.return_value = \
        [self.__make_fake_published_app()]
    scraper.scrape_dimensions.return_value = []
    scraper.scrape_measures.return_value = []
    scraper.scrape_visualizations.return_value = []
    scraper.scrape_sheets.return_value = [
        self.__make_fake_published_sheet()
    ]
    self.__synchronizer.run()
    # The published sheet survives filtering and is enriched with an 'app'
    # back-reference ('name' is None because the fake app has no name).
    expected_make_assembled_entries_call_arg = {
        'id': 'test-stream',
        'apps': [{
            'id': 'test-app',
            'published': True,
            'stream': {
                'id': 'test-stream'
            },
            'dimensions': [],
            'measures': [],
            'visualizations': [],
            'sheets': [{
                'qInfo': {
                    'qId': 'test-sheet',
                },
                'qMeta': {
                    'published': True,
                },
                'app': {
                    'id': 'test-app',
                    'name': None
                },
            }],
        }]
    }
    actual_call_args = assembled_entry_factory\
        .make_assembled_entries_for_stream.call_args[0]
    self.assertEqual(expected_make_assembled_entries_call_arg,
                     actual_call_args[0])
@mock.patch(f'{__SYNCR_MODULE}.ingest.DataCatalogMetadataIngestor',
            lambda *args: mock.MagicMock())
@mock.patch(f'{__SYNCR_MODULE}.cleanup.DataCatalogMetadataCleaner',
            lambda *args: mock.MagicMock())
@mock.patch(f'{__SYNCR_MODULE}.prepare.EntryRelationshipMapper',
            lambda *args: mock.MagicMock())
def test_run_not_published_sheet_should_properly_ask_assembled_entries(
        self):
    """An unpublished sheet must be dropped from the app metadata payload."""
    # The synchronizer stores its collaborators in name-mangled private
    # attributes; pull the mocked ones out of the instance dict.
    synchronizer_attrs = self.__synchronizer.__dict__
    scraper_mock = synchronizer_attrs[
        '_MetadataSynchronizer__metadata_scraper']
    factory_mock = synchronizer_attrs[
        '_MetadataSynchronizer__assembled_entry_factory']
    # Arrange: one published app whose only sheet is work-in-progress.
    scraper_mock.scrape_all_streams.return_value = \
        [self.__make_fake_stream()]
    scraper_mock.scrape_all_apps.return_value = \
        [self.__make_fake_published_app()]
    scraper_mock.scrape_dimensions.return_value = []
    scraper_mock.scrape_measures.return_value = []
    scraper_mock.scrape_visualizations.return_value = []
    scraper_mock.scrape_sheets.return_value = [self.__make_fake_wip_sheet()]
    self.__synchronizer.run()
    # Assert: the app is forwarded, but with an empty 'sheets' list.
    expected_stream_metadata = {
        'id': 'test-stream',
        'apps': [{
            'id': 'test-app',
            'published': True,
            'stream': {'id': 'test-stream'},
            'dimensions': [],
            'measures': [],
            'visualizations': [],
            'sheets': [],
        }]
    }
    call_positional_args = \
        factory_mock.make_assembled_entries_for_stream.call_args[0]
    self.assertEqual(expected_stream_metadata, call_positional_args[0])
@classmethod
def __make_fake_stream(cls):
    """Minimal stream metadata payload shared by the tests."""
    return dict(id='test-stream')
@classmethod
def __make_fake_published_app(cls):
    """App metadata flagged as published and linked to the fake stream."""
    app = dict(id='test-app', published=True)
    app['stream'] = cls.__make_fake_stream()
    return app
@classmethod
def __make_fake_wip_app(cls):
    """App metadata flagged as not published (work in progress)."""
    return dict(id='test-app', published=False)
@classmethod
def __make_fake_dimension(cls):
    """Dimension payload mimicking the Qlik Engine JSON structure."""
    dimension = {'qInfo': dict(qId='test-dimension')}
    dimension['qDim'] = {}
    dimension['qMetaDef'] = {}
    return dimension
@classmethod
def __make_fake_measure(cls):
    """Measure payload mimicking the Qlik Engine JSON structure."""
    measure = {'qInfo': dict(qId='test-measure')}
    measure['qMeasure'] = {}
    measure['qMetaDef'] = {}
    return measure
@classmethod
def __make_fake_visualization(cls):
    """Visualization payload mimicking the Qlik Engine JSON structure."""
    visualization = {'qInfo': dict(qId='test-visualization')}
    visualization['qMetaDef'] = {}
    return visualization
@classmethod
def __make_fake_sheet(cls):
    """Sheet payload with an empty qMeta, to be specialized by callers."""
    sheet = {'qInfo': dict(qId='test-sheet')}
    sheet['qMeta'] = {}
    return sheet
@classmethod
def __make_fake_published_sheet(cls):
    """Sheet payload whose qMeta marks it as published."""
    sheet = cls.__make_fake_sheet()
    sheet['qMeta'].update(published=True)
    return sheet
@classmethod
def __make_fake_wip_sheet(cls):
    """Sheet payload whose qMeta marks it as not published."""
    sheet = cls.__make_fake_sheet()
    sheet['qMeta'].update(published=False)
    return sheet
@classmethod
def __make_fake_entry(cls, user_specified_type):
    """Data Catalog Entry stub carrying only the given user-specified type."""
    fake_entry = datacatalog.Entry()
    fake_entry.user_specified_type = user_specified_type
    return fake_entry
@classmethod
def __make_fake_tag(cls, template_name):
    """Data Catalog Tag stub bound to the given template name."""
    fake_tag = datacatalog.Tag()
    fake_tag.template = template_name
    return fake_tag
| |
#!/usr/bin/python
# NOTE: this is a Python 2 script (it uses print statements throughout).
import sys
import os
import sqlite3
sys.path.append("../../src/")
import cxxtags_util as cxxtags
sys.path.append("../util/")
import clang.cindex # for kind types
# Global failure counter; incremented by test_one() on every mismatch and
# used as the process exit status at the end of the script.
err = 0
def test_one(db, q, a):
    """Run query *q* against *db* and check it yields exactly the row *a*.

    Instead of raising, mismatches increment the global ``err`` counter so
    the script can report every failure before exiting.
    """
    global err
    res = list(db.execute(q).fetchall())
    if len(res) != 1:
        # Each location query is expected to match exactly one row.
        print "ERROR: result num: %d"%(len(res))
        print " q = ", q
        for i in res:
            print " ", i
        err += 1
    else:
        if res[0] != a:
            print "DIFFER:"
            print " ", res[0]
            print " ", a
            err += 1
# ---- script body: build the queries and their expected answer rows ----
if len(sys.argv) != 2:
    print "usage: cmd db_file"
    exit(1)
db_dir = sys.argv[1]
cur_dir = os.getcwd()
# Column lists (including the FROM clause) for declaration and reference
# queries against the joined cxxtags tables.
decl_col = "name_list.name, file_list.name, decl.line, decl.col, decl.kind, decl.val, decl.is_def, decl.type_kind, decl.is_pointer FROM " + cxxtags.QUERY_JOINED_TABLE_DECL
ref_col = " name_list.name, file_list.name, ref.line, ref.col, ref.kind, ref_file_list.name, ref.ref_line, ref.ref_col FROM " + cxxtags.QUERY_JOINED_TABLE_REF
# One query per source location of interest in main.cpp; each entry pairs
# with the expected row at the same index in a_list below.
q_list = [
    # main.cpp
    "SELECT "+decl_col+" WHERE line=4 AND col=5 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_0
    "SELECT "+decl_col+" WHERE line=5 AND col=5 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_1
    "SELECT "+decl_col+" WHERE line=6 AND col=5 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_2
    "SELECT "+decl_col+" WHERE line=7 AND col=5 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_3
    "SELECT "+decl_col+" WHERE line=11 AND col=5 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL1_0
    "SELECT "+decl_col+" WHERE line=12 AND col=5 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL1_1
    "SELECT "+decl_col+" WHERE line=13 AND col=5 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL1_2
    "SELECT "+decl_col+" WHERE line=14 AND col=5 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL1_3
    "SELECT "+decl_col+" WHERE line=17 AND col=13 AND file_list.name=\""+cur_dir+"/main.cpp\"", #check
    "SELECT "+ref_col+" WHERE line=19 AND col=35 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_0
    "SELECT "+ref_col+" WHERE line=19 AND col=43 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_1
    "SELECT "+ref_col+" WHERE line=19 AND col=51 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_2
    "SELECT "+ref_col+" WHERE line=19 AND col=59 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_3
    "SELECT "+decl_col+" WHERE line=22 AND col=11 AND file_list.name=\""+cur_dir+"/main.cpp\"", #NS0
    "SELECT "+decl_col+" WHERE line=24 AND col=9 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_0
    "SELECT "+decl_col+" WHERE line=25 AND col=9 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_1
    "SELECT "+decl_col+" WHERE line=26 AND col=9 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_2
    "SELECT "+decl_col+" WHERE line=27 AND col=9 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_3
    "SELECT "+decl_col+" WHERE line=29 AND col=17 AND file_list.name=\""+cur_dir+"/main.cpp\"", #check
    "SELECT "+ref_col+" WHERE line=31 AND col=42 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_0
    "SELECT "+ref_col+" WHERE line=31 AND col=50 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_1
    "SELECT "+ref_col+" WHERE line=31 AND col=58 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_2
    "SELECT "+ref_col+" WHERE line=31 AND col=66 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_3
    "SELECT "+decl_col+" WHERE line=33 AND col=11 AND file_list.name=\""+cur_dir+"/main.cpp\"", #C0
    "SELECT "+decl_col+" WHERE line=36 AND col=13 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_0
    "SELECT "+decl_col+" WHERE line=37 AND col=13 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_1
    "SELECT "+decl_col+" WHERE line=38 AND col=13 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_2
    "SELECT "+decl_col+" WHERE line=39 AND col=13 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_3
    "SELECT "+decl_col+" WHERE line=41 AND col=14 AND file_list.name=\""+cur_dir+"/main.cpp\"", #check
    "SELECT "+ref_col+" WHERE line=43 AND col=50 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_0
    "SELECT "+ref_col+" WHERE line=43 AND col=58 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_1
    "SELECT "+ref_col+" WHERE line=43 AND col=66 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_2
    "SELECT "+ref_col+" WHERE line=43 AND col=74 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_3
    "SELECT "+decl_col+" WHERE line=46 AND col=11 AND file_list.name=\""+cur_dir+"/main.cpp\"", #C1
    "SELECT "+ref_col+" WHERE line=46 AND col=23 AND file_list.name=\""+cur_dir+"/main.cpp\"", #C0
    "SELECT "+decl_col+" WHERE line=50 AND col=13 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_0
    "SELECT "+decl_col+" WHERE line=51 AND col=13 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_1
    "SELECT "+decl_col+" WHERE line=52 AND col=13 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_2
    "SELECT "+decl_col+" WHERE line=53 AND col=13 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_3
    "SELECT "+decl_col+" WHERE line=55 AND col=14 AND file_list.name=\""+cur_dir+"/main.cpp\"", #check
    "SELECT "+ref_col+" WHERE line=57 AND col=50 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_0
    "SELECT "+ref_col+" WHERE line=57 AND col=58 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_1
    "SELECT "+ref_col+" WHERE line=57 AND col=66 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_2
    "SELECT "+ref_col+" WHERE line=57 AND col=74 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_3
    "SELECT "+decl_col+" WHERE line=62 AND col=11 AND file_list.name=\""+cur_dir+"/main.cpp\"", #NS1
    "SELECT "+decl_col+" WHERE line=64 AND col=9 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_0
    "SELECT "+decl_col+" WHERE line=65 AND col=9 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_1
    "SELECT "+decl_col+" WHERE line=66 AND col=9 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_2
    "SELECT "+decl_col+" WHERE line=67 AND col=9 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_3
    "SELECT "+decl_col+" WHERE line=69 AND col=17 AND file_list.name=\""+cur_dir+"/main.cpp\"", #check
    "SELECT "+ref_col+" WHERE line=71 AND col=42 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_0
    "SELECT "+ref_col+" WHERE line=71 AND col=50 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_1
    "SELECT "+ref_col+" WHERE line=71 AND col=58 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_2
    "SELECT "+ref_col+" WHERE line=71 AND col=66 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_3
    "SELECT "+decl_col+" WHERE line=73 AND col=11 AND file_list.name=\""+cur_dir+"/main.cpp\"", #C0
    "SELECT "+decl_col+" WHERE line=76 AND col=13 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_0
    "SELECT "+decl_col+" WHERE line=77 AND col=13 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_1
    "SELECT "+decl_col+" WHERE line=78 AND col=13 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_2
    "SELECT "+decl_col+" WHERE line=79 AND col=13 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_3
    "SELECT "+decl_col+" WHERE line=81 AND col=14 AND file_list.name=\""+cur_dir+"/main.cpp\"", #check
    "SELECT "+ref_col+" WHERE line=83 AND col=50 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_0
    "SELECT "+ref_col+" WHERE line=83 AND col=58 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_1
    "SELECT "+ref_col+" WHERE line=83 AND col=66 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_2
    "SELECT "+ref_col+" WHERE line=83 AND col=74 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_3
    "SELECT "+decl_col+" WHERE line=86 AND col=11 AND file_list.name=\""+cur_dir+"/main.cpp\"", #C1
    "SELECT "+ref_col+" WHERE line=86 AND col=23 AND file_list.name=\""+cur_dir+"/main.cpp\"", #C0
    "SELECT "+decl_col+" WHERE line=89 AND col=14 AND file_list.name=\""+cur_dir+"/main.cpp\"", #check
    "SELECT "+ref_col+" WHERE line=91 AND col=50 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_0
    "SELECT "+ref_col+" WHERE line=91 AND col=58 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_1
    "SELECT "+ref_col+" WHERE line=91 AND col=66 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_2
    "SELECT "+ref_col+" WHERE line=91 AND col=74 AND file_list.name=\""+cur_dir+"/main.cpp\"", #VAL0_3
    "SELECT "+decl_col+" WHERE line=96 AND col=5 AND file_list.name=\""+cur_dir+"/main.cpp\"", #main
    "SELECT "+ref_col+" WHERE line=98 AND col=5 AND file_list.name=\""+cur_dir+"/main.cpp\"", #NS0
    "SELECT "+ref_col+" WHERE line=98 AND col=10 AND file_list.name=\""+cur_dir+"/main.cpp\"", #C0
    "SELECT "+decl_col+" WHERE line=98 AND col=13 AND file_list.name=\""+cur_dir+"/main.cpp\"", #c00
    "SELECT "+ref_col+" WHERE line=99 AND col=5 AND file_list.name=\""+cur_dir+"/main.cpp\"", #NS0
    "SELECT "+ref_col+" WHERE line=99 AND col=10 AND file_list.name=\""+cur_dir+"/main.cpp\"", #C1
    "SELECT "+decl_col+" WHERE line=99 AND col=13 AND file_list.name=\""+cur_dir+"/main.cpp\"", #c01
    "SELECT "+ref_col+" WHERE line=100 AND col=5 AND file_list.name=\""+cur_dir+"/main.cpp\"", #NS1
    "SELECT "+ref_col+" WHERE line=100 AND col=10 AND file_list.name=\""+cur_dir+"/main.cpp\"", #C0
    "SELECT "+decl_col+" WHERE line=100 AND col=13 AND file_list.name=\""+cur_dir+"/main.cpp\"", #c10
    "SELECT "+ref_col+" WHERE line=101 AND col=5 AND file_list.name=\""+cur_dir+"/main.cpp\"", #NS1
    "SELECT "+ref_col+" WHERE line=101 AND col=10 AND file_list.name=\""+cur_dir+"/main.cpp\"", #C1
    "SELECT "+decl_col+" WHERE line=101 AND col=13 AND file_list.name=\""+cur_dir+"/main.cpp\"", #c11
    "SELECT "+ref_col+" WHERE line=102 AND col=7 AND file_list.name=\""+cur_dir+"/main.cpp\"", #check
    "SELECT "+ref_col+" WHERE line=103 AND col=5 AND file_list.name=\""+cur_dir+"/main.cpp\"", #NS0
    "SELECT "+ref_col+" WHERE line=103 AND col=10 AND file_list.name=\""+cur_dir+"/main.cpp\"", #check
    "SELECT "+ref_col+" WHERE line=104 AND col=5 AND file_list.name=\""+cur_dir+"/main.cpp\"", #NS1
    "SELECT "+ref_col+" WHERE line=104 AND col=10 AND file_list.name=\""+cur_dir+"/main.cpp\"", #check
    "SELECT "+ref_col+" WHERE line=105 AND col=5 AND file_list.name=\""+cur_dir+"/main.cpp\"", #c00
    "SELECT "+ref_col+" WHERE line=105 AND col=9 AND file_list.name=\""+cur_dir+"/main.cpp\"", #check
    "SELECT "+ref_col+" WHERE line=106 AND col=5 AND file_list.name=\""+cur_dir+"/main.cpp\"", #c01
    "SELECT "+ref_col+" WHERE line=106 AND col=9 AND file_list.name=\""+cur_dir+"/main.cpp\"", #check
    "SELECT "+ref_col+" WHERE line=107 AND col=5 AND file_list.name=\""+cur_dir+"/main.cpp\"", #c10
    "SELECT "+ref_col+" WHERE line=107 AND col=9 AND file_list.name=\""+cur_dir+"/main.cpp\"", #check
    "SELECT "+ref_col+" WHERE line=108 AND col=5 AND file_list.name=\""+cur_dir+"/main.cpp\"", #c11
    "SELECT "+ref_col+" WHERE line=108 AND col=9 AND file_list.name=\""+cur_dir+"/main.cpp\"", #check
]
# Expected rows: decl rows are (name, file, line, col, kind, val, is_def,
# type_kind, is_pointer); ref rows are (name, file, line, col, kind,
# ref_file, ref_line, ref_col).
a_list = [
    ('VAL0_0',cur_dir+'/main.cpp',4,5,clang.cindex.CursorKind.ENUM_CONSTANT_DECL.value,0,1,106,0),
    ('VAL0_1',cur_dir+'/main.cpp',5,5,clang.cindex.CursorKind.ENUM_CONSTANT_DECL.value,1,1,106,0),
    ('VAL0_2',cur_dir+'/main.cpp',6,5,clang.cindex.CursorKind.ENUM_CONSTANT_DECL.value,2,1,106,0),
    ('VAL0_3',cur_dir+'/main.cpp',7,5,clang.cindex.CursorKind.ENUM_CONSTANT_DECL.value,3,1,106,0),
    ('VAL1_0',cur_dir+'/main.cpp',11,5,clang.cindex.CursorKind.ENUM_CONSTANT_DECL.value,0,1,106,0),
    ('VAL1_1',cur_dir+'/main.cpp',12,5,clang.cindex.CursorKind.ENUM_CONSTANT_DECL.value,1,1,106,0),
    ('VAL1_2',cur_dir+'/main.cpp',13,5,clang.cindex.CursorKind.ENUM_CONSTANT_DECL.value,2,1,106,0),
    ('VAL1_3',cur_dir+'/main.cpp',14,5,clang.cindex.CursorKind.ENUM_CONSTANT_DECL.value,3,1,106,0),
    ('check',cur_dir+'/main.cpp',17,13,clang.cindex.CursorKind.FUNCTION_DECL.value,0,1, 2, 0),
    ('VAL0_0',cur_dir+'/main.cpp',19,35,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',4,5),
    ('VAL0_1',cur_dir+'/main.cpp',19,43,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',5,5),
    ('VAL0_2',cur_dir+'/main.cpp',19,51,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',6,5),
    ('VAL0_3',cur_dir+'/main.cpp',19,59,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',7,5),
    ('NS0',cur_dir+'/main.cpp',22,11,clang.cindex.CursorKind.NAMESPACE.value,0,1,0,0),
    ('VAL0_0',cur_dir+'/main.cpp',24,9,clang.cindex.CursorKind.ENUM_CONSTANT_DECL.value,10,1,106,0),
    ('VAL0_1',cur_dir+'/main.cpp',25,9,clang.cindex.CursorKind.ENUM_CONSTANT_DECL.value,11,1,106,0),
    ('VAL0_2',cur_dir+'/main.cpp',26,9,clang.cindex.CursorKind.ENUM_CONSTANT_DECL.value,12,1,106,0),
    ('VAL0_3',cur_dir+'/main.cpp',27,9,clang.cindex.CursorKind.ENUM_CONSTANT_DECL.value,13,1,106,0),
    ('check',cur_dir+'/main.cpp',29,17,clang.cindex.CursorKind.FUNCTION_DECL.value,0,1,2,0),
    ('VAL0_0',cur_dir+'/main.cpp',31,42,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',24,9),
    ('VAL0_1',cur_dir+'/main.cpp',31,50,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',25,9),
    ('VAL0_2',cur_dir+'/main.cpp',31,58,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',26,9),
    ('VAL0_3',cur_dir+'/main.cpp',31,66,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',27,9),
    ('C0',cur_dir+'/main.cpp',33,11,clang.cindex.CursorKind.CLASS_DECL.value,0,1, 105, 0),
    ('VAL0_0',cur_dir+'/main.cpp',36,13,clang.cindex.CursorKind.ENUM_CONSTANT_DECL.value,20,1, 106, 0),
    ('VAL0_1',cur_dir+'/main.cpp',37,13,clang.cindex.CursorKind.ENUM_CONSTANT_DECL.value,21,1, 106, 0),
    ('VAL0_2',cur_dir+'/main.cpp',38,13,clang.cindex.CursorKind.ENUM_CONSTANT_DECL.value,22,1, 106, 0),
    ('VAL0_3',cur_dir+'/main.cpp',39,13,clang.cindex.CursorKind.ENUM_CONSTANT_DECL.value,23,1, 106, 0),
    ('check',cur_dir+'/main.cpp',41,14,clang.cindex.CursorKind.CXX_METHOD.value,0,1, 2, 0),
    ('VAL0_0',cur_dir+'/main.cpp',43,50,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',36,13),
    ('VAL0_1',cur_dir+'/main.cpp',43,58,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',37,13),
    ('VAL0_2',cur_dir+'/main.cpp',43,66,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',38,13),
    ('VAL0_3',cur_dir+'/main.cpp',43,74,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',39,13),
    ('C1',cur_dir+'/main.cpp',46,11,clang.cindex.CursorKind.CLASS_DECL.value,0,1, 105, 0),
    ('C0',cur_dir+'/main.cpp',46,23,clang.cindex.CursorKind.TYPE_REF.value,cur_dir+'/main.cpp',33,11),
    ('VAL0_0',cur_dir+'/main.cpp',50,13,clang.cindex.CursorKind.ENUM_CONSTANT_DECL.value,30,1, 106, 0),
    ('VAL0_1',cur_dir+'/main.cpp',51,13,clang.cindex.CursorKind.ENUM_CONSTANT_DECL.value,31,1, 106, 0),
    ('VAL0_2',cur_dir+'/main.cpp',52,13,clang.cindex.CursorKind.ENUM_CONSTANT_DECL.value,32,1, 106, 0),
    ('VAL0_3',cur_dir+'/main.cpp',53,13,clang.cindex.CursorKind.ENUM_CONSTANT_DECL.value,33,1, 106, 0),
    ('check',cur_dir+'/main.cpp',55,14,clang.cindex.CursorKind.CXX_METHOD.value,0,1, 2, 0),
    ('VAL0_0',cur_dir+'/main.cpp',57,50,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',50,13),
    ('VAL0_1',cur_dir+'/main.cpp',57,58,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',51,13),
    ('VAL0_2',cur_dir+'/main.cpp',57,66,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',52,13),
    ('VAL0_3',cur_dir+'/main.cpp',57,74,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',53,13),
    ('NS1',cur_dir+'/main.cpp',62,11,clang.cindex.CursorKind.NAMESPACE.value,0,1, 0, 0),
    ('VAL0_0',cur_dir+'/main.cpp',64,9,clang.cindex.CursorKind.ENUM_CONSTANT_DECL.value,40,1, 106, 0),
    ('VAL0_1',cur_dir+'/main.cpp',65,9,clang.cindex.CursorKind.ENUM_CONSTANT_DECL.value,41,1, 106, 0),
    ('VAL0_2',cur_dir+'/main.cpp',66,9,clang.cindex.CursorKind.ENUM_CONSTANT_DECL.value,42,1, 106, 0),
    ('VAL0_3',cur_dir+'/main.cpp',67,9,clang.cindex.CursorKind.ENUM_CONSTANT_DECL.value,43,1, 106, 0),
    ('check',cur_dir+'/main.cpp',69,17,clang.cindex.CursorKind.FUNCTION_DECL.value,0,1, 2, 0),
    ('VAL0_0',cur_dir+'/main.cpp',71,42,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',64,9),
    ('VAL0_1',cur_dir+'/main.cpp',71,50,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',65,9),
    ('VAL0_2',cur_dir+'/main.cpp',71,58,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',66,9),
    ('VAL0_3',cur_dir+'/main.cpp',71,66,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',67,9),
    ('C0',cur_dir+'/main.cpp',73,11,clang.cindex.CursorKind.CLASS_DECL.value,0,1, 105, 0),
    ('VAL0_0',cur_dir+'/main.cpp',76,13,clang.cindex.CursorKind.ENUM_CONSTANT_DECL.value,50,1, 106, 0),
    ('VAL0_1',cur_dir+'/main.cpp',77,13,clang.cindex.CursorKind.ENUM_CONSTANT_DECL.value,51,1, 106, 0),
    ('VAL0_2',cur_dir+'/main.cpp',78,13,clang.cindex.CursorKind.ENUM_CONSTANT_DECL.value,52,1, 106, 0),
    ('VAL0_3',cur_dir+'/main.cpp',79,13,clang.cindex.CursorKind.ENUM_CONSTANT_DECL.value,53,1, 106, 0),
    ('check',cur_dir+'/main.cpp',81,14,clang.cindex.CursorKind.CXX_METHOD.value,0,1, 2, 0),
    ('VAL0_0',cur_dir+'/main.cpp',83,50,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',76,13),
    ('VAL0_1',cur_dir+'/main.cpp',83,58,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',77,13),
    ('VAL0_2',cur_dir+'/main.cpp',83,66,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',78,13),
    ('VAL0_3',cur_dir+'/main.cpp',83,74,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',79,13),
    ('C1',cur_dir+'/main.cpp',86,11,clang.cindex.CursorKind.CLASS_DECL.value,0,1, 105, 0),
    ('C0',cur_dir+'/main.cpp',86,23,clang.cindex.CursorKind.TYPE_REF.value,cur_dir+'/main.cpp',73,11),
    ('check',cur_dir+'/main.cpp',89,14,clang.cindex.CursorKind.CXX_METHOD.value,0,1, 2, 0),
    ('VAL0_0',cur_dir+'/main.cpp',91,50,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',76,13),
    ('VAL0_1',cur_dir+'/main.cpp',91,58,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',77,13),
    ('VAL0_2',cur_dir+'/main.cpp',91,66,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',78,13),
    ('VAL0_3',cur_dir+'/main.cpp',91,74,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',79,13),
    ('main',cur_dir+'/main.cpp',96,5,clang.cindex.CursorKind.FUNCTION_DECL.value,0,1, 17, 0),
    ('NS0',cur_dir+'/main.cpp',98,5,clang.cindex.CursorKind.NAMESPACE_REF.value,cur_dir+'/main.cpp',22,11),
    ('C0',cur_dir+'/main.cpp',98,10,clang.cindex.CursorKind.TYPE_REF.value,cur_dir+'/main.cpp',33,11),
    ('c00',cur_dir+'/main.cpp',98,13,clang.cindex.CursorKind.VAR_DECL.value,0,1, 105, 0),
    ('NS0',cur_dir+'/main.cpp',99,5,clang.cindex.CursorKind.NAMESPACE_REF.value,cur_dir+'/main.cpp',22,11),
    ('C1',cur_dir+'/main.cpp',99,10,clang.cindex.CursorKind.TYPE_REF.value,cur_dir+'/main.cpp',46,11),
    ('c01',cur_dir+'/main.cpp',99,13,clang.cindex.CursorKind.VAR_DECL.value,0,1, 105, 0),
    ('NS1',cur_dir+'/main.cpp',100,5,clang.cindex.CursorKind.NAMESPACE_REF.value,cur_dir+'/main.cpp',62,11),
    ('C0',cur_dir+'/main.cpp',100,10,clang.cindex.CursorKind.TYPE_REF.value,cur_dir+'/main.cpp',73,11),
    ('c10',cur_dir+'/main.cpp',100,13,clang.cindex.CursorKind.VAR_DECL.value,0,1, 105, 0),
    ('NS1',cur_dir+'/main.cpp',101,5,clang.cindex.CursorKind.NAMESPACE_REF.value,cur_dir+'/main.cpp',62,11),
    ('C1',cur_dir+'/main.cpp',101,10,clang.cindex.CursorKind.TYPE_REF.value,cur_dir+'/main.cpp',86,11),
    ('c11',cur_dir+'/main.cpp',101,13,clang.cindex.CursorKind.VAR_DECL.value,0,1, 105, 0),
    ('check',cur_dir+'/main.cpp',102,7,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',17,13),
    ('NS0',cur_dir+'/main.cpp',103,5,clang.cindex.CursorKind.NAMESPACE_REF.value,cur_dir+'/main.cpp',22,11),
    ('check',cur_dir+'/main.cpp',103,10,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',29,17),
    ('NS1',cur_dir+'/main.cpp',104,5,clang.cindex.CursorKind.NAMESPACE_REF.value,cur_dir+'/main.cpp',62,11),
    ('check',cur_dir+'/main.cpp',104,10,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',69,17),
    ('c00',cur_dir+'/main.cpp',105,5,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',98,13),
    ('check',cur_dir+'/main.cpp',105,9,clang.cindex.CursorKind.MEMBER_REF_EXPR.value,cur_dir+'/main.cpp',41,14),
    ('c01',cur_dir+'/main.cpp',106,5,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',99,13),
    ('check',cur_dir+'/main.cpp',106,9,clang.cindex.CursorKind.MEMBER_REF_EXPR.value,cur_dir+'/main.cpp',55,14),
    ('c10',cur_dir+'/main.cpp',107,5,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',100,13),
    ('check',cur_dir+'/main.cpp',107,9,clang.cindex.CursorKind.MEMBER_REF_EXPR.value,cur_dir+'/main.cpp',81,14),
    ('c11',cur_dir+'/main.cpp',108,5,clang.cindex.CursorKind.DECL_REF_EXPR.value,cur_dir+'/main.cpp',101,13),
    ('check',cur_dir+'/main.cpp',108,9,clang.cindex.CursorKind.MEMBER_REF_EXPR.value,cur_dir+'/main.cpp',89,14),
]
# Run every query against the per-file tags database and report the total
# number of mismatches as the process exit status.
db = cxxtags.get_db_by_file_name(db_dir, "main.cpp")
# Pair each query with its expected row directly instead of keeping a
# manual index counter in step with the loop.
for q, a in zip(q_list, a_list):
    test_one(db, q, a)
if err == 0:
    # Parenthesized print works identically under Python 2 and 3 for a
    # single argument, unlike the bare print statements above.
    print("OK")
else:
    print("ERR: %d" % err)
exit(err)
| |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
From a system-installed copy of the toolchain, packages all the required bits
into a .zip file.
It assumes default install locations for tools, in particular:
- C:\Program Files (x86)\Microsoft Visual Studio 12.0\...
- C:\Program Files (x86)\Windows Kits\8.1\...
1. Start from a fresh Win7 VM image.
2. Install VS Pro. Deselect everything except MFC.
3. Install Windows 8 SDK. Select only the Windows SDK and Debugging Tools for
Windows.
4. Run this script, which will build a <sha1>.zip.
Express is not yet supported by this script, but patches welcome (it's not too
useful as the resulting zip can't be redistributed, and most will presumably
have a Pro license anyway).
"""
import os
import shutil
import sys
import tempfile
import zipfile

import get_toolchain_if_necessary

# Visual Studio version being packaged; only '2013' and '2015' are handled
# below. Presumably assigned from the command line before BuildFileList()
# runs — TODO confirm against the entry point (not visible here).
VS_VERSION = None
def BuildFileList():
  """Return the list of (source_path, dest_path) pairs to package.

  Walks the default VS install, the Windows 8.1 SDK, the Windows 10 UCRT
  (for VS2015) and the system CRT forwarder DLLs, mapping each file to its
  destination inside the toolchain zip. ARM files are dropped at the end.
  """
  result = []
  # Subset of VS corresponding roughly to VC.
  paths = [
      'DIA SDK/bin',
      'DIA SDK/idl',
      'DIA SDK/include',
      'DIA SDK/lib',
      'VC/atlmfc',
      'VC/bin',
      'VC/crt',
      'VC/include',
      'VC/lib',
      'VC/redist',
  ]

  # Redistributable CRT/MFC DLLs are additionally mapped into sys32/sys64;
  # tuple entries mean (source subdir, destination dir).
  if VS_VERSION == '2013':
    paths += [
        ('VC/redist/x86/Microsoft.VC120.CRT', 'sys32'),
        ('VC/redist/x86/Microsoft.VC120.MFC', 'sys32'),
        ('VC/redist/Debug_NonRedist/x86/Microsoft.VC120.DebugCRT', 'sys32'),
        ('VC/redist/Debug_NonRedist/x86/Microsoft.VC120.DebugMFC', 'sys32'),
        ('VC/redist/x64/Microsoft.VC120.CRT', 'sys64'),
        ('VC/redist/x64/Microsoft.VC120.MFC', 'sys64'),
        ('VC/redist/Debug_NonRedist/x64/Microsoft.VC120.DebugCRT', 'sys64'),
        ('VC/redist/Debug_NonRedist/x64/Microsoft.VC120.DebugMFC', 'sys64'),
    ]
  elif VS_VERSION == '2015':
    paths += [
        ('VC/redist/x86/Microsoft.VC140.CRT', 'sys32'),
        ('VC/redist/x86/Microsoft.VC140.MFC', 'sys32'),
        ('VC/redist/debug_nonredist/x86/Microsoft.VC140.DebugCRT', 'sys32'),
        ('VC/redist/debug_nonredist/x86/Microsoft.VC140.DebugMFC', 'sys32'),
        ('VC/redist/x64/Microsoft.VC140.CRT', 'sys64'),
        ('VC/redist/x64/Microsoft.VC140.MFC', 'sys64'),
        ('VC/redist/debug_nonredist/x64/Microsoft.VC140.DebugCRT', 'sys64'),
        ('VC/redist/debug_nonredist/x64/Microsoft.VC140.DebugMFC', 'sys64'),
    ]
  else:
    raise ValueError('VS_VERSION %s' % VS_VERSION)

  if VS_VERSION == '2013':
    vs_path = r'C:\Program Files (x86)\Microsoft Visual Studio 12.0'
  else:
    vs_path = r'C:\Program Files (x86)\Microsoft Visual Studio 14.0'

  for path in paths:
    src = path[0] if isinstance(path, tuple) else path
    combined = os.path.join(vs_path, src)
    assert os.path.exists(combined) and os.path.isdir(combined)
    for root, _, files in os.walk(combined):
      for f in files:
        final_from = os.path.normpath(os.path.join(root, f))
        if isinstance(path, tuple):
          # Redist DLL: flatten into the given sys32/sys64 directory.
          result.append(
              (final_from, os.path.normpath(os.path.join(path[1], f))))
        else:
          # Regular VS file: keep its path relative to the VS root.
          assert final_from.startswith(vs_path)
          dest = final_from[len(vs_path) + 1:]
          if VS_VERSION == '2013' and dest.lower().endswith('\\xtree'):
            # Patch for C4702 in xtree on VS2013. http://crbug.com/346399.
            # NOTE(review): the mkstemp handle is written but never
            # explicitly closed here — relies on process exit.
            (handle, patched) = tempfile.mkstemp()
            with open(final_from, 'rb') as unpatched_f:
              unpatched_contents = unpatched_f.read()
            os.write(handle,
                     unpatched_contents.replace('warning(disable: 4127)',
                                                'warning(disable: 4127 4702)'))
            result.append((patched, dest))
          else:
            result.append((final_from, dest))

  # Just copy the whole SDK.
  sdk_path = r'C:\Program Files (x86)\Windows Kits\8.1'
  for root, _, files in os.walk(sdk_path):
    for f in files:
      combined = os.path.normpath(os.path.join(root, f))
      to = os.path.join('win_sdk', combined[len(sdk_path) + 1:])
      result.append((combined, to))

  if VS_VERSION == '2015':
    # VS2015 additionally needs the Windows 10 Universal CRT.
    for ucrt_path in (
        (r'C:\Program Files (x86)\Windows Kits\10\Include', 'Include'),
        (r'C:\Program Files (x86)\Windows Kits\10\Lib', 'Lib'),
        (r'C:\Program Files (x86)\Windows Kits\10\Source', 'Source')):
      src, target = ucrt_path
      for root, _, files in os.walk(src):
        for f in files:
          combined = os.path.normpath(os.path.join(root, f))
          to = os.path.join('ucrt', target, combined[len(src) + 1:])
          result.append((combined, to))

  # CRT forwarder DLLs picked up from the running system: the 32-bit
  # copies from SysWOW64 and the 64-bit ones via the Sysnative alias.
  system_crt_files = [
      'api-ms-win-core-file-l1-2-0.dll',
      'api-ms-win-core-file-l2-1-0.dll',
      'api-ms-win-core-localization-l1-2-0.dll',
      'api-ms-win-core-processthreads-l1-1-1.dll',
      'api-ms-win-core-synch-l1-2-0.dll',
      'api-ms-win-core-timezone-l1-1-0.dll',
      'api-ms-win-core-xstate-l2-1-0.dll',
      'api-ms-win-crt-conio-l1-1-0.dll',
      'api-ms-win-crt-convert-l1-1-0.dll',
      'api-ms-win-crt-environment-l1-1-0.dll',
      'api-ms-win-crt-filesystem-l1-1-0.dll',
      'api-ms-win-crt-heap-l1-1-0.dll',
      'api-ms-win-crt-locale-l1-1-0.dll',
      'api-ms-win-crt-math-l1-1-0.dll',
      'api-ms-win-crt-multibyte-l1-1-0.dll',
      'api-ms-win-crt-private-l1-1-0.dll',
      'api-ms-win-crt-process-l1-1-0.dll',
      'api-ms-win-crt-runtime-l1-1-0.dll',
      'api-ms-win-crt-stdio-l1-1-0.dll',
      'api-ms-win-crt-string-l1-1-0.dll',
      'api-ms-win-crt-time-l1-1-0.dll',
      'api-ms-win-crt-utility-l1-1-0.dll',
      'api-ms-win-eventing-provider-l1-1-0.dll',
      'ucrtbase.dll',
      'ucrtbased.dll',
  ]
  for system_crt_file in system_crt_files:
    result.append((os.path.join(r'C:\Windows\SysWOW64', system_crt_file),
                   os.path.join('sys32', system_crt_file)))
    result.append((os.path.join(r'C:\Windows\Sysnative', system_crt_file),
                   os.path.join('sys64', system_crt_file)))

  # Generically drop all arm stuff that we don't need.
  return [(f, t) for f, t in result if 'arm\\' not in f.lower() and
          'arm64\\' not in f.lower()]
def GenerateSetEnvCmd(target_dir):
  """Generate a batch file that gyp expects to exist to set up the compiler
  environment.
  This is normally generated by a full install of the SDK, but we
  do it here manually since we do not do a full install."""
  # Preamble shared by both architectures: PATH/INCLUDE plus the arch switch.
  header = (
      '@echo off\n'
      ':: Generated by win_toolchain\\package_from_installed.py.\n'
      'set PATH=%~dp0..\\..\\Common7\\IDE;%PATH%\n'
      'set INCLUDE=%~dp0..\\..\\win_sdk\\Include\\um;'
      '%~dp0..\\..\\win_sdk\\Include\\shared;'
      '%~dp0..\\..\\win_sdk\\Include\\winrt;'
      '%~dp0..\\..\\ucrt\\Include\\10.0.10056.0\\ucrt;'
      '%~dp0..\\..\\VC\\include;'
      '%~dp0..\\..\\VC\\atlmfc\\include\n'
      'if "%1"=="/x64" goto x64\n')
  # x86. Always use amd64_x86 cross, not x86 on x86.
  x86_section = (
      'set PATH=%~dp0..\\..\\win_sdk\\bin\\x86;'
      '%~dp0..\\..\\VC\\bin\\amd64_x86;'
      '%~dp0..\\..\\VC\\bin\\amd64;'  # Needed for mspdb1x0.dll.
      '%PATH%\n'
      'set LIB=%~dp0..\\..\\VC\\lib;'
      '%~dp0..\\..\\win_sdk\\Lib\\winv6.3\\um\\x86;'
      '%~dp0..\\..\\ucrt\\Lib\\10.0.10056.0\\ucrt\\x86;'
      '%~dp0..\\..\\VC\\atlmfc\\lib\n'
      'goto :EOF\n')
  # x64.
  x64_section = (
      ':x64\n'
      'set PATH=%~dp0..\\..\\win_sdk\\bin\\x64;'
      '%~dp0..\\..\\VC\\bin\\amd64;'
      '%PATH%\n'
      'set LIB=%~dp0..\\..\\VC\\lib\\amd64;'
      '%~dp0..\\..\\win_sdk\\Lib\\winv6.3\\um\\x64;'
      '%~dp0..\\..\\ucrt\\Lib\\10.0.10056.0\\ucrt\\x64;'
      '%~dp0..\\..\\VC\\atlmfc\\lib\\amd64\n')
  with open(os.path.join(
      target_dir, r'win_sdk\bin\SetEnv.cmd'), 'w') as f:
    f.write(header + x86_section + x64_section)
def AddEnvSetup(files):
  """We need to generate this file in the same way that the "from pieces"
  script does, so pull that in here.

  Appends (source_path, archive_path) tuples for SetEnv.cmd and the
  VS_VERSION marker file to |files|.
  """
  tempdir = tempfile.mkdtemp()
  os.makedirs(os.path.join(tempdir, 'win_sdk', 'bin'))
  GenerateSetEnvCmd(tempdir)
  files.append((os.path.join(tempdir, 'win_sdk', 'bin', 'SetEnv.cmd'),
                'win_sdk\\bin\\SetEnv.cmd'))
  vs_version_file = os.path.join(tempdir, 'VS_VERSION')
  with open(vs_version_file, 'wb') as version:
    # write() instead of the Python-2-only ``print >>version`` chevron
    # syntax; emits the same bytes (the version string plus a newline).
    version.write(VS_VERSION + '\n')
  files.append((vs_version_file, 'VS_VERSION'))
def RenameToSha1(output):
  """Determine the hash in the same way that the unzipper does to rename the
  .zip file."""
  print('Extracting to determine hash...')
  tempdir = tempfile.mkdtemp()
  old_dir = os.getcwd()
  try:
    os.chdir(tempdir)
    rel_dir = 'vs_files'
    with zipfile.ZipFile(
        os.path.join(old_dir, output), 'r', zipfile.ZIP_DEFLATED, True) as zf:
      zf.extractall(rel_dir)
    print('Hashing...')
    sha1 = get_toolchain_if_necessary.CalculateHash(rel_dir)
  finally:
    # Restore the working directory and clean up even if extraction or
    # hashing raises, so the caller is not left chdir'd into a deleted dir.
    os.chdir(old_dir)
    shutil.rmtree(tempdir)
  final_name = sha1 + '.zip'
  os.rename(output, final_name)
  print('Renamed %s to %s.' % (output, final_name))
def main():
  """Package an installed VS 2013/2015 toolchain into a hash-named zip.

  Returns a process exit status: 0 on success, 1 on bad usage.
  """
  if len(sys.argv) != 2 or sys.argv[1] not in ('2013', '2015'):
    print('Usage: package_from_installed.py 2013|2015')
    return 1

  global VS_VERSION
  VS_VERSION = sys.argv[1]

  print('Building file list...')
  files = BuildFileList()

  AddEnvSetup(files)

  if False:
    # Debug aid: flip to True to dump the disk->archive mapping and exit
    # without building the zip.
    for f in files:
      print('%s -> %s' % (f[0], f[1]))
    return 0

  output = 'out.zip'
  if os.path.exists(output):
    os.unlink(output)
  count = 0
  with zipfile.ZipFile(output, 'w', zipfile.ZIP_DEFLATED, True) as zf:
    for disk_name, archive_name in files:
      # One-line progress indicator; \r rewinds to overwrite in place.
      sys.stdout.write('\r%d/%d ...%s' % (count, len(files), disk_name[-40:]))
      sys.stdout.flush()
      count += 1
      zf.write(disk_name, archive_name)
  sys.stdout.write('\rWrote to %s.%s\n' % (output, ' ' * 50))
  sys.stdout.flush()

  RenameToSha1(output)

  return 0
# Script entry point; the exit status of main() becomes the process status.
if __name__ == '__main__':
  sys.exit(main())
| |
import unittest
import numpy
import chainer
from chainer import cuda
import chainer.functions as F
from chainer import gradient_check
from chainer.testing import attr
from chainer.testing import condition
# Initialize CUDA once at import time so the @attr.gpu tests can run when a
# device is available; CPU-only tests are unaffected.
if cuda.available:
    cuda.init()
class BinaryOpTestBase(object):

    """Mixin exercising Variable binary operators (+, -, *, /, **) and their
    reflected (__r*__) variants on CPU and GPU, forward and backward.

    Subclasses must implement ``make_data`` returning ``(x1, x2, gy)``:
    the two operand arrays and an upstream gradient of matching shape.
    """

    def make_data(self):
        # Subclass hook providing (x1, x2, gy).
        raise NotImplementedError()

    def setUp(self):
        self.x1, self.x2, self.gy = self.make_data()

    def check_forward(self, op, x1_data, x2_data):
        # op applied through Variables must match op applied to the raw
        # CPU arrays stored in setUp().
        x1 = chainer.Variable(x1_data)
        x2 = chainer.Variable(x2_data)
        y = op(x1, x2)
        if isinstance(y.data, cuda.GPUArray):
            # A GPU result must be backed by a real device allocation.
            self.assertTrue(hasattr(y.data.gpudata, 'device'))
        gradient_check.assert_allclose(op(self.x1, self.x2), y.data)

    def forward_cpu(self, op):
        self.check_forward(op, self.x1, self.x2)

    @condition.retry(3)
    def test_add_forward_cpu(self):
        self.forward_cpu(lambda x, y: x + y)

    @condition.retry(3)
    def test_sub_forward_cpu(self):
        self.forward_cpu(lambda x, y: x - y)

    @condition.retry(3)
    def test_mul_forward_cpu(self):
        self.forward_cpu(lambda x, y: x * y)

    @condition.retry(3)
    def test_div_forward_cpu(self):
        self.forward_cpu(lambda x, y: x / y)

    @condition.retry(3)
    def test_pow_forward_cpu(self):
        self.forward_cpu(lambda x, y: x ** y)

    # Reflected variants are invoked explicitly so the dunder itself is
    # covered, not just the operator syntax.
    @condition.retry(3)
    def test_radd_forward_cpu(self):
        self.forward_cpu(lambda x, y: y.__radd__(x))

    @condition.retry(3)
    def test_rsub_forward_cpu(self):
        self.forward_cpu(lambda x, y: y.__rsub__(x))

    @condition.retry(3)
    def test_rmul_forward_cpu(self):
        self.forward_cpu(lambda x, y: y.__rmul__(x))

    @condition.retry(3)
    def test_rdiv_forward_cpu(self):
        self.forward_cpu(lambda x, y: y.__rtruediv__(x))

    @condition.retry(3)
    def test_rpow_forward_cpu(self):
        self.forward_cpu(lambda x, y: y.__rpow__(x))

    def forward_gpu(self, op):
        self.check_forward(op, cuda.to_gpu(self.x1), cuda.to_gpu(self.x2))

    @attr.gpu
    @condition.retry(3)
    def test_add_forward_gpu(self):
        self.forward_gpu(lambda x, y: x + y)

    @attr.gpu
    @condition.retry(3)
    def test_sub_forward_gpu(self):
        self.forward_gpu(lambda x, y: x - y)

    @attr.gpu
    @condition.retry(3)
    def test_mul_forward_gpu(self):
        self.forward_gpu(lambda x, y: x * y)

    @attr.gpu
    @condition.retry(3)
    def test_div_forward_gpu(self):
        self.forward_gpu(lambda x, y: x / y)

    @attr.gpu
    @condition.retry(3)
    def test_pow_forward_gpu(self):
        self.forward_gpu(lambda x, y: x ** y)

    @attr.gpu
    @condition.retry(3)
    def test_radd_forward_gpu(self):
        self.forward_gpu(lambda x, y: y.__radd__(x))

    @attr.gpu
    @condition.retry(3)
    def test_rsub_forward_gpu(self):
        self.forward_gpu(lambda x, y: y.__rsub__(x))

    @attr.gpu
    @condition.retry(3)
    def test_rmul_forward_gpu(self):
        self.forward_gpu(lambda x, y: y.__rmul__(x))

    @attr.gpu
    @condition.retry(3)
    def test_rdiv_forward_gpu(self):
        self.forward_gpu(lambda x, y: y.__rtruediv__(x))

    @attr.gpu
    @condition.retry(3)
    def test_rpow_forward_gpu(self):
        self.forward_gpu(lambda x, y: y.__rpow__(x))

    @attr.gpu
    def test_add_constant_allocation(self):
        # Adding a Python scalar must keep the result on the GPU.
        x = 0
        y = chainer.Variable(cuda.ones((1,)))
        z = y + x
        self.assertEqual(1, z.data.get()[0])
        self.assertTrue(hasattr(z.data.gpudata, 'device'))

    def check_backward(self, op, x1_data, x2_data, y_grad, atol):
        # Compare analytic gradients (backward()) against numerical ones.
        x1 = chainer.Variable(x1_data)
        x2 = chainer.Variable(x2_data)
        y = op(x1, x2)
        y.grad = y_grad
        y.backward()
        func = y.creator
        f = lambda: func.forward((x1.data, x2.data))
        gx1, gx2 = gradient_check.numerical_grad(
            f, (x1.data, x2.data), (y.grad,))
        gradient_check.assert_allclose(gx1, x1.grad, atol=atol)
        gradient_check.assert_allclose(gx2, x2.grad, atol=atol)

    def backward_cpu(self, op, atol=1e-5):
        self.check_backward(op, self.x1, self.x2, self.gy, atol)

    @condition.retry(3)
    def test_add_backward_cpu(self):
        self.backward_cpu(lambda x, y: x + y)

    @condition.retry(3)
    def test_sub_backward_cpu(self):
        self.backward_cpu(lambda x, y: x - y)

    @condition.retry(3)
    def test_mul_backward_cpu(self):
        self.backward_cpu(lambda x, y: x * y)

    @condition.retry(3)
    def test_div_backward_cpu(self):
        self.backward_cpu(lambda x, y: x / y)

    @condition.retry(3)
    def test_pow_backward_cpu(self):
        # pow gradients are less numerically stable; loosen the tolerance.
        self.backward_cpu(lambda x, y: x ** y, atol=1e-4)

    def backward_gpu(self, op, atol=1e-5):
        self.check_backward(
            op, cuda.to_gpu(self.x1), cuda.to_gpu(self.x2),
            cuda.to_gpu(self.gy), atol)

    @attr.gpu
    @condition.retry(3)
    def test_add_backward_gpu(self):
        self.backward_gpu(lambda x, y: x + y)

    @attr.gpu
    @condition.retry(3)
    def test_sub_backward_gpu(self):
        self.backward_gpu(lambda x, y: x - y)

    @attr.gpu
    @condition.retry(3)
    def test_mul_backward_gpu(self):
        self.backward_gpu(lambda x, y: x * y)

    @attr.gpu
    @condition.retry(3)
    def test_div_backward_gpu(self):
        self.backward_gpu(lambda x, y: x / y)

    @attr.gpu
    @condition.retry(3)
    def test_pow_backward_gpu(self):
        self.backward_gpu(lambda x, y: x ** y, atol=1e-4)
class TestBinaryOpSimple(BinaryOpTestBase, unittest.TestCase):

    """Binary-operator tests on (3, 2)-shaped float32 arrays."""

    def make_data(self):
        shape = (3, 2)
        dtype = numpy.float32
        x1 = numpy.random.uniform(0.5, 1, shape).astype(dtype)
        x2 = numpy.random.uniform(0.5, 1, shape).astype(dtype)
        gy = numpy.random.uniform(-1, 1, shape).astype(dtype)
        return x1, x2, gy
class TestBinaryOpZeroDimension(BinaryOpTestBase, unittest.TestCase):

    """Binary-operator tests on zero-dimensional (scalar) float32 arrays."""

    def make_data(self):
        shape = ()
        dtype = numpy.float32
        x1 = numpy.random.uniform(0.5, 1, shape).astype(dtype)
        x2 = numpy.random.uniform(0.5, 1, shape).astype(dtype)
        gy = numpy.random.uniform(-1, 1, shape).astype(dtype)
        return x1, x2, gy
class TestBinaryOpConstant(unittest.TestCase):

    """Checks that combining a float32 Variable with Python/numpy scalar
    constants keeps both the result dtype and the gradient dtype float32."""

    def _test_constant(self, func):
        x_data = numpy.array(1, numpy.float32)

        # Python int constant.
        x1 = chainer.Variable(x_data)
        y1 = func(x1, 1)
        self.assertEqual(y1.data.dtype, numpy.float32)
        y1.backward()
        self.assertEqual(x1.grad.dtype, numpy.float32)

        # Python float constant.
        x2 = chainer.Variable(x_data)
        y2 = func(x2, 1.0)
        self.assertEqual(y2.data.dtype, numpy.float32)
        y2.backward()
        self.assertEqual(x2.grad.dtype, numpy.float32)

        # numpy integer scalar constant.
        x3 = chainer.Variable(x_data)
        y3 = func(x3, numpy.int64(1))
        self.assertEqual(y3.data.dtype, numpy.float32)
        y3.backward()
        self.assertEqual(x3.grad.dtype, numpy.float32)

        # Wider numpy float scalar must not promote the result to float64.
        x4 = chainer.Variable(x_data)
        y4 = func(x4, numpy.float64(1.0))
        self.assertEqual(y4.data.dtype, numpy.float32)
        y4.backward()
        self.assertEqual(x4.grad.dtype, numpy.float32)

    # Method names below fix the historical "constnt" misspelling; unittest
    # still discovers them via the unchanged ``test`` prefix.
    def test_add_constant(self):
        self._test_constant(lambda x, y: x + y)

    def test_radd_constant(self):
        self._test_constant(lambda x, y: y + x)

    def test_sub_constant(self):
        self._test_constant(lambda x, y: x - y)

    def test_rsub_constant(self):
        self._test_constant(lambda x, y: y - x)

    def test_mul_constant(self):
        self._test_constant(lambda x, y: x * y)

    def test_rmul_constant(self):
        self._test_constant(lambda x, y: y * x)

    def test_div_constant(self):
        self._test_constant(lambda x, y: x / y)

    def test_rdiv_constant(self):
        self._test_constant(lambda x, y: y / x)

    def test_pow_constant(self):
        self._test_constant(lambda x, y: x ** y)

    def test_rpow_constant(self):
        self._test_constant(lambda x, y: y ** x)
class VariableConstantOpTestBase(object):

    """Mixin exercising binary operators between a Variable and a scalar
    constant on CPU and GPU, forward and backward.

    Subclasses must implement ``make_data`` returning ``(x, gy, value)``.
    """

    def make_data(self):
        # Fixed: this stub was previously misspelled ``make_date``, so
        # setUp()'s call to make_data() raised AttributeError instead of the
        # intended NotImplementedError when a subclass forgot to override it.
        raise NotImplementedError()

    def setUp(self):
        self.x, self.gy, self.value = self.make_data()

    def check_forward(self, op, x_data):
        x = chainer.Variable(x_data)
        y = op(x, self.value)
        gradient_check.assert_allclose(
            op(self.x, self.value), y.data, atol=1e-7, rtol=1e-7)

    def forward_cpu(self, op):
        self.check_forward(op, self.x)

    @condition.retry(3)
    def test_add_forward_cpu(self):
        self.forward_cpu(lambda x, y: x + y)

    @condition.retry(3)
    def test_radd_forward_cpu(self):
        self.forward_cpu(lambda x, y: y + x)

    @condition.retry(3)
    def test_sub_forward_cpu(self):
        self.forward_cpu(lambda x, y: x - y)

    @condition.retry(3)
    def test_rsub_forward_cpu(self):
        self.forward_cpu(lambda x, y: y - x)

    @condition.retry(3)
    def test_mul_forward_cpu(self):
        self.forward_cpu(lambda x, y: x * y)

    @condition.retry(3)
    def test_rmul_forward_cpu(self):
        self.forward_cpu(lambda x, y: y * x)

    @condition.retry(3)
    def test_div_forward_cpu(self):
        self.forward_cpu(lambda x, y: x / y)

    @condition.retry(3)
    def test_rdiv_forward_cpu(self):
        self.forward_cpu(lambda x, y: y / x)

    @condition.retry(3)
    def test_pow_forward_cpu(self):
        self.forward_cpu(lambda x, y: x ** y)

    @condition.retry(3)
    def test_rpow_forward_cpu(self):
        self.forward_cpu(lambda x, y: y ** x)

    def forward_gpu(self, op):
        self.check_forward(op, cuda.to_gpu(self.x))

    @attr.gpu
    @condition.retry(3)
    def test_add_forward_gpu(self):
        self.forward_gpu(lambda x, y: x + y)

    @attr.gpu
    @condition.retry(3)
    def test_radd_forward_gpu(self):
        self.forward_gpu(lambda x, y: y + x)

    @attr.gpu
    @condition.retry(3)
    def test_sub_forward_gpu(self):
        self.forward_gpu(lambda x, y: x - y)

    @attr.gpu
    @condition.retry(3)
    def test_rsub_forward_gpu(self):
        self.forward_gpu(lambda x, y: y - x)

    @attr.gpu
    @condition.retry(3)
    def test_mul_forward_gpu(self):
        self.forward_gpu(lambda x, y: x * y)

    @attr.gpu
    @condition.retry(3)
    def test_rmul_forward_gpu(self):
        self.forward_gpu(lambda x, y: y * x)

    @attr.gpu
    @condition.retry(3)
    def test_div_forward_gpu(self):
        self.forward_gpu(lambda x, y: x / y)

    @attr.gpu
    @condition.retry(3)
    def test_rdiv_forward_gpu(self):
        self.forward_gpu(lambda x, y: y / x)

    @attr.gpu
    @condition.retry(3)
    def test_pow_forward_gpu(self):
        self.forward_gpu(lambda x, y: x ** y)

    @attr.gpu
    @condition.retry(3)
    def test_rpow_forward_gpu(self):
        self.forward_gpu(lambda x, y: y ** x)

    def check_backward(self, op, x_data, y_grad):
        # Compare the analytic gradient against a numerical one.
        x = chainer.Variable(x_data)
        y = op(x, self.value)
        y.grad = y_grad
        y.backward()
        func = y.creator
        f = lambda: func.forward((x.data,))
        gx, = gradient_check.numerical_grad(f, (x.data,), (y.grad,))
        gradient_check.assert_allclose(gx, x.grad)

    def backward_cpu(self, op):
        self.check_backward(op, self.x, self.gy)

    @condition.retry(3)
    def test_add_backward_cpu(self):
        self.backward_cpu(lambda x, y: x + y)

    @condition.retry(3)
    def test_radd_backward_cpu(self):
        self.backward_cpu(lambda x, y: y + x)

    @condition.retry(3)
    def test_sub_backward_cpu(self):
        self.backward_cpu(lambda x, y: x - y)

    @condition.retry(3)
    def test_rsub_backward_cpu(self):
        self.backward_cpu(lambda x, y: y - x)

    @condition.retry(3)
    def test_mul_backward_cpu(self):
        self.backward_cpu(lambda x, y: x * y)

    @condition.retry(3)
    def test_rmul_backward_cpu(self):
        self.backward_cpu(lambda x, y: y * x)

    @condition.retry(3)
    def test_div_backward_cpu(self):
        self.backward_cpu(lambda x, y: x / y)

    @condition.retry(3)
    def test_rdiv_backward_cpu(self):
        self.backward_cpu(lambda x, y: y / x)

    @condition.retry(3)
    def test_pow_backward_cpu(self):
        self.backward_cpu(lambda x, y: x ** y)

    @condition.retry(3)
    def test_rpow_backward_cpu(self):
        self.backward_cpu(lambda x, y: y ** x)

    def backward_gpu(self, op):
        self.check_backward(op, cuda.to_gpu(self.x), cuda.to_gpu(self.gy))

    @attr.gpu
    @condition.retry(3)
    def test_add_backward_gpu(self):
        self.backward_gpu(lambda x, y: x + y)

    @attr.gpu
    @condition.retry(3)
    def test_radd_backward_gpu(self):
        self.backward_gpu(lambda x, y: y + x)

    @attr.gpu
    @condition.retry(3)
    def test_sub_backward_gpu(self):
        self.backward_gpu(lambda x, y: x - y)

    @attr.gpu
    @condition.retry(3)
    def test_rsub_backward_gpu(self):
        self.backward_gpu(lambda x, y: y - x)

    @attr.gpu
    @condition.retry(3)
    def test_mul_backward_gpu(self):
        self.backward_gpu(lambda x, y: x * y)

    @attr.gpu
    @condition.retry(3)
    def test_rmul_backward_gpu(self):
        self.backward_gpu(lambda x, y: y * x)

    @attr.gpu
    @condition.retry(3)
    def test_div_backward_gpu(self):
        self.backward_gpu(lambda x, y: x / y)

    @attr.gpu
    @condition.retry(3)
    def test_rdiv_backward_gpu(self):
        self.backward_gpu(lambda x, y: y / x)

    @attr.gpu
    @condition.retry(3)
    def test_pow_backward_gpu(self):
        self.backward_gpu(lambda x, y: x ** y)

    @attr.gpu
    @condition.retry(3)
    def test_rpow_backward_gpu(self):
        self.backward_gpu(lambda x, y: y ** x)
class TestVariableConstantOpSimple(VariableConstantOpTestBase,
                                   unittest.TestCase):

    """Variable-vs-scalar tests on (3, 2)-shaped float32 arrays."""

    def make_data(self):
        shape = (3, 2)
        x = numpy.random.uniform(0.5, 1, shape).astype(numpy.float32)
        gy = numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
        return x, gy, 0.5
class TestVariableConstantOpZeroDimension(VariableConstantOpTestBase,
                                          unittest.TestCase):

    """Variable-vs-scalar tests on zero-dimensional float32 arrays."""

    def make_data(self):
        shape = ()
        x = numpy.random.uniform(0.5, 1, shape).astype(numpy.float32)
        gy = numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
        return x, gy, 0.5
class TestVariableConstantArrayOp(unittest.TestCase):

    """Exercises binary operators between a Variable and a constant numpy
    array on CPU and GPU, forward and backward.

    ``positive=True`` forces the constant to be non-negative so that
    ``value ** x`` (rpow) stays real-valued.
    """

    def setUp(self):
        self.x = numpy.random.uniform(.5, 1, (3, 2)).astype(numpy.float32)
        self.gy = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
        self.value = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)

    def check_forward(self, op, x_data, gpu, positive):
        value = self.value
        if positive:
            value = numpy.abs(value)
        v = value
        if gpu:
            v = cuda.to_gpu(v)
        x = chainer.Variable(x_data)
        y = op(x, v)
        # Compare against the plain numpy computation on the CPU copies.
        gradient_check.assert_allclose(
            op(self.x, value), y.data, atol=1e-6, rtol=1e-6)

    def forward_cpu(self, op, positive=False):
        self.check_forward(op, self.x, False, positive)

    @condition.retry(3)
    def test_add_forward_cpu(self):
        self.forward_cpu(lambda x, y: x + y)

    @condition.retry(3)
    def test_radd_forward_cpu(self):
        self.forward_cpu(lambda x, y: y + x)

    @condition.retry(3)
    def test_sub_forward_cpu(self):
        self.forward_cpu(lambda x, y: x - y)

    @condition.retry(3)
    def test_rsub_forward_cpu(self):
        self.forward_cpu(lambda x, y: y - x)

    @condition.retry(3)
    def test_mul_forward_cpu(self):
        self.forward_cpu(lambda x, y: x * y)

    @condition.retry(3)
    def test_rmul_forward_cpu(self):
        self.forward_cpu(lambda x, y: y * x)

    @condition.retry(3)
    def test_div_forward_cpu(self):
        self.forward_cpu(lambda x, y: x / y)

    @condition.retry(3)
    def test_rdiv_forward_cpu(self):
        self.forward_cpu(lambda x, y: y / x)

    @condition.retry(3)
    def test_pow_forward_cpu(self):
        self.forward_cpu(lambda x, y: x ** y)

    @condition.retry(3)
    def test_rpow_forward_cpu(self):
        self.forward_cpu(lambda x, y: y ** x, positive=True)

    def forward_gpu(self, op, positive=False):
        self.check_forward(op, cuda.to_gpu(self.x), True, positive)

    @attr.gpu
    @condition.retry(3)
    def test_add_forward_gpu(self):
        self.forward_gpu(lambda x, y: x + y)

    @attr.gpu
    @condition.retry(3)
    def test_radd_forward_gpu(self):
        self.forward_gpu(lambda x, y: y + x)

    @attr.gpu
    @condition.retry(3)
    def test_sub_forward_gpu(self):
        self.forward_gpu(lambda x, y: x - y)

    @attr.gpu
    @condition.retry(3)
    def test_rsub_forward_gpu(self):
        self.forward_gpu(lambda x, y: y - x)

    @attr.gpu
    @condition.retry(3)
    def test_mul_forward_gpu(self):
        self.forward_gpu(lambda x, y: x * y)

    @attr.gpu
    @condition.retry(3)
    def test_rmul_forward_gpu(self):
        self.forward_gpu(lambda x, y: y * x)

    @attr.gpu
    @condition.retry(3)
    def test_div_forward_gpu(self):
        self.forward_gpu(lambda x, y: x / y)

    @attr.gpu
    @condition.retry(3)
    def test_rdiv_forward_gpu(self):
        self.forward_gpu(lambda x, y: y / x)

    @attr.gpu
    @condition.retry(3)
    def test_pow_forward_gpu(self):
        self.forward_gpu(lambda x, y: x ** y)

    @attr.gpu
    @condition.retry(3)
    def test_rpow_forward_gpu(self):
        self.forward_gpu(lambda x, y: y ** x, positive=True)

    def check_backward(self, op, x_data, y_grad, gpu, positive):
        value = self.value
        if positive:
            value = numpy.abs(value)
        if gpu:
            value = cuda.to_gpu(value)
        x = chainer.Variable(x_data)
        y = op(x, value)
        y.grad = y_grad
        y.backward()
        func = y.creator
        f = lambda: func.forward((x.data,))
        gx, = gradient_check.numerical_grad(f, (x.data,), (y.grad,))
        gradient_check.assert_allclose(gx, x.grad, atol=1e-4, rtol=1e-4)

    def backward_cpu(self, op, positive=False):
        self.check_backward(op, self.x, self.gy, False, positive)

    @condition.retry(3)
    def test_add_backward_cpu(self):
        self.backward_cpu(lambda x, y: x + y)

    @condition.retry(3)
    def test_radd_backward_cpu(self):
        self.backward_cpu(lambda x, y: y + x)

    @condition.retry(3)
    def test_sub_backward_cpu(self):
        self.backward_cpu(lambda x, y: x - y)

    @condition.retry(3)
    def test_rsub_backward_cpu(self):
        self.backward_cpu(lambda x, y: y - x)

    @condition.retry(3)
    def test_mul_backward_cpu(self):
        self.backward_cpu(lambda x, y: x * y)

    @condition.retry(3)
    def test_rmul_backward_cpu(self):
        self.backward_cpu(lambda x, y: y * x)

    @condition.retry(3)
    def test_div_backward_cpu(self):
        self.backward_cpu(lambda x, y: x / y)

    @condition.retry(3)
    def test_rdiv_backward_cpu(self):
        self.backward_cpu(lambda x, y: y / x)

    @condition.retry(3)
    def test_pow_backward_cpu(self):
        self.backward_cpu(lambda x, y: x ** y)

    @condition.retry(3)
    def test_rpow_backward_cpu(self):
        self.backward_cpu(lambda x, y: y ** x, positive=True)

    def backward_gpu(self, op, positive=False):
        self.check_backward(
            op, cuda.to_gpu(self.x), cuda.to_gpu(self.gy), True, positive)

    @attr.gpu
    @condition.retry(3)
    def test_add_backward_gpu(self):
        self.backward_gpu(lambda x, y: x + y)

    @attr.gpu
    @condition.retry(3)
    def test_radd_backward_gpu(self):
        self.backward_gpu(lambda x, y: y + x)

    @attr.gpu
    @condition.retry(3)
    def test_sub_backward_gpu(self):
        self.backward_gpu(lambda x, y: x - y)

    # Fixed: this GPU case was missing while its CPU twin existed, leaving
    # __rsub__'s backward path untested on GPU.
    @attr.gpu
    @condition.retry(3)
    def test_rsub_backward_gpu(self):
        self.backward_gpu(lambda x, y: y - x)

    @attr.gpu
    @condition.retry(3)
    def test_mul_backward_gpu(self):
        self.backward_gpu(lambda x, y: x * y)

    @attr.gpu
    @condition.retry(3)
    def test_rmul_backward_gpu(self):
        self.backward_gpu(lambda x, y: y * x)

    @attr.gpu
    @condition.retry(3)
    def test_div_backward_gpu(self):
        self.backward_gpu(lambda x, y: x / y)

    @attr.gpu
    @condition.retry(3)
    def test_rdiv_backward_gpu(self):
        self.backward_gpu(lambda x, y: y / x)

    @attr.gpu
    @condition.retry(3)
    def test_pow_backward_gpu(self):
        self.backward_gpu(lambda x, y: x ** y)

    @attr.gpu
    @condition.retry(3)
    def test_rpow_backward_gpu(self):
        self.backward_gpu(lambda x, y: y ** x, positive=True)
class UnaryFunctionsTestBase(object):

    """Mixin exercising unary operations (neg, abs, exp, log) on CPU and
    GPU, forward and backward.

    Subclasses must implement ``make_data`` returning ``(x, gy)``: the
    input array and an upstream gradient of matching shape.
    """

    def make_data(self):
        # Subclass hook providing (x, gy).
        raise NotImplementedError()

    def setUp(self):
        self.x, self.gy = self.make_data()

    def check_forward(self, op, op_np, x_data):
        # The chainer op must match the corresponding numpy reference op_np.
        x = chainer.Variable(x_data)
        y = op(x)
        gradient_check.assert_allclose(
            op_np(self.x), y.data, atol=1e-7, rtol=1e-7)

    def forward_cpu(self, op, op_np):
        self.check_forward(op, op_np, self.x)

    @condition.retry(3)
    def test_neg_forward_cpu(self):
        self.forward_cpu(lambda x: -x, lambda x: -x)

    @condition.retry(3)
    def test_abs_forward_cpu(self):
        self.forward_cpu(lambda x: abs(x), lambda x: abs(x))

    @condition.retry(3)
    def test_exp_forward_cpu(self):
        self.forward_cpu(F.exp, numpy.exp)

    @condition.retry(3)
    def test_log_forward_cpu(self):
        self.forward_cpu(F.log, numpy.log)

    def forward_gpu(self, op, op_np):
        self.check_forward(op, op_np, cuda.to_gpu(self.x))

    @attr.gpu
    @condition.retry(3)
    def test_neg_forward_gpu(self):
        self.forward_gpu(lambda x: -x, lambda x: -x)

    @attr.gpu
    @condition.retry(3)
    def test_abs_forward_gpu(self):
        self.forward_gpu(lambda x: abs(x), lambda x: abs(x))

    @attr.gpu
    @condition.retry(3)
    def test_exp_forward_gpu(self):
        self.forward_gpu(F.exp, numpy.exp)

    @attr.gpu
    @condition.retry(3)
    def test_log_forward_gpu(self):
        self.forward_gpu(F.log, numpy.log)

    def check_backward(self, op, x_data, y_grad):
        # Compare the analytic gradient from backward() against a numerical
        # gradient of the op's forward pass.
        x = chainer.Variable(x_data)
        y = op(x)
        y.grad = y_grad
        y.backward()
        func = y.creator
        f = lambda: func.forward((x.data,))
        gx, = gradient_check.numerical_grad(f, (x.data,), (y.grad,))
        gradient_check.assert_allclose(gx, x.grad)

    def backward_cpu(self, op):
        self.check_backward(op, self.x, self.gy)

    @condition.retry(3)
    def test_neg_backward_cpu(self):
        self.backward_cpu(lambda x: -x)

    @condition.retry(3)
    def test_abs_backward_cpu(self):
        self.backward_cpu(lambda x: abs(x))

    @condition.retry(3)
    def test_exp_backward_cpu(self):
        self.backward_cpu(F.exp)

    @condition.retry(3)
    def test_log_backward_cpu(self):
        self.backward_cpu(F.log)

    def backward_gpu(self, op):
        self.check_backward(op, cuda.to_gpu(self.x), cuda.to_gpu(self.gy))

    @attr.gpu
    @condition.retry(3)
    def test_neg_backward_gpu(self):
        self.backward_gpu(lambda x: -x)

    @attr.gpu
    @condition.retry(3)
    def test_abs_backward_gpu(self):
        self.backward_gpu(lambda x: abs(x))

    @attr.gpu
    @condition.retry(3)
    def test_exp_backward_gpu(self):
        self.backward_gpu(F.exp)

    @attr.gpu
    @condition.retry(3)
    def test_log_backward_gpu(self):
        self.backward_gpu(F.log)
class TestUnaryFunctionsSimple(UnaryFunctionsTestBase, unittest.TestCase):

    """Unary-function tests on (3, 2)-shaped float32 arrays."""

    def make_data(self):
        shape = (3, 2)
        x = numpy.random.uniform(0.5, 1, shape).astype(numpy.float32)
        gy = numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
        return x, gy
class TestUnaryFunctionsZeroDimension(UnaryFunctionsTestBase,
                                      unittest.TestCase):

    """Unary-function tests on zero-dimensional float32 arrays."""

    def make_data(self):
        shape = ()
        x = numpy.random.uniform(0.5, 1, shape).astype(numpy.float32)
        gy = numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
        return x, gy
| |
# system configuration generated and used by the sysconfig module
build_time_vars = {'AC_APPLE_UNIVERSAL_BUILD': 1,
'AIX_GENUINE_CPLUSPLUS': 0,
'AR': 'ar',
'ARFLAGS': 'rc',
'ASDLGEN': './Parser/asdl_c.py',
'ASDLGEN_FILES': './Parser/asdl.py ./Parser/asdl_c.py',
'AST_ASDL': './Parser/Python.asdl',
'AST_C': 'Python/Python-ast.c',
'AST_C_DIR': 'Python',
'AST_H': 'Include/Python-ast.h',
'AST_H_DIR': 'Include',
'ATHEOS_THREADS': 0,
'BASECFLAGS': '-fno-strict-aliasing -fno-common -dynamic',
'BASEMODLIBS': '',
'BEOS_THREADS': 0,
'BINDIR': '/usr/local/bin',
'BINLIBDEST': '/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7',
'BLDLIBRARY': '',
'BLDSHARED': 'cc -bundle -undefined dynamic_lookup -arch x86_64 -arch i386 -Wl,-F.',
'BUILDEXE': '.exe',
'BUILDPYTHON': 'python.exe',
'CC': 'cc',
'CCSHARED': '-arch x86_64 -arch i386 -pipe',
'CFLAGS': '-fno-strict-aliasing -fno-common -dynamic -arch x86_64 -arch i386 -g -Os -pipe -fno-common -fno-strict-aliasing -fwrapv -mno-fused-madd -DENABLE_DTRACE -DMACOSX -DNDEBUG -Wall -Wstrict-prototypes -Wshorten-64-to-32 -DNDEBUG -g -fwrapv -Os -Wall -Wstrict-prototypes -DENABLE_DTRACE',
'CFLAGSFORSHARED': '-arch x86_64 -arch i386 -pipe',
'CONFIGFILES': 'configure configure.ac acconfig.h pyconfig.h.in Makefile.pre.in',
'CONFIG_ARGS': "'--prefix=/usr' '--mandir=/usr/share/man' '--infodir=/usr/share/info' '--disable-dependency-tracking' '--enable-ipv6' '--with-system-expat' '--with-threads' '--enable-framework=/System/Library/Frameworks' '--enable-toolbox-glue' '--with-system-ffi' 'CC=cc' 'CFLAGS=-arch x86_64 -arch i386 -g -Os -pipe -fno-common -fno-strict-aliasing -fwrapv -mno-fused-madd -DENABLE_DTRACE -DMACOSX -DNDEBUG -Wall -Wstrict-prototypes -Wshorten-64-to-32' 'LDFLAGS=-arch x86_64 -arch i386 -Wl,-F.'",
'CONFINCLUDEDIR': '/System/Library/Frameworks/Python.framework/Versions/2.7/include',
'CONFINCLUDEPY': '/System/Library/Frameworks/Python.framework/Versions/2.7/include/python2.7',
'COREPYTHONPATH': ':plat-darwin:plat-mac:plat-mac/lib-scriptpackages:../../Extras/lib/python:lib-tk:lib-old',
'CPPFLAGS': '-I. -IInclude -I./Include',
'CXX': 'c++',
'C_THREADS': 0,
'DESTDIRS': '/System/Library/Frameworks/Python.framework/Versions/2.7 /System/Library/Frameworks/Python.framework/Versions/2.7/lib /System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7 /System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/lib-dynload',
'DESTLIB': '/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7',
'DESTPATH': '',
'DESTSHARED': '/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/lib-dynload',
'DIRMODE': 755,
'DIST': 'README ChangeLog configure configure.ac acconfig.h pyconfig.h.in Makefile.pre.in Include Lib Misc Demo Ext-dummy',
'DISTDIRS': 'Include Lib Misc Demo Ext-dummy',
'DISTFILES': 'README ChangeLog configure configure.ac acconfig.h pyconfig.h.in Makefile.pre.in',
'DLINCLDIR': '.',
'DLLLIBRARY': '',
'DOUBLE_IS_ARM_MIXED_ENDIAN_IEEE754': 0,
'DOUBLE_IS_BIG_ENDIAN_IEEE754': 0,
'DOUBLE_IS_LITTLE_ENDIAN_IEEE754': 1,
'DYNLOADFILE': 'dynload_shlib.o',
'ENABLE_IPV6': 1,
'EXE': '',
'EXEMODE': 755,
'EXTRAMACHDEPPATH': ':plat-mac:plat-mac/lib-scriptpackages',
'EXTRAPLATDIR': 'plat-mac plat-mac/Carbon plat-mac/lib-scriptpackages \\',
'EXTRASPATH': ':../../Extras/lib/python',
'EXTRATESTOPTS': '',
'EXTRA_CFLAGS': '-DENABLE_DTRACE',
'FILEMODE': 644,
'FLOCK_NEEDS_LIBBSD': 0,
'GETPGRP_HAVE_ARG': 0,
'GETTIMEOFDAY_NO_TZ': 0,
'GLHACK': '-Dclear=__GLclear',
'GNULD': 'no',
'GRAMMAR_C': 'Python/graminit.c',
'GRAMMAR_H': 'Include/graminit.h',
'GRAMMAR_INPUT': './Grammar/Grammar',
'HAVE_ACOSH': 1,
'HAVE_ADDRINFO': 1,
'HAVE_ALARM': 1,
'HAVE_ALTZONE': 0,
'HAVE_ASINH': 1,
'HAVE_ASM_TYPES_H': 0,
'HAVE_ATANH': 1,
'HAVE_ATTRIBUTE_FORMAT_PARSETUPLE': 0,
'HAVE_BIND_TEXTDOMAIN_CODESET': 0,
'HAVE_BLUETOOTH_BLUETOOTH_H': 0,
'HAVE_BLUETOOTH_H': 0,
'HAVE_BROKEN_NICE': 0,
'HAVE_BROKEN_PIPE_BUF': 0,
'HAVE_BROKEN_POLL': 0,
'HAVE_BROKEN_POSIX_SEMAPHORES': 0,
'HAVE_BROKEN_PTHREAD_SIGMASK': 0,
'HAVE_BROKEN_SEM_GETVALUE': 1,
'HAVE_BROKEN_UNSETENV': 0,
'HAVE_C99_BOOL': 1,
'HAVE_CHFLAGS': 1,
'HAVE_CHOWN': 1,
'HAVE_CHROOT': 1,
'HAVE_CLOCK': 1,
'HAVE_CONFSTR': 1,
'HAVE_CONIO_H': 0,
'HAVE_COPYSIGN': 1,
'HAVE_CTERMID': 1,
'HAVE_CTERMID_R': 1,
'HAVE_CURSES_H': 1,
'HAVE_CURSES_IS_TERM_RESIZED': 1,
'HAVE_CURSES_RESIZETERM': 1,
'HAVE_CURSES_RESIZE_TERM': 1,
'HAVE_DECL_ISFINITE': 1,
'HAVE_DECL_ISINF': 1,
'HAVE_DECL_ISNAN': 1,
'HAVE_DECL_TZNAME': 0,
'HAVE_DEVICE_MACROS': 1,
'HAVE_DEV_PTC': 0,
'HAVE_DEV_PTMX': 1,
'HAVE_DIRECT_H': 0,
'HAVE_DIRENT_H': 1,
'HAVE_DLFCN_H': 1,
'HAVE_DLOPEN': 1,
'HAVE_DUP2': 1,
'HAVE_DYNAMIC_LOADING': 1,
'HAVE_EPOLL': 0,
'HAVE_ERF': 1,
'HAVE_ERFC': 1,
'HAVE_ERRNO_H': 1,
'HAVE_EXECV': 1,
'HAVE_EXPM1': 1,
'HAVE_FCHDIR': 1,
'HAVE_FCHMOD': 1,
'HAVE_FCHOWN': 1,
'HAVE_FCNTL_H': 1,
'HAVE_FDATASYNC': 0,
'HAVE_FINITE': 1,
'HAVE_FLOCK': 1,
'HAVE_FORK': 1,
'HAVE_FORKPTY': 1,
'HAVE_FPATHCONF': 1,
'HAVE_FSEEK64': 0,
'HAVE_FSEEKO': 1,
'HAVE_FSTATVFS': 1,
'HAVE_FSYNC': 1,
'HAVE_FTELL64': 0,
'HAVE_FTELLO': 1,
'HAVE_FTIME': 1,
'HAVE_FTRUNCATE': 1,
'HAVE_GAI_STRERROR': 1,
'HAVE_GAMMA': 1,
'HAVE_GCC_ASM_FOR_X87': 1,
'HAVE_GETADDRINFO': 1,
'HAVE_GETCWD': 1,
'HAVE_GETC_UNLOCKED': 1,
'HAVE_GETGROUPS': 1,
'HAVE_GETHOSTBYNAME': 1,
'HAVE_GETHOSTBYNAME_R': 0,
'HAVE_GETHOSTBYNAME_R_3_ARG': 0,
'HAVE_GETHOSTBYNAME_R_5_ARG': 0,
'HAVE_GETHOSTBYNAME_R_6_ARG': 0,
'HAVE_GETITIMER': 1,
'HAVE_GETLOADAVG': 1,
'HAVE_GETLOGIN': 1,
'HAVE_GETNAMEINFO': 1,
'HAVE_GETPAGESIZE': 1,
'HAVE_GETPEERNAME': 1,
'HAVE_GETPGID': 1,
'HAVE_GETPGRP': 1,
'HAVE_GETPID': 1,
'HAVE_GETPRIORITY': 1,
'HAVE_GETPWENT': 1,
'HAVE_GETRESGID': 0,
'HAVE_GETRESUID': 0,
'HAVE_GETSID': 1,
'HAVE_GETSPENT': 0,
'HAVE_GETSPNAM': 0,
'HAVE_GETTIMEOFDAY': 1,
'HAVE_GETWD': 1,
'HAVE_GRP_H': 1,
'HAVE_HSTRERROR': 1,
'HAVE_HYPOT': 1,
'HAVE_IEEEFP_H': 0,
'HAVE_INET_ATON': 1,
'HAVE_INET_PTON': 1,
'HAVE_INITGROUPS': 1,
'HAVE_INT32_T': 1,
'HAVE_INT64_T': 1,
'HAVE_INTTYPES_H': 1,
'HAVE_IO_H': 0,
'HAVE_KILL': 1,
'HAVE_KILLPG': 1,
'HAVE_KQUEUE': 1,
'HAVE_LANGINFO_H': 1,
'HAVE_LARGEFILE_SUPPORT': 1,
'HAVE_LCHFLAGS': 1,
'HAVE_LCHMOD': 1,
'HAVE_LCHOWN': 1,
'HAVE_LGAMMA': 1,
'HAVE_LIBDL': 1,
'HAVE_LIBDLD': 0,
'HAVE_LIBIEEE': 0,
'HAVE_LIBINTL_H': 0,
'HAVE_LIBREADLINE': 1,
'HAVE_LIBRESOLV': 0,
'HAVE_LIBUTIL_H': 1,
'HAVE_LINK': 1,
'HAVE_LINUX_NETLINK_H': 0,
'HAVE_LINUX_TIPC_H': 0,
'HAVE_LOG1P': 1,
'HAVE_LONG_DOUBLE': 1,
'HAVE_LONG_LONG': 1,
'HAVE_LSTAT': 1,
'HAVE_MAKEDEV': 1,
'HAVE_MEMMOVE': 1,
'HAVE_MEMORY_H': 1,
'HAVE_MKFIFO': 1,
'HAVE_MKNOD': 1,
'HAVE_MKTIME': 1,
'HAVE_MREMAP': 0,
'HAVE_NCURSES_H': 1,
'HAVE_NDIR_H': 0,
'HAVE_NETPACKET_PACKET_H': 0,
'HAVE_NICE': 1,
'HAVE_OPENPTY': 1,
'HAVE_OSX105_SDK': 1,
'HAVE_PATHCONF': 1,
'HAVE_PAUSE': 1,
'HAVE_PLOCK': 0,
'HAVE_POLL': 0,
'HAVE_POLL_H': 0,
'HAVE_PROCESS_H': 0,
'HAVE_PROTOTYPES': 1,
'HAVE_PTH': 0,
'HAVE_PTHREAD_DESTRUCTOR': 0,
'HAVE_PTHREAD_H': 1,
'HAVE_PTHREAD_INIT': 0,
'HAVE_PTHREAD_SIGMASK': 1,
'HAVE_PTY_H': 0,
'HAVE_PUTENV': 1,
'HAVE_READLINK': 1,
'HAVE_REALPATH': 1,
'HAVE_RL_CALLBACK': 1,
'HAVE_RL_CATCH_SIGNAL': 0,
'HAVE_RL_COMPLETION_APPEND_CHARACTER': 1,
'HAVE_RL_COMPLETION_DISPLAY_MATCHES_HOOK': 0,
'HAVE_RL_COMPLETION_MATCHES': 1,
'HAVE_RL_COMPLETION_SUPPRESS_APPEND': 0,
'HAVE_RL_PRE_INPUT_HOOK': 1,
'HAVE_ROUND': 1,
'HAVE_SELECT': 1,
'HAVE_SEM_GETVALUE': 1,
'HAVE_SEM_OPEN': 1,
'HAVE_SEM_TIMEDWAIT': 0,
'HAVE_SEM_UNLINK': 1,
'HAVE_SETEGID': 1,
'HAVE_SETEUID': 1,
'HAVE_SETGID': 1,
'HAVE_SETGROUPS': 1,
'HAVE_SETITIMER': 1,
'HAVE_SETLOCALE': 1,
'HAVE_SETPGID': 1,
'HAVE_SETPGRP': 1,
'HAVE_SETREGID': 1,
'HAVE_SETRESGID': 0,
'HAVE_SETRESUID': 0,
'HAVE_SETREUID': 1,
'HAVE_SETSID': 1,
'HAVE_SETUID': 1,
'HAVE_SETVBUF': 1,
'HAVE_SHADOW_H': 0,
'HAVE_SIGACTION': 1,
'HAVE_SIGINTERRUPT': 1,
'HAVE_SIGNAL_H': 1,
'HAVE_SIGRELSE': 1,
'HAVE_SNPRINTF': 1,
'HAVE_SOCKADDR_SA_LEN': 1,
'HAVE_SOCKADDR_STORAGE': 1,
'HAVE_SOCKETPAIR': 1,
'HAVE_SPAWN_H': 1,
'HAVE_SSIZE_T': 1,
'HAVE_STATVFS': 1,
'HAVE_STAT_TV_NSEC': 0,
'HAVE_STAT_TV_NSEC2': 1,
'HAVE_STDARG_PROTOTYPES': 1,
'HAVE_STDINT_H': 1,
'HAVE_STDLIB_H': 1,
'HAVE_STRDUP': 1,
'HAVE_STRFTIME': 1,
'HAVE_STRINGS_H': 1,
'HAVE_STRING_H': 1,
'HAVE_STROPTS_H': 0,
'HAVE_STRUCT_STAT_ST_BIRTHTIME': 1,
'HAVE_STRUCT_STAT_ST_BLKSIZE': 1,
'HAVE_STRUCT_STAT_ST_BLOCKS': 1,
'HAVE_STRUCT_STAT_ST_FLAGS': 1,
'HAVE_STRUCT_STAT_ST_GEN': 1,
'HAVE_STRUCT_STAT_ST_RDEV': 1,
'HAVE_STRUCT_TM_TM_ZONE': 1,
'HAVE_ST_BLOCKS': 1,
'HAVE_SYMLINK': 1,
'HAVE_SYSCONF': 1,
'HAVE_SYSEXITS_H': 1,
'HAVE_SYS_AUDIOIO_H': 0,
'HAVE_SYS_BSDTTY_H': 0,
'HAVE_SYS_DIR_H': 0,
'HAVE_SYS_EPOLL_H': 0,
'HAVE_SYS_EVENT_H': 1,
'HAVE_SYS_FILE_H': 1,
'HAVE_SYS_LOADAVG_H': 0,
'HAVE_SYS_LOCK_H': 1,
'HAVE_SYS_MKDEV_H': 0,
'HAVE_SYS_MODEM_H': 0,
'HAVE_SYS_NDIR_H': 0,
'HAVE_SYS_PARAM_H': 1,
'HAVE_SYS_POLL_H': 1,
'HAVE_SYS_RESOURCE_H': 1,
'HAVE_SYS_SELECT_H': 1,
'HAVE_SYS_SOCKET_H': 1,
'HAVE_SYS_STATVFS_H': 1,
'HAVE_SYS_STAT_H': 1,
'HAVE_SYS_TERMIO_H': 0,
'HAVE_SYS_TIMES_H': 1,
'HAVE_SYS_TIME_H': 1,
'HAVE_SYS_TYPES_H': 1,
'HAVE_SYS_UN_H': 1,
'HAVE_SYS_UTSNAME_H': 1,
'HAVE_SYS_WAIT_H': 1,
'HAVE_TCGETPGRP': 1,
'HAVE_TCSETPGRP': 1,
'HAVE_TEMPNAM': 1,
'HAVE_TERMIOS_H': 1,
'HAVE_TERM_H': 1,
'HAVE_TGAMMA': 1,
'HAVE_THREAD_H': 0,
'HAVE_TIMEGM': 1,
'HAVE_TIMES': 1,
'HAVE_TMPFILE': 1,
'HAVE_TMPNAM': 1,
'HAVE_TMPNAM_R': 0,
'HAVE_TM_ZONE': 1,
'HAVE_TRUNCATE': 1,
'HAVE_TZNAME': 0,
'HAVE_UCS4_TCL': 0,
'HAVE_UINT32_T': 1,
'HAVE_UINT64_T': 1,
'HAVE_UINTPTR_T': 1,
'HAVE_UNAME': 1,
'HAVE_UNISTD_H': 1,
'HAVE_UNSETENV': 1,
'HAVE_USABLE_WCHAR_T': 0,
'HAVE_UTIL_H': 1,
'HAVE_UTIMES': 1,
'HAVE_UTIME_H': 1,
'HAVE_WAIT3': 1,
'HAVE_WAIT4': 1,
'HAVE_WAITPID': 1,
'HAVE_WCHAR_H': 1,
'HAVE_WCSCOLL': 1,
'HAVE_WORKING_TZSET': 1,
'HAVE_ZLIB_COPY': 1,
'HAVE__GETPTY': 0,
'HGBRANCH': '',
'HGTAG': '',
'HGVERSION': '',
'HOST_GNU_TYPE': 'x86_64-apple-darwin13.0',
'HURD_C_THREADS': 0,
'INCLDIRSTOMAKE': '/System/Library/Frameworks/Python.framework/Versions/2.7/include /System/Library/Frameworks/Python.framework/Versions/2.7/include /System/Library/Frameworks/Python.framework/Versions/2.7/include/python2.7 /System/Library/Frameworks/Python.framework/Versions/2.7/include/python2.7',
'INCLUDEDIR': '/System/Library/Frameworks/Python.framework/Versions/2.7/include',
'INCLUDEPY': '/System/Library/Frameworks/Python.framework/Versions/2.7/include/python2.7',
'INSTALL': '/usr/bin/install -c',
'INSTALL_DATA': '/usr/bin/install -c -m 644',
'INSTALL_PROGRAM': '/usr/bin/install -c',
'INSTALL_SCRIPT': '/usr/bin/install -c',
'INSTALL_SHARED': '/usr/bin/install -c -m 555',
'INSTSONAME': 'Python.framework/Versions/2.7/Python',
'LDCXXSHARED': 'c++ -bundle -undefined dynamic_lookup',
'LDFLAGS': '-arch x86_64 -arch i386 -Wl,-F.',
'LDLAST': '',
'LDLIBRARY': 'Python.framework/Versions/2.7/Python',
'LDLIBRARYDIR': '',
'LDSHARED': 'cc -bundle -undefined dynamic_lookup -arch x86_64 -arch i386 -Wl,-F.',
'LIBC': '',
'LIBDEST': '/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7',
'LIBDIR': '/System/Library/Frameworks/Python.framework/Versions/2.7/lib',
'LIBFFI_INCLUDEDIR': '',
'LIBM': '',
'LIBOBJDIR': 'Python/',
'LIBOBJS': '',
'LIBP': '/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7',
'LIBPC': '/System/Library/Frameworks/Python.framework/Versions/2.7/lib/pkgconfig',
'LIBPL': '/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/config',
'LIBPYTHON': '/Library/Python/2.7',
'LIBRARY': 'libpython2.7.a',
'LIBRARY_OBJS': '\\',
'LIBS': '-ldl -framework CoreFoundation',
'LIBSUBDIRS': 'lib-tk lib-tk/test lib-tk/test/test_tkinter \\',
'LIBTOOL': 'cc -dynamiclib -all_load -fno-strict-aliasing -fno-common -dynamic -arch x86_64 -arch i386 -g -Os -pipe -fno-common -fno-strict-aliasing -fwrapv -mno-fused-madd -DENABLE_DTRACE -DMACOSX -DNDEBUG -Wall -Wstrict-prototypes -Wshorten-64-to-32 -DNDEBUG -g -fwrapv -Os -Wall -Wstrict-prototypes -DENABLE_DTRACE',
'LINKCC': 'cc',
'LINKFORSHARED': '-u _PyMac_Error /System/Library/Frameworks/Python.framework/Versions/2.7/Python',
'LN': 'ln',
'LOCALMODLIBS': '',
'MACHDEP': 'darwin',
'MACHDEPPATH': ':plat-darwin',
'MACHDEPS': 'plat-darwin plat-mac plat-mac/Carbon plat-mac/lib-scriptpackages \\',
'MACHDEP_OBJS': 'Python/mactoolboxglue.o',
'MACHDESTLIB': '/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7',
'MACH_C_THREADS': 0,
'MACOSX_DEPLOYMENT_TARGET': '10.9',
'MAINCC': 'cc',
'MAJOR_IN_MKDEV': 0,
'MAJOR_IN_SYSMACROS': 0,
'MAKESETUP': './Modules/makesetup',
'MANDIR': '/usr/share/man',
'MEMTESTOPTS': '-l -x test_subprocess test_io test_lib2to3 \\ -x test_dl test___all__ test_fork1 \\',
'MKDIR_P': './install-sh -c -d',
'MODLIBS': '',
'MODOBJS': 'Modules/threadmodule.o Modules/signalmodule.o Modules/posixmodule.o Modules/errnomodule.o Modules/pwdmodule.o Modules/_sre.o Modules/_codecsmodule.o Modules/_weakref.o Modules/zipimport.o Modules/symtablemodule.o Modules/xxsubtype.o',
'MODULE_OBJS': '\\',
'MULTIARCH': '',
'MVWDELCH_IS_EXPRESSION': 1,
'OBJECT_OBJS': '\\',
'OLDPATH': ':lib-old',
'OPT': '-DNDEBUG -g -fwrapv -Os -Wall -Wstrict-prototypes',
'OTHER_LIBTOOL_OPT': '',
'PACKAGE_BUGREPORT': 0,
'PACKAGE_NAME': 0,
'PACKAGE_STRING': 0,
'PACKAGE_TARNAME': 0,
'PACKAGE_URL': 0,
'PACKAGE_VERSION': 0,
'PARSER_HEADERS': '\\',
'PARSER_OBJS': '\\ Parser/myreadline.o Parser/tokenizer.o',
'PGEN': 'Parser/pgen',
'PGENOBJS': '\\ \\',
'PGENSRCS': '\\ \\',
'PGOBJS': '\\',
'PGSRCS': '\\',
'PLATDIR': 'plat-darwin',
'PLATMACDIRS': 'plat-mac plat-mac/Carbon plat-mac/lib-scriptpackages \\',
'PLATMACPATH': ':plat-mac:plat-mac/lib-scriptpackages',
'POBJS': '\\',
'POSIX_SEMAPHORES_NOT_ENABLED': 0,
'PROFILE_TASK': './Tools/pybench/pybench.py -n 2 --with-gc --with-syscheck',
'PSRCS': '\\',
'PTHREAD_SYSTEM_SCHED_SUPPORTED': 1,
'PURIFY': '',
'PYLONG_BITS_IN_DIGIT': 0,
'PYTHON': 'python',
'PYTHONFRAMEWORK': 'Python',
'PYTHONFRAMEWORKDIR': 'Python.framework',
'PYTHONFRAMEWORKINSTALLDIR': '/System/Library/Frameworks/Python.framework',
'PYTHONFRAMEWORKPREFIX': '/System/Library/Frameworks',
'PYTHONPATH': ':plat-darwin:plat-mac:plat-mac/lib-scriptpackages:../../Extras/lib/python:lib-tk:lib-old',
'PYTHON_FOR_BUILD': './python.exe -E',
'PYTHON_HEADERS': '\\',
'PYTHON_OBJS': '\\',
'PY_CFLAGS': '-fno-strict-aliasing -fno-common -dynamic -arch x86_64 -arch i386 -g -Os -pipe -fno-common -fno-strict-aliasing -fwrapv -mno-fused-madd -DENABLE_DTRACE -DMACOSX -DNDEBUG -Wall -Wstrict-prototypes -Wshorten-64-to-32 -DNDEBUG -g -fwrapv -Os -Wall -Wstrict-prototypes -DENABLE_DTRACE -I. -IInclude -I./Include -arch x86_64 -arch i386 -pipe -DPy_BUILD_CORE',
'PY_FORMAT_LONG_LONG': '"ll"',
'PY_FORMAT_SIZE_T': '"z"',
'PY_UNICODE_TYPE': 'unsigned short',
'Py_DEBUG': 0,
'Py_ENABLE_SHARED': 0,
'Py_UNICODE_SIZE': 2,
'Py_USING_UNICODE': 1,
'QUICKTESTOPTS': '-l -x test_subprocess test_io test_lib2to3 \\',
'RANLIB': 'ranlib',
'RESSRCDIR': 'Mac/Resources/framework',
'RETSIGTYPE': 'void',
'RUNSHARED': 'DYLD_FRAMEWORK_PATH=/private/var/tmp/python/python-76~157/2.7/python:',
'SCRIPTDIR': '/System/Library/Frameworks/Python.framework/Versions/2.7/lib',
'SETPGRP_HAVE_ARG': 0,
'SGI_ABI': '',
'SHELL': '/bin/sh',
'SHLIBS': '-ldl -framework CoreFoundation',
'SHLIB_EXT': '".so"',
'SIGNAL_OBJS': '',
'SIGNED_RIGHT_SHIFT_ZERO_FILLS': 0,
'SITEPATH': '',
'SIZEOF_DOUBLE': 8,
'SIZEOF_FLOAT': 4,
'SIZEOF_FPOS_T': 8,
'SIZEOF_INT': 4,
'SIZEOF_LONG': 4,
'SIZEOF_LONG_DOUBLE': 16,
'SIZEOF_LONG_LONG': 8,
'SIZEOF_OFF_T': 8,
'SIZEOF_PID_T': 4,
'SIZEOF_PTHREAD_T': 4,
'SIZEOF_SHORT': 2,
'SIZEOF_SIZE_T': 4,
'SIZEOF_TIME_T': 4,
'SIZEOF_UINTPTR_T': 4,
'SIZEOF_VOID_P': 4,
'SIZEOF_WCHAR_T': 4,
'SIZEOF__BOOL': 1,
'SO': '.so',
'SRCDIRS': 'Parser Grammar Objects Python Modules Mac',
'SRC_GDB_HOOKS': './Tools/gdb/libpython.py',
'STDC_HEADERS': 1,
'STRICT_SYSV_CURSES': "/* Don't use ncurses extensions */",
'STRINGLIB_HEADERS': '\\',
'SUBDIRS': '',
'SUBDIRSTOO': 'Include Lib Misc Demo',
'SVNVERSION': 'svnversion .',
'SYSLIBS': '',
'SYS_SELECT_WITH_SYS_TIME': 1,
'TANH_PRESERVES_ZERO_SIGN': 1,
'TESTOPTS': '-l',
'TESTPATH': '',
'TESTPROG': './Lib/test/regrtest.py',
'TESTPYTHON': 'DYLD_FRAMEWORK_PATH=/private/var/tmp/python/python-76~157/2.7/python: ./python.exe -Wd -3 -E -tt',
'TESTPYTHONOPTS': '',
'THREADOBJ': 'Python/thread.o',
'TIME_WITH_SYS_TIME': 1,
'TKPATH': ':lib-tk',
'TM_IN_SYS_TIME': 0,
'UNICODE_OBJS': 'Objects/unicodeobject.o Objects/unicodectype.o',
'UNIVERSALSDK': '',
'USE_TOOLBOX_OBJECT_GLUE': 1,
'VA_LIST_IS_ARRAY': 0,
'VERSION': '2.7',
'WANT_SIGFPE_HANDLER': 0,
'WANT_WCTYPE_FUNCTIONS': 0,
'WINDOW_HAS_FLAGS': 0,
'WITH_DOC_STRINGS': 1,
'WITH_DYLD': 1,
'WITH_LIBINTL': 0,
'WITH_NEXT_FRAMEWORK': 1,
'WITH_PYMALLOC': 1,
'WITH_THREAD': 1,
'WITH_TSC': 0,
'WITH_VALGRIND': 0,
'X87_DOUBLE_ROUNDING': 0,
'XMLLIBSUBDIRS': 'xml xml/dom xml/etree xml/parsers xml/sax',
'abs_builddir': '/private/var/tmp/python/python-76~157/2.7/python',
'abs_srcdir': '/private/var/tmp/python/python-76~157/2.7/python',
'build': 'x86_64-apple-darwin13.0',
'datarootdir': '/System/Library/Frameworks/Python.framework/Versions/2.7/share',
'exec_prefix': '/System/Library/Frameworks/Python.framework/Versions/2.7',
'host': 'x86_64-apple-darwin13.0',
'prefix': '/System/Library/Frameworks/Python.framework/Versions/2.7',
'srcdir': '.'}
| |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Flake8 cannot disable a warning for the file. Flake8 does not like beam code
# and reports many 'W503 line break before binary operator' errors. So turn off
# flake8 for this file.
# flake8: noqa
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import datetime
import json
import logging
import os
import random
import sys
import apache_beam as beam
from apache_beam.metrics import Metrics
import six
import textwrap
from tensorflow.python.lib.io import file_io
from tensorflow_transform import coders
from tensorflow_transform.beam import impl as tft
from tensorflow_transform.beam import tft_beam_io
from tensorflow_transform.tf_metadata import metadata_io
# Beam metric counting rows whose image could not be read/decoded in
# prepare_image_transforms (those rows are dropped from the pipeline).
img_error_count = Metrics.counter('main', 'ImgErrorCount')
# Files written by the analyze step and consumed by this preprocessing job.
SCHEMA_FILE = 'schema.json'
FEATURES_FILE = 'features.json'
TRANSFORMED_METADATA_DIR = 'transformed_metadata'
RAW_METADATA_DIR = 'raw_metadata'
TRANSFORM_FN_DIR = 'transform_fn'
# Individual transform names as they appear in the features file.
TARGET_TRANSFORM = 'target'
IMAGE_URL_TO_VEC_TRANSFORM = 'img_url_to_vec'
def parse_arguments(argv):
  """Parse command line arguments.

  Args:
    argv: list of command line arguments including program name.

  Returns:
    The parsed arguments as returned by argparse.ArgumentParser.

  Raises:
    ValueError: if --cloud is given without --project-id.
  """
  arg_parser = argparse.ArgumentParser(
      formatter_class=argparse.RawDescriptionHelpFormatter,
      description=textwrap.dedent("""\
          Runs preprocessing on raw data for TensorFlow training.
          This script applies some transformations to raw data to improve
          training performance. Some data transformations can be expensive
          such as the tf-idf text column transformation. During training, the
          same raw data row might be used multiply times to train a model. This
          means the same transformations are applied to the same data row
          multiple times. This can be very inefficient, so this script applies
          partial transformations to the raw data and writes an intermediate
          preprocessed datasource to disk for training.
          Running this transformation step is required for two usage paths:
          1) If the img_url_to_vec transform is used. This is because
             preprocessing as image is expensive and TensorFlow cannot easily
             read raw image files during training.
          2) If the raw data is in BigQuery. TensorFlow cannot read from a
             BigQuery source.
          Running this transformation step is recommended if a text transform is
          used (like tf-idf or bag-of-words), and the text value for each row
          is very long.
          Running this transformation step may not have an interesting training
          performance impact if the transforms are all simple like scaling
          numerical values."""))
  arg_parser.add_argument(
      '--project-id',
      help='The project to which the job will be submitted. Only needed if '
           '--cloud is used.')
  arg_parser.add_argument(
      '--cloud',
      action='store_true',
      help='Run preprocessing on the cloud.')
  arg_parser.add_argument(
      '--job-name',
      type=str,
      help='Unique job name if running on the cloud.')
  arg_parser.add_argument(
      '--csv-file-pattern',
      required=False,
      help='CSV data to transform.')
  # Alternative input source: a BigQuery table instead of CSV files.
  arg_parser.add_argument(
      '--bigquery-table',
      type=str,
      required=False,
      help=('Must be in the form `project.dataset.table_name`. BigQuery '
            'data to transform'))
  arg_parser.add_argument(
      '--analyze-output-dir',
      required=True,
      help='The output folder of analyze')
  arg_parser.add_argument(
      '--output-filename-prefix',
      required=True,
      type=str)
  arg_parser.add_argument(
      '--output-dir',
      default=None,
      required=True,
      help=('Google Cloud Storage or Local directory in which '
            'to place outputs.'))
  # --target / --no-target toggle one shared boolean; defaults to True.
  target_group = arg_parser.add_mutually_exclusive_group(required=False)
  target_group.add_argument('--target', dest='target', action='store_true')
  target_group.add_argument('--no-target', dest='target', action='store_false')
  arg_parser.set_defaults(target=True)
  arg_parser.add_argument(
      '--shuffle',
      action='store_true',
      default=False)

  # Unknown flags are tolerated (parse_known_args) so wrappers can pass extras.
  parsed, _ = arg_parser.parse_known_args(args=argv[1:])

  if parsed.cloud and not parsed.project_id:
    raise ValueError('--project-id is needed for --cloud')
  if not parsed.job_name:
    parsed.job_name = 'dataflow-job-{}'.format(
        datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
  return parsed
@beam.ptransform_fn
def shuffle(pcoll):  # pylint: disable=invalid-name
  """Randomly shuffles a PCollection by grouping elements on a random key.

  The original used `lambda (k, vs): vs`, Python-2-only tuple parameter
  unpacking (removed by PEP 3113), which is a SyntaxError on Python 3.
  Indexing the (key, values) pair keeps both interpreters happy.
  """
  return (pcoll
          | 'PairWithRandom' >> beam.Map(lambda x: (random.random(), x))
          | 'GroupByRandom' >> beam.GroupByKey()
          | 'DropRandom' >> beam.FlatMap(lambda kv: kv[1]))
def image_transform_columns(features):
  """Returns a list of columns that prepare_image_transforms() should run on.

  Because of beam + pickle, IMAGE_URL_TO_VEC_TRANSFORM cannot be used inside of
  a beam function, so we extract the columns prepare_image_transforms() should
  run on outside of beam.
  """
  return [
      name for name, transform in six.iteritems(features)
      if transform['transform'] == IMAGE_URL_TO_VEC_TRANSFORM
  ]
def prepare_image_transforms(element, image_columns):
  """Replace an image's url with its jpeg bytes as a web safe base64 string.

  Args:
    element: dict of raw column values for one row; mutated in place.
    image_columns: list of column names whose values are image file URIs.

  Returns:
    The mutated element, or None when any image cannot be read (the row is
    then dropped from the pipeline and ImgErrorCount is incremented).
  """
  from PIL import Image
  import base64
  import six
  from tensorflow.python.lib.io import file_io as tf_file_io
  for name in image_columns:
    uri = element[name]
    try:
      # Images are binary data, so read in 'rb' mode: text mode ('r') would
      # attempt to decode the JPEG bytes and corrupt/crash the read.
      with tf_file_io.FileIO(uri, 'rb') as f:
        img = Image.open(f).convert('RGB')
    # A variety of different calling libraries throw different exceptions here.
    # They all correspond to an unreadable file so we treat them equivalently.
    # pylint: disable=broad-except
    except Exception as e:
      logging.exception('Error processing image %s: %s', uri, str(e))
      img_error_count.inc()
      return
    # Convert to desired format and output.
    output = six.BytesIO()
    img.save(output, 'jpeg')
    image_bytes = output.getvalue()
    element[name] = base64.urlsafe_b64encode(image_bytes)
  return element
def preprocess(pipeline, args):
  """Builds the Beam preprocessing graph: read raw rows from CSV or BigQuery,
  apply image preprocessing and the saved tf.Transform graph, then write
  gzipped TFRecord files of serialized tf.Example protos.

  Args:
    pipeline: the beam pipeline to attach transforms to.
    args: parsed arguments as returned by parse_arguments().
  """
  # Metadata, schema and feature definitions were produced by the analyze step.
  input_metadata = metadata_io.read_metadata(
      os.path.join(args.analyze_output_dir, RAW_METADATA_DIR))
  schema = json.loads(file_io.read_file_to_string(
      os.path.join(args.analyze_output_dir, SCHEMA_FILE)).decode())
  features = json.loads(file_io.read_file_to_string(
      os.path.join(args.analyze_output_dir, FEATURES_FILE)).decode())
  column_names = [col['name'] for col in schema]
  exclude_outputs = None
  # With --no-target, strip the target column from the inputs and exclude it
  # from the transformed outputs (prediction-time data has no label).
  if not args.target:
    for name, transform in six.iteritems(features):
      if transform['transform'] == TARGET_TRANSFORM:
        target_name = name
        column_names.remove(target_name)
        exclude_outputs = [target_name]
        del input_metadata.schema.column_schemas[target_name]
        break
  # Input source: CSV files win over BigQuery when both flags are given.
  if args.csv_file_pattern:
    coder = coders.CsvCoder(column_names, input_metadata.schema, delimiter=',')
    raw_data = (
        pipeline
        | 'ReadCsvData' >> beam.io.ReadFromText(args.csv_file_pattern)
        | 'ParseCsvData' >> beam.Map(coder.decode))
  else:
    columns = ', '.join(column_names)
    query = 'SELECT {columns} FROM `{table}`'.format(columns=columns,
                                                     table=args.bigquery_table)
    raw_data = (
        pipeline
        | 'ReadBiqQueryData'
        >> beam.io.Read(beam.io.BigQuerySource(query=query,
                                               use_standard_sql=True)))
  # Note that prepare_image_transforms does not make embeddings, it just reads
  # the image files and converts them to base64 strings. tft.TransformDataset()
  # will apply the saved model that makes the image embeddings.
  image_columns = image_transform_columns(features)
  raw_data = (
      raw_data
      | 'PreprocessTransferredLearningTransformations'
      >> beam.Map(prepare_image_transforms, image_columns))
  if args.shuffle:
    raw_data = raw_data | 'ShuffleData' >> shuffle()
  # Apply the tf.Transform graph saved by the analyze step.
  transform_fn = (
      pipeline
      | 'ReadTransformFn'
      >> tft_beam_io.ReadTransformFn(args.analyze_output_dir))
  (transformed_data, transform_metadata) = (
      ((raw_data, input_metadata), transform_fn)
      | 'ApplyTensorflowPreprocessingGraph'
      >> tft.TransformDataset(exclude_outputs))
  # Serialize to tf.Example protos and write gzipped TFRecord shards.
  tfexample_coder = coders.ExampleProtoCoder(transform_metadata.schema)
  _ = (transformed_data
       | 'SerializeExamples' >> beam.Map(tfexample_coder.encode)
       | 'WriteExamples'
       >> beam.io.WriteToTFRecord(
           os.path.join(args.output_dir, args.output_filename_prefix),
           file_name_suffix='.tfrecord.gz'))
def main(argv=None):
  """Run Preprocessing as a Dataflow."""
  args = parse_arguments(sys.argv if argv is None else argv)
  temp_dir = os.path.join(args.output_dir, 'tmp')
  # Local runner by default; Dataflow only when --cloud was requested.
  pipeline_name = 'DataflowRunner' if args.cloud else 'DirectRunner'
  # setup.py is shipped with the job so remote workers can install this package.
  setup_path = os.path.abspath(
      os.path.join(os.path.dirname(__file__), 'setup.py'))
  options = {
      'job_name': args.job_name,
      'temp_location': temp_dir,
      'project': args.project_id,
      'setup_file': setup_path,
  }
  pipeline_options = beam.pipeline.PipelineOptions(flags=[], **options)
  with beam.Pipeline(pipeline_name, options=pipeline_options) as p:
    with tft.Context(temp_dir=temp_dir):
      preprocess(pipeline=p, args=args)
if __name__ == '__main__':
  main()
| |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.mgmt.botservice.models import (
BotChannel,
DirectLineChannel,
DirectLineChannelProperties,
DirectLineSite)
def create_channel(client, channel, channel_name, resource_group_name, resource_name):
    """Create a bot channel by wrapping the channel properties in a BotChannel resource."""
    wrapper = BotChannel(
        location='global',
        properties=channel
    )
    return client.create(
        resource_group_name=resource_group_name,
        resource_name=resource_name,
        channel_name=channel_name,
        parameters=wrapper
    )
def update_channel(client, channel, channel_name, resource_group_name, resource_name):
    """Update an existing bot channel, preserving the current site_id for a
    matching site so the service updates the site in place.

    NOTE(review): assumes `channel.properties.sites` has at least one entry —
    only the Direct Line update path visibly reaches this helper; confirm for
    other channel types. Also note the asymmetry with create_channel, which
    passes `parameters=` while this passes `properties=` — verify against the
    azure-mgmt-botservice client signature.
    """
    # Fetch the currently deployed channel to recover existing site ids.
    bot_channel_data = client.get(
        resource_group_name,
        resource_name,
        channel_name)
    sites = bot_channel_data.properties.properties.sites
    # Carry over the service-assigned site_id for the site being updated.
    for site in sites:
        if site.site_name == channel.properties.sites[0].site_name:
            channel.properties.sites[0].site_id = site.site_id
    return client.update(
        resource_group_name=resource_group_name,
        resource_name=resource_name,
        channel_name=channel_name,
        properties=channel
    )
def facebook_create(client, resource_group_name, resource_name, page_id, app_id, app_secret, access_token, is_disabled=None):  # pylint: disable=line-too-long
    """Create the Facebook channel for a bot."""
    from azure.mgmt.botservice.models import FacebookChannel, FacebookChannelProperties, FacebookPage
    page = FacebookPage(id=page_id, access_token=access_token)
    props = FacebookChannelProperties(
        pages=[page],
        app_id=app_id,
        app_secret=app_secret,
        is_enabled=not is_disabled)
    return create_channel(
        client, FacebookChannel(properties=props), 'FacebookChannel',
        resource_group_name, resource_name)
def email_create(client, resource_group_name, resource_name, email_address, password, is_disabled=None):
    """Create the Email channel for a bot."""
    from azure.mgmt.botservice.models import EmailChannel, EmailChannelProperties
    props = EmailChannelProperties(
        email_address=email_address,
        password=password,
        is_enabled=not is_disabled)
    return create_channel(
        client, EmailChannel(properties=props), 'EmailChannel',
        resource_group_name, resource_name)
def msteams_create(client, resource_group_name, resource_name, is_disabled=None,
                   enable_calling=None, calling_web_hook=None):
    """Create the Microsoft Teams channel for a bot."""
    from azure.mgmt.botservice.models import MsTeamsChannel, MsTeamsChannelProperties
    props = MsTeamsChannelProperties(
        is_enabled=not is_disabled,
        enable_calling=enable_calling,
        calling_web_hook=calling_web_hook)
    return create_channel(
        client, MsTeamsChannel(properties=props), 'MsTeamsChannel',
        resource_group_name, resource_name)
def skype_create(client, resource_group_name, resource_name, is_disabled=None, enable_messaging=None,
                 enable_media_cards=None, enable_video=None, enable_calling=None,
                 enable_screen_sharing=None, enable_groups=None, groups_mode=None, calling_web_hook=None):
    """Create the Skype channel for a bot."""
    from azure.mgmt.botservice.models import SkypeChannel, SkypeChannelProperties
    props = SkypeChannelProperties(
        is_enabled=not is_disabled,
        enable_messaging=enable_messaging,
        enable_media_cards=enable_media_cards,
        enable_video=enable_video,
        enable_calling=enable_calling,
        enable_screen_sharing=enable_screen_sharing,
        enable_groups=enable_groups,
        groups_mode=groups_mode,
        calling_web_hook=calling_web_hook)
    return create_channel(
        client, SkypeChannel(properties=props), 'SkypeChannel',
        resource_group_name, resource_name)
def kik_create(client, resource_group_name, resource_name, user_name, api_key, is_disabled=None, is_validated=None):
    """Create the Kik channel for a bot."""
    from azure.mgmt.botservice.models import KikChannel, KikChannelProperties
    props = KikChannelProperties(
        user_name=user_name,
        api_key=api_key,
        is_enabled=not is_disabled,
        is_validated=is_validated)
    return create_channel(
        client, KikChannel(properties=props), 'KikChannel',
        resource_group_name, resource_name)
def directline_create(client, resource_group_name, resource_name, is_disabled=None,
                      is_v1_disabled=None, is_v3_disabled=None, site_name='Default Site',
                      enable_enhanced_auth=False, trusted_origins=None):
    """Create the Direct Line channel for a bot with a single configured site."""
    site = DirectLineSite(
        site_name=site_name,
        is_enabled=not is_disabled,
        is_v1_enabled=not is_v1_disabled,
        is_v3_enabled=not is_v3_disabled,
        is_secure_site_enabled=enable_enhanced_auth,
        trusted_origins=trusted_origins or [])
    channel = DirectLineChannel(
        properties=DirectLineChannelProperties(sites=[site]))
    return create_channel(
        client, channel, 'DirectLineChannel',
        resource_group_name, resource_name)
def directline_update(client, resource_group_name, resource_name, is_disabled=None,
                      is_v1_disabled=None, is_v3_disabled=None, site_name='Default Site',
                      enable_enhanced_auth=False, trusted_origins=None):
    """Update the Direct Line channel of a bot, replacing the named site's settings."""
    site = DirectLineSite(
        site_name=site_name,
        is_enabled=not is_disabled,
        is_v1_enabled=not is_v1_disabled,
        is_v3_enabled=not is_v3_disabled,
        is_secure_site_enabled=enable_enhanced_auth,
        trusted_origins=trusted_origins or [])
    channel = DirectLineChannel(
        properties=DirectLineChannelProperties(sites=[site]))
    return update_channel(client, channel, 'DirectLineChannel', resource_group_name, resource_name)
def telegram_create(client, resource_group_name, resource_name, access_token, is_disabled=None, is_validated=None):
    """Create the Telegram channel for a bot."""
    from azure.mgmt.botservice.models import TelegramChannel, TelegramChannelProperties
    props = TelegramChannelProperties(
        access_token=access_token,
        is_enabled=not is_disabled,
        is_validated=is_validated)
    return create_channel(
        client, TelegramChannel(properties=props), 'TelegramChannel',
        resource_group_name, resource_name)
def sms_create(client, resource_group_name, resource_name, phone, account_sid, auth_token, is_disabled=None, is_validated=None):  # pylint: disable=line-too-long
    """Create the SMS (Twilio) channel for a bot."""
    from azure.mgmt.botservice.models import SmsChannel, SmsChannelProperties
    props = SmsChannelProperties(
        phone=phone,
        account_sid=account_sid,
        auth_token=auth_token,
        is_enabled=not is_disabled,
        is_validated=is_validated)
    return create_channel(
        client, SmsChannel(properties=props), 'SmsChannel',
        resource_group_name, resource_name)
def slack_create(client, resource_group_name, resource_name, client_id, client_secret, verification_token,
                 is_disabled=None, landing_page_url=None):
    """Create the Slack channel for a bot."""
    from azure.mgmt.botservice.models import SlackChannel, SlackChannelProperties
    props = SlackChannelProperties(
        client_id=client_id,
        client_secret=client_secret,
        verification_token=verification_token,
        landing_page_url=landing_page_url,
        is_enabled=not is_disabled)
    return create_channel(
        client, SlackChannel(properties=props), 'SlackChannel',
        resource_group_name, resource_name)
class ChannelOperations:  # pylint: disable=too-few-public-methods
    """Dynamically attaches `<channel>_get` and `<channel>_delete` helpers for
    every supported channel type, each bound to the service-side channel name
    (e.g. `msteams_get` -> 'MsTeamsChannel')."""

    def __init__(self):
        supported = ['facebook', 'email', 'msTeams', 'skype', 'kik', 'webChat',
                     'directLine', 'telegram', 'sms', 'slack']

        # Factory functions bind channel_name at call time, avoiding the
        # classic late-binding closure pitfall inside the loop below.
        def make_get(channel_name):
            def get(client, resource_group_name, resource_name, show_secrets=None):
                # list_with_keys also returns channel secrets; plain get does not.
                if show_secrets:
                    return client.list_with_keys(
                        resource_group_name=resource_group_name,
                        resource_name=resource_name,
                        channel_name=channel_name,
                    )
                return client.get(
                    resource_group_name=resource_group_name,
                    resource_name=resource_name,
                    channel_name=channel_name
                )
            return get

        def make_delete(channel_name):
            def delete(client, resource_group_name, resource_name):
                return client.delete(
                    resource_group_name=resource_group_name,
                    resource_name=resource_name,
                    channel_name=channel_name
                )
            return delete

        for channel in supported:
            full_name = '{}Channel'.format(channel)
            full_name = full_name[:1].upper() + full_name[1:]
            setattr(self, '{}_get'.format(channel.lower()), make_get(full_name))
            setattr(self, '{}_delete'.format(channel.lower()), make_delete(full_name))
channelOperationsInstance = ChannelOperations()
| |
import sys,string,os
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
import scipy, numpy
import statistics
from visualization_scripts import clustering
def evaluateMultiLinRegulatoryStructure(all_genes_TPM,MarkerFinder,SignatureGenes,state=None,query=None):
    """Score each ICGS cell state (or the single supplied state) via
    evaluateStateRegulatoryStructure and print the states ranked by score.

    all_genes_TPM: expression file path handed to loopThroughEachState.
    MarkerFinder: MarkerFinder results file path.
    SignatureGenes: signature gene list file path.
    state: optional state name; when None every discovered state is scored.
    query: optional query dataset path, forwarded unchanged.
    """
    all_indexes, group_index, expressionData = loopThroughEachState(all_genes_TPM)
    if state!=None:
        states = [state] ### For example, we only want to look in annotated Multi-Lin's
    else:
        # Iterating the dict yields all discovered ICGS state names.
        states = group_index
    state_scores=[]
    for state in states:
        print '\n',state, 'running now.'
        score = evaluateStateRegulatoryStructure(expressionData,all_indexes,group_index,MarkerFinder,SignatureGenes,state,query=query)
        state_scores.append([score,state])
        print state, score
    # Sort then reverse: highest-scoring states are printed first.
    state_scores.sort()
    state_scores.reverse()
    print state_scores
def loopThroughEachState(all_genes_TPM):
    """Import all genes with TPM values for all cells and index the columns.

    Returns (all_indexes, group_index, expressionData): all cell column
    indexes (sorted), a dict of ICGS state -> sorted column indexes, and the
    full tuple returned by clustering.importData.
    """
    matrix, column_header, row_header, dataset_name, group_db = clustering.importData(all_genes_TPM)
    group_index = {}
    all_indexes = []
    for sampleName in group_db:
        # First annotation entry for the sample is its ICGS state.
        ICGS_state = group_db[sampleName][0]
        col = column_header.index(sampleName)
        group_index.setdefault(ICGS_state, []).append(col)
        all_indexes.append(col)
    for ICGS_state in group_index:
        group_index[ICGS_state].sort()
    all_indexes.sort()
    expressionData = matrix, column_header, row_header, dataset_name, group_db
    return all_indexes, group_index, expressionData
def evaluateStateRegulatoryStructure(expressionData, all_indexes,group_index,MarkerFinder,SignatureGenes,state,query=None):
"""Predict multi-lineage cells and their associated coincident lineage-defining TFs"""
useProbablityOfExpression=False
ICGS_State_as_Row = False
matrix, column_header, row_header, dataset_name, group_db = expressionData
def importGeneLists(fn):
genes={}
for line in open(fn,'rU').xreadlines():
data = clustering.cleanUpLine(line)
gene,cluster = string.split(data,'\t')[0:2]
genes[gene]=cluster
return genes
def importMarkerFinderHits(fn):
genes={}
genes_to_symbol={}
ICGS_State_ranked={}
skip=True
for line in open(fn,'rU').xreadlines():
data = clustering.cleanUpLine(line)
if skip: skip=False
else:
try:
gene,symbol,rho,ICGS_State = string.split(data,'\t')
except Exception:
gene,symbol,rho,rho_p,ICGS_State = string.split(data,'\t')
genes_to_symbol[gene]=symbol
#if ICGS_State!=state and float(rho)>0.0:
if float(rho)>0.3:
try: ICGS_State_ranked[ICGS_State].append([float(rho),gene,symbol])
except Exception: ICGS_State_ranked[ICGS_State] = [[float(rho),gene,symbol]]
for ICGS_State in ICGS_State_ranked:
ICGS_State_ranked[ICGS_State].sort()
ICGS_State_ranked[ICGS_State].reverse()
#print ICGS_State, ICGS_State_ranked[ICGS_State][:50]
for (rho,gene,symbol) in ICGS_State_ranked[ICGS_State][:50]:
genes[gene]=rho,ICGS_State ### Retain all population specific genes (lax)
genes[symbol]=rho,ICGS_State
return genes, genes_to_symbol
def importQueryDataset(fn):
matrix, column_header, row_header, dataset_name, group_db = clustering.importData(fn)
return matrix, column_header, row_header, dataset_name, group_db
signatureGenes = importGeneLists(SignatureGenes)
markerFinderGenes, genes_to_symbol = importMarkerFinderHits(MarkerFinder)
#print len(signatureGenes),len(markerFinderGenes)
### Determine for each gene, its population frequency per cell state
index=0
expressedGenesPerState={}
stateAssociatedMarkers={}
def freqCutoff(x,cutoff):
if x>cutoff: return 1 ### minimum expression cutoff
else: return 0
for row in matrix:
ICGS_state_gene_frq={}
gene = row_header[index]
for ICGS_state in group_index:
state_values = map(lambda i: row[i],group_index[ICGS_state])
def freqCheck(x):
if x>1: return 1 ### minimum expression cutoff
else: return 0
expStateCells = sum(map(lambda x: freqCheck(x),state_values))
statePercentage = (float(expStateCells)/len(group_index[ICGS_state]))
ICGS_state_gene_frq[ICGS_state] = statePercentage
datasets_values = map(lambda i: row[i],all_indexes)
all_cells_frq = sum(map(lambda x: freqCheck(x),datasets_values))/(len(datasets_values)*1.0)
all_states_frq = map(lambda x: ICGS_state_gene_frq[x],ICGS_state_gene_frq)
all_states_frq.sort() ### frequencies of all non-multilin states
states_expressed = sum(map(lambda x: freqCutoff(x,0.5),all_states_frq))/(len(all_states_frq)*1.0)
for State in ICGS_state_gene_frq:
state_frq = ICGS_state_gene_frq[State]
rank = all_states_frq.index(state_frq)
if state_frq > 0.25 and rank>0: #and states_expressed<0.75 #and all_cells_frq>0.75
if 'Rik' not in gene and 'Gm' not in gene and '-' not in gene:
if gene in markerFinderGenes:# and gene in markerFinderGenes:
if ICGS_State_as_Row:
ICGS_State = signatureGenes[gene]
if gene in markerFinderGenes:
if ICGS_State_as_Row == False:
rho, ICGS_State = markerFinderGenes[gene]
else:
rho, ICGS_Cell_State = markerFinderGenes[gene] #ICGS_Cell_State
#try: gene = genes_to_symbol[gene]
#except: gene = gene
score = int(rho*100*state_frq)*(float(rank)/len(all_states_frq))
try: expressedGenesPerState[ICGS_State].append((score,gene))
except Exception: expressedGenesPerState[ICGS_State]=[(score,gene)] #(rank*multilin_frq)
try: stateAssociatedMarkers[gene,ICGS_State].append(State)
except Exception: stateAssociatedMarkers[gene,ICGS_State] = [State]
index+=1
if query!=None:
matrix, column_header, row_header, dataset_name, group_db = importQueryDataset(query)
markers_to_exclude=[]
expressedGenesPerState2={}
for (gene,ICGS_State) in stateAssociatedMarkers:
if len(stateAssociatedMarkers[(gene,ICGS_State)])<2: # or len(stateAssociatedMarkers[(gene,ICGS_State)])>len(ICGS_state_gene_frq)/2.0:
markers_to_exclude.append(gene)
else:
print ICGS_State, gene, stateAssociatedMarkers[(gene,ICGS_State)]
for ICGS_State in expressedGenesPerState:
for (score,gene) in expressedGenesPerState[ICGS_State]:
if gene not in markers_to_exclude:
try: expressedGenesPerState2[ICGS_State].append((score,gene))
except Exception: expressedGenesPerState2[ICGS_State] = [(score,gene)]
expressedGenesPerState = expressedGenesPerState2
createPseudoCell=True
### The expressedGenesPerState defines genes and modules co-expressed in the multi-Lin
### Next, find the cells that are most frequent in mulitple states
representativeMarkers={}
for ICGS_State in expressedGenesPerState:
expressedGenesPerState[ICGS_State].sort()
expressedGenesPerState[ICGS_State].reverse()
if '1Multi' not in ICGS_State:
markers = expressedGenesPerState[ICGS_State]#[:5]
markers_unique = list(set(map(lambda x: x[1],list(markers))))
print ICGS_State,":",string.join(markers_unique,', ')
if createPseudoCell:
for gene in markers:
def getBinary(x):
if x>1: return 1
else: return 0
if gene[1] in row_header: ### Only for query datasets
row_index = row_header.index(gene[1])
if useProbablityOfExpression:
pvalues = calculateGeneExpressProbilities(matrix[row_index]) ### probability of expression
values = pvalues
else:
binaryValues = map(lambda x: getBinary(x), matrix[row_index])
values = binaryValues
#values = matrix[row_index]
#if gene[1]=='S100a8': print binaryValues;sys.exit()
try: representativeMarkers[ICGS_State].append(values)
except Exception: representativeMarkers[ICGS_State] = [values]
else:
representativeMarkers[ICGS_State]=markers[0][-1]
#int(len(markers)*.25)>5:
#print ICGS_State, markers
#sys.exit()
for ICGS_State in representativeMarkers:
if createPseudoCell:
signature_values = representativeMarkers[ICGS_State]
if useProbablityOfExpression:
signature_values = [numpy.sum(value) for value in zip(*signature_values)]
else:
signature_values = [float(numpy.mean(value)) for value in zip(*signature_values)]
representativeMarkers[ICGS_State] = signature_values
else:
gene = representativeMarkers[ICGS_State]
row_index = row_header.index(gene)
gene_values = matrix[row_index]
representativeMarkers[ICGS_State] = gene_values
### Determine for each gene, its population frequency per cell state
expressedStatesPerCell={}
multilin_probability={}
import export
print 'Writing results matrix to:',MarkerFinder[:-4]+'-cellStateScores.txt'
eo = export.ExportFile(MarkerFinder[:-4]+'-cellStateScores.txt')
eo.write(string.join(['UID']+column_header,'\t')+'\n')
print 'a'
print len(representativeMarkers)
for ICGS_State in representativeMarkers:
gene_values = representativeMarkers[ICGS_State]
index=0
scoreMatrix=[]
HitsCount=0
for cell in column_header:
value = gene_values[index]
"""
expressedLiklihood = '0'
if (value<0.05 and useProbablityOfExpression==True) or (value==1 and useProbablityOfExpression==False):
try: expressedStatesPerCell[cell].append(ICGS_State)
except Exception: expressedStatesPerCell[cell] = [ICGS_State]
expressedLiklihood = '1'
HitsCount+=1
if useProbablityOfExpression:
try: multilin_probability[cell].append(value)
except Exception: multilin_probability[cell] = [value]
"""
index+=1
HitsCount+=1
scoreMatrix.append(str(value))
if HitsCount>1:
#print ICGS_State,HitsCount
eo.write(string.join([ICGS_State]+scoreMatrix,'\t')+'\n')
eo.close()
sys.exit()
def multiply(values):
p = 1
for i in values:
if i>0:
p = p*i
else:
p = p*1.e-16
return p
cell_mutlilin_ranking=[]
for cell in expressedStatesPerCell:
#if 'Multi-Lin:Gmp.R3.10' in cell: sys.exit()
if useProbablityOfExpression:
p = numpy.mean(multilin_probability[cell]) ### mean state probability
lineageCount = expressedStatesPerCell[cell]
if useProbablityOfExpression:
cell_mutlilin_ranking.append((p,len(lineageCount),cell))
else:
cell_mutlilin_ranking.append((len(lineageCount),cell))
cell_mutlilin_ranking.sort()
if useProbablityOfExpression == False:
cell_mutlilin_ranking.reverse()
scores = []
state_scores={}
cellsPerState={} ### Denominator for z-score analysis
for cell in cell_mutlilin_ranking:
score = cell[0]
scores.append(score)
cell_state = string.split(cell[-1],':')[0]
try: cellsPerState[cell_state]+=1
except Exception: cellsPerState[cell_state]=1
try: state_scores[cell_state].append(float(score))
except Exception: state_scores[cell_state] = [float(score)]
scoreMean = numpy.mean(scores)
scoreSD = numpy.std(scores)
oneSD = scoreMean+scoreSD
twoSD = scoreMean+scoreSD+scoreSD
oneStandDeviationAway={}
twoStandDeviationsAway={}
oneStandDeviationAwayTotal=0
twoStandDeviationsAwayTotal=0
print 'Mean:',scoreMean
print 'STDev:',scoreSD
state_scores2=[]
for cell_state in state_scores:
state_scores2.append((numpy.mean(state_scores[cell_state]),cell_state))
i=0
for cell in cell_mutlilin_ranking:
score,cellName = cell
CellState,CellName = string.split(cellName,':')
if score>=oneSD:
try: oneStandDeviationAway[CellState]+=1
except Exception: oneStandDeviationAway[CellState]=1
oneStandDeviationAwayTotal+=1
if score>=twoSD:
try: twoStandDeviationsAway[CellState]+=1
except Exception: twoStandDeviationsAway[CellState]=1
twoStandDeviationsAwayTotal+=1
print cell, string.join(expressedStatesPerCell[cell[-1]],'|')
i+=1
state_scores2
state_scores2.sort()
state_scores2.reverse()
twoStandDeviationsAway = oneStandDeviationAway
twoStandDeviationsAwayTotal = oneStandDeviationAwayTotal
print '\n\n'
import statistics
zscores = []
for CellState in twoStandDeviationsAway:
#print CellState
highMetaScoreCells = twoStandDeviationsAway[CellState]
totalCellsPerState = cellsPerState[CellState]
r = highMetaScoreCells
n = twoStandDeviationsAwayTotal
R = totalCellsPerState
N = len(column_header)
z = statistics.zscore(r,n,N,R)
scores = [z, CellState,statistics.p_value(z)]
zscores.append(scores)
zscores.sort()
zscores.reverse()
for scores in zscores:
scores = string.join(map(str,scores),'\t')
print scores
"""
for i in state_scores2:
print str(i[0])+'\t'+str(i[1])"""
sys.exit()
return numpy.mean(state_scores)
def calculateGeneExpressProbilities(values, useZ=False):
    """Return upper-tail normal probabilities (1 - Phi(v)) per value.

    If useZ is True, values are first standardized to z-scores using the
    vector's own mean and standard deviation (std floored at 0.1 to avoid
    a zero division for constant vectors); otherwise each value is simply
    doubled before the normal CDF is applied.

    values -- sequence of floats (one gene's expression across cells)
    Returns a numpy array of probabilities, same length as values.
    """
    ### First calculate z-scores - scipy.stats.mstats.zscore for the entire matrix
    avg = numpy.mean(values)
    std = numpy.std(values)
    if std == 0:
        std = 0.1  # guard against zero variance
    # Materialize a real list (not a lazy map object) so scipy receives an
    # array-convertible sequence under both Python 2 and Python 3; the
    # original map() call only worked because Py2 map returns a list.
    if useZ:
        values = [(x - avg) / std for x in values]
    else:
        values = [x * 2 for x in values]
    p_values = 1 - scipy.special.ndtr(values)
    return p_values
if __name__ == '__main__':
    # Command-line driver for evaluateMultiLinRegulatoryStructure (defined
    # earlier in this file). The hard-coded paths below are developer
    # defaults; any of them can be overridden via the CLI options parsed
    # further down.
    #query_dataset = '/Users/saljh8/Desktop/Old Mac/Desktop/demo/Mm_Gottgens_3k-scRNASeq/ExpressionInput/exp.GSE81682_HTSeq-cellHarmony-filtered.txt'
    all_tpm = '/Users/saljh8/Downloads/test1/exp.cellHarmony.txt'
    markerfinder = '/Users/saljh8/Downloads/test1/AllGenes_correlations-ReplicateBased.txt'
    signature_genes = '/Users/saljh8/Desktop/dataAnalysis/Collaborative/Grimes/All-10x/CITE-Seq_mLSK-60ADT/Merged/ExpressionInput/MF.txt'
    state = 'DC'
    #all_tpm = '/Users/saljh8/Desktop/dataAnalysis/Collaborative/Grimes/All-Fluidigm/updated.8.29.17/ExpressionInput/exp.Guide3-cellHarmony-revised.txt'
    #markerfinder = '/Users/saljh8/Desktop/dataAnalysis/Collaborative/Grimes/All-Fluidigm/updated.8.29.17/ExpressionOutput/MarkerFinder/AllGenes_correlations-ReplicateBased.txt'
    #signature_genes = '/Users/saljh8/Desktop/Old Mac/Desktop/Grimes/KashishNormalization/test/Panorama.txt'
    # NOTE(review): query_dataset is assigned three times in a row; only the
    # final None survives, so the Fluidigm path is effectively dead code.
    query_dataset = None
    query_dataset = '/Users/saljh8/Desktop/dataAnalysis/Collaborative/Grimes/All-Fluidigm/exp.NaturePan-PreGM-CD150-.txt'
    query_dataset = None
    """
    #all_tpm = '/Users/saljh8/Desktop/Old Mac/Desktop/demo/Mm_Gottgens_3k-scRNASeq/ExpressionInput/MultiLin/exp.Gottgens_HarmonizeReference.txt'
    all_tpm = '/Users/saljh8/Desktop/Old Mac/Desktop/demo/Mm_Gottgens_3k-scRNASeq/MultiLin/ExpressionInput/exp.Gottgens_HarmonizeReference.txt'
    #signature_genes = '/Users/saljh8/Desktop/Old Mac/Desktop/demo/Mm_Gottgens_3k-scRNASeq/MultiLin/ExpressionInput/Gottgens_HarmonizeReference.txt'
    signature_genes = '/Users/saljh8/Desktop/Old Mac/Desktop/demo/Mm_Gottgens_3k-scRNASeq/MultiLin/Gottgens_HarmonizeReference.txt'
    #markerfinder = '/Users/saljh8/Desktop/Old Mac/Desktop/demo/Mm_Gottgens_3k-scRNASeq/ExpressionOutput/MarkerFinder/AllGenes_correlations-ReplicateBased.txt'
    markerfinder = '/Users/saljh8/Desktop/Old Mac/Desktop/demo/Mm_Gottgens_3k-scRNASeq/MultiLin/ExpressionInput/MarkerFinder/AllGenes_correlations-ReplicateBased.txt'
    state = 'Eryth_Multi-Lin'
    """
    state = None
    import getopt
    # Accepted long options: --q (query dataset), --expdir (expression file),
    # --m (MarkerFinder results), --ICGS (signature genes), --state.
    options, remainder = getopt.getopt(sys.argv[1:],'', ['q=','expdir=','m=','ICGS=','state='])
    #print sys.argv[1:]
    for opt, arg in options:
        if opt == '--q': query_dataset=arg
        elif opt == '--expdir': all_tpm=arg
        elif opt == '--m': markerfinder=arg
        elif opt == '--ICGS': signature_genes=arg
        elif opt == '--state': state=arg
    #state = None
    #evaluateMultiLinRegulatoryStructure(all_tpm,markerfinder,signature_genes,state);sys.exit()
    evaluateMultiLinRegulatoryStructure(all_tpm,markerfinder,signature_genes,state,query = query_dataset);sys.exit()
| |
from flask import Blueprint, redirect, render_template, url_for, g, session, \
get_template_attribute, current_app, request, make_response, jsonify
from werkzeug import secure_filename
from flask_application.controllers import TemplateView
from flask_application.profiles.decorators import ajax_catch_error
from flask_application.profiles.constants import profile_constants as pc
from flask_application.profiles.models import *
from flask_application.utils.html import convert_html_entities, sanitize_html
from flask_application import app
from flask.ext.mobility.decorators import mobilized
import os.path
from copy import deepcopy
from random import randint
profiles = Blueprint('profiles', __name__)
import urlparse
def is_url(url):
    """Return True when *url* carries an explicit scheme (http, https, ...)."""
    return bool(urlparse.urlparse(url).scheme)
class ProfileView(TemplateView):
    """Public profile page; the owner sees the same template with is_me=True.

    The second ``get`` below is not a redefinition bug: flask-mobility's
    ``@mobilized(get)`` wraps the desktop handler so mobile user agents get
    the mobile template instead.
    """
    def get(self, slug):
        # Case-insensitive username lookup for desktop requests.
        profile = Profile.objects.get_or_404(username__iexact=slug)
        if current_app.dropbox.is_authenticated and\
           current_app.dropbox.account_info['email'] == profile.owner_email:
            # Owner view: template enables edit affordances via is_me.
            return render_template('profiles/neo.html', profile=profile, is_me=True)
            # return render_template('profiles/me.html', profile=profile)
        else:
            # return render_template('profiles/detail.html', profile=profile)
            return render_template('profiles/neo.html', profile=profile)
    @mobilized(get)
    def get(self, slug):
        # NOTE(review): mobile path uses an exact (case-sensitive) username
        # lookup while desktop uses __iexact -- confirm this is intentional.
        profile = Profile.objects.get_or_404(username=slug)
        return render_template('mobile/profiles/detail.html', profile=profile)
class ListView(TemplateView):
    """Listing pages: every profile ('all') or example profiles only.

    Unknown slugs redirect to the 404 route. The second ``get`` is the
    flask-mobility mobile variant, selected by ``@mobilized``.
    """
    def get(self, slug):
        """Desktop listing."""
        if slug == 'all':
            profile_set, title = Profile.objects.all(), 'profiles'
        elif slug == 'examples':
            profile_set, title = Profile.objects(is_example=True), 'example profiles'
        else:
            return redirect('404')
        return render_template('profiles/list.html',
                               profiles=profile_set,
                               title=title)
    @mobilized(get)
    def get(self, slug):
        """Mobile listing; same slugs, mobile templates."""
        if slug == 'all':
            profile_set, title = Profile.objects.all(), 'profiles'
        elif slug == 'examples':
            profile_set, title = Profile.objects(is_example=True), 'example profiles'
        else:
            return redirect('404')
        return render_template('mobile/profiles/list.html',
                               profiles=profile_set,
                               title=title)
class EditView(ProfileView):
    """Owner-only profile editor plus the Sijax AJAX handlers backing it.

    Edits are staged in ``session['temp_profile']`` (a deepcopy made in
    ``get``) and only written to Mongo by ``save_profile_handler``.
    Dropbox files scheduled for removal accumulate in
    ``session['dropbox_paths_to_delete']`` and are deleted on save.
    """
    def get(self, slug):
        """Render the edit page if the Dropbox-authenticated viewer owns
        this profile; otherwise redirect to 404."""
        profile = Profile.objects.get_or_404(username__iexact=slug)
        if current_app.dropbox.is_authenticated and \
           current_app.dropbox.account_info['email'] == profile.owner_email:
            # Stage a working copy so all edits are discardable until saved.
            session['temp_profile'] = deepcopy(profile)
            session['dropbox_paths_to_delete'] = []
            # return render_template('profiles/edit.html', profile=profile)
            return render_template('profiles/neo_edit.html', profile=profile)
        else:
            return redirect('404')
    def get_user_profile_edit(self):
        """Return the staged (session) profile, falling back to the
        authenticated owner's persisted profile."""
        if 'temp_profile' in session:
            return Profile(**session['temp_profile'])
        username = ''
        if current_app.dropbox.is_authenticated:
            username = current_app.dropbox.account_info['email']
        return Profile.objects.get_or_404(owner_email=username)
    def save_user_profile_edit(self, profile):
        """Persist edits: back into the session when staged, else to Mongo."""
        if 'temp_profile' in session:
            session['temp_profile'] = profile
            return
        try:
            profile.save()
        except Exception as e:
            # NOTE(review): save failures are only printed, never surfaced
            # to the client -- confirm this best-effort behavior is wanted.
            print 'faile', e
    # common html update functions
    def sidebar_html_update(self, obj_response, profile):
        """Re-render the sidebar image links and the modal dialogs."""
        sidebar_macro = get_template_attribute('profiles/_neo.html',
            'render_editable_imglinks')
        sidebar_html = sidebar_macro(profile.sidebar.img_links)
        obj_response.html("#imglink-container", sidebar_html)
        modal_macro = get_template_attribute('profiles/_neo.html',
            'render_modals')
        modal_html = modal_macro(profile)
        obj_response.html("#modals", modal_html)
    def description_content_html_update(self, obj_response, profile):
        """Re-render the description panes and their tab strip."""
        desc_macro = get_template_attribute('profiles/_neo.html',
            'render_description_content')
        desc_html = desc_macro(profile.description)
        obj_response.html('#description-content', desc_html)
        desc_tabs_macro = get_template_attribute('profiles/_neo.html',
            'render_description_tabs')
        desc_tabs_html = desc_tabs_macro(profile.description, True)
        obj_response.html('#left-sidebar-tabs', desc_tabs_html)
    def gallery_html_update(self, obj_response, profile):
        """Re-render the gallery nav tabs and gallery tables."""
        navtabs_macro = get_template_attribute('profiles/_editable.html',
            'render_navtabs_gallery')
        navtabs_html = navtabs_macro(profile.gallery)
        obj_response.html('#navtabs_gallery', navtabs_html)
        desc_macro = get_template_attribute('profiles/_editable.html',
            'render_table_gallery')
        desc_html = desc_macro(profile.gallery)
        obj_response.html('#table_gallery', desc_html)
    def gallery_links_html_update(self, obj_response, profile, table):
        """Re-render one gallery table's links pane (#links<order>)."""
        links_macro = get_template_attribute('profiles/_editable.html',
            'render_gallery_table_links')
        links_html = links_macro(table)
        element = '#links' + str(table.order)
        obj_response.html(element, links_html)
    def extract_desc_content(self, tables):
        """Build EditableImageTable objects (order + sanitized text only,
        no images) from the client-submitted {tab_name: {...}} dict."""
        out = {}
        for tab_name, tbl in tables.iteritems():
            table = EditableImageTable()
            table.order = tbl['order']
            table.text = format_input(tbl['text'])
            out[tab_name] = table
        return out
    def update_description_content(self, profile, tables):
        """Replace description tables from client data while preserving the
        existing per-table image lists (matched back by table order)."""
        desc_images = [tbl.images for tbl in profile.description.get_tables()]
        profile.description.tables = self.extract_desc_content(tables)
        for tbl in profile.description.get_tables():
            tbl.images = desc_images[tbl.order]
    @ajax_catch_error
    def discard_changes_handler(self, obj_response, content):
        """Abandon staged edits: clean up Dropbox temp state and go back to
        the persisted profile's detail page."""
        username = current_app.dropbox.account_info['email']
        profile = Profile.objects.get_or_404(owner_email=username)
        profile.dropbox_cleanup()
        obj_response.redirect(url_for('profiles.detail', slug=profile.username))
    @ajax_catch_error
    def save_profile_handler(self, obj_response, content):
        """Validate and persist the staged profile, then apply deferred
        Dropbox deletions and redirect to the detail page."""
        profile = self.get_user_profile_edit()
        profile.header.title = format_input(content[pc['HEADER_TITLE']])
        profile.header.body = format_input(content[pc['HEADER_BODY']])
        self.update_description_content(profile, content[pc['DESC_TABLE']])
        for tbl_name in profile.description.get_keys():
            if len(tbl_name) == 0:
                obj_response.alert("ERROR: notes cannot have empty titles")
                return
        # Adopt the persisted document's id so save() updates, not inserts.
        master_profile = Profile.objects.get(username=profile.username)
        profile.id = master_profile.id
        try:
            profile.save()
        except Exception as e:
            print 'faile', e
            obj_response.alert('bad input, cannot save')
            return
        # Clearing bkg_img/bkg_color does not round-trip through the staged
        # copy, so mirror removals onto the persisted document explicitly.
        master_profile = Profile.objects.get(username=profile.username)
        if not profile.bkg_img and master_profile.bkg_img:
            master_profile.bkg_img = None
            master_profile.save()
        if not profile.bkg_color and master_profile.bkg_color:
            master_profile.bkg_color = None
            master_profile.save()
        # Deferred Dropbox deletions only happen once the save succeeded.
        for img in session['dropbox_paths_to_delete']:
            profile.dropbox_delete_file(img)
        del session['dropbox_paths_to_delete']
        profile.dropbox_cleanup()
        obj_response.redirect(url_for('profiles.detail',
            slug=profile.username))
    def add_imglink_handler(self, obj_response, content):
        """Append a sidebar image link (max 3) with a default icon."""
        def get_default_imglink_img(idx):
            # Default icons cycle email/twitter/fa; random pick past index 3.
            if (idx > 3):
                idx = randint(1, 3)
            if idx == 1:
                return url_for('static', filename='img/icon_email.png', _external=True)
            if idx == 2:
                return url_for('static', filename='img/icon_twitter.png', _external=True)
            return url_for('static', filename='img/icon_fa.png', _external=True)
        profile = self.get_user_profile_edit()
        if len(profile.sidebar.img_links) > 2:
            return
        imglink = ImageLink(img_url=get_default_imglink_img(len(profile.sidebar.img_links)), link_url='')
        profile.sidebar.img_links.append(imglink)
        self.save_user_profile_edit(profile)
        self.sidebar_html_update(obj_response, profile)
    def update_imglink_image_handler(self, obj_response, content):
        """Swap one sidebar link's image for a newly-uploaded Dropbox file;
        the old file is queued for deletion on save."""
        profile = self.get_user_profile_edit()
        idx = content['num']
        src = Path(private_path=content['files'][0]['path'])
        dst = profile.dropbox_get_non_gallery_image_directory().join(content['files'][0]['path'])
        if profile.sidebar.img_links[idx].dropbox_path:
            session['dropbox_paths_to_delete'].append(profile.sidebar.img_links[idx].dropbox_path)
        profile.sidebar.img_links[idx].dropbox_path = profile.dropbox_move_file(src, dst)
        profile.sidebar.img_links[idx].share()
        self.save_user_profile_edit(profile)
        self.sidebar_html_update(obj_response, profile)
    @ajax_catch_error
    def update_imglink_handler(self, obj_response, content):
        """Update one sidebar link's target URL after validating it."""
        profile = self.get_user_profile_edit()
        if content['href'] and not is_url(content['href']):
            obj_response.alert('NOT A URL')
            return
        profile.sidebar.img_links[content['num']].link_url = content['href']
        self.save_user_profile_edit(profile)
        self.sidebar_html_update(obj_response, profile)
    @ajax_catch_error
    def del_imglink_handler(self, obj_response, content):
        """Delete the sidebar link at content['num']."""
        profile = self.get_user_profile_edit()
        num = content['num']
        # NOTE(review): only emptiness is checked, not num's bounds -- an
        # out-of-range index would raise (caught by @ajax_catch_error).
        if len(profile.sidebar.img_links) > 0:
            del profile.sidebar.img_links[num]
        self.save_user_profile_edit(profile)
        self.sidebar_html_update(obj_response, profile)
    @ajax_catch_error
    def update_avatar_url_handler(self, obj_response, content):
        """Replace the header avatar with a newly-uploaded Dropbox file and
        re-render the avatar image."""
        profile = self.get_user_profile_edit()
        new_file = content['files'][0]
        src = Path(private_path=new_file['path'])
        dst = profile.dropbox_get_non_gallery_image_directory().join(new_file['path'])
        if profile.header.avatar_dropbox_path:
            session['dropbox_paths_to_delete'].append(profile.header.avatar_dropbox_path)
        profile.header.avatar_dropbox_path = profile.dropbox_move_file(src, dst)
        # NOTE(review): share() is called on the path object here, whereas
        # update_imglink_image_handler shares the link object -- confirm.
        profile.header.avatar_dropbox_path.share()
        self.save_user_profile_edit(profile)
        avtimg_macro = get_template_attribute('profiles/_neo.html', 'render_avatarimg')
        avt_html = avtimg_macro(profile.header)
        obj_response.html("#avatar", avt_html)
    @ajax_catch_error
    def add_desc_content_handler(self, obj_response, content):
        """Add a new description note tab (max 10), named Note<N> with the
        first free N, after syncing current client-side table state."""
        profile = self.get_user_profile_edit()
        num_tabls = len(profile.description.tables)
        if num_tabls >= 10:
            return
        attr_tabl = EditableImageTable()
        attr_tabl.text = 'write text here...'
        name = 'Note' + str(num_tabls)
        while name in profile.description.tables:
            num_tabls += 1
            name = 'Note' + str(num_tabls)
        attr_tabl.order = num_tabls
        self.update_description_content(profile, content[pc['DESC_TABLE']])
        profile.description.add_table(name, attr_tabl)
        self.save_user_profile_edit(profile)
        self.description_content_html_update(obj_response, profile)
    @ajax_catch_error
    def del_desc_content_handler(self, obj_response, content):
        """Delete the description note whose order equals content['num']."""
        idx = int(content['num'])
        if idx < 0:
            return
        profile = self.get_user_profile_edit()
        self.update_description_content(profile, content[pc['DESC_TABLE']])
        profile.description.delete_table_by_order(idx)
        self.save_user_profile_edit(profile)
        self.description_content_html_update(obj_response, profile)
    @ajax_catch_error
    def change_color_handler(self, obj_response, content):
        """Stage a single color override (content['color'] -> value)."""
        profile = self.get_user_profile_edit()
        profile.colors[content['color']] = content['value']
        self.save_user_profile_edit(profile)
    @ajax_catch_error
    def change_font_handler(self, obj_response, content):
        """Stage a single font override (content['font'] -> value)."""
        profile = self.get_user_profile_edit()
        profile.fonts[content['font']] = content['value']
        self.save_user_profile_edit(profile)
    @ajax_catch_error
    def add_image_to_description_handler(self, obj_response, content):
        """Replace a description table's images with newly-uploaded files;
        old images are queued for deletion on save."""
        idx = int(content['num'])
        if idx < 0:
            return
        profile = self.get_user_profile_edit()
        table = profile.description.get_tables()[idx]
        if not table:
            return
        for idx, f in enumerate(table.images):
            session['dropbox_paths_to_delete'].append(f)
        table.images = []
        fdir = profile.dropbox_get_non_gallery_image_directory()
        for f in content['files']:
            src = Path(private_path=f['path'])
            dst = fdir.join(f['path'])
            table.images.append(profile.dropbox_move_file(src, dst))
        table.share()
        print 'gorb',profile, table, content
        self.save_user_profile_edit(profile)
        self.description_content_html_update(obj_response, profile)
    def register_sijax(self):
        """Register every AJAX handler above with Sijax under its wire name."""
        g.sijax.register_callback('save_profile', self.save_profile_handler)
        g.sijax.register_callback('discard_changes', self.discard_changes_handler)
        g.sijax.register_callback('add_imglink', self.add_imglink_handler)
        g.sijax.register_callback('update_imglink', self.update_imglink_handler)
        g.sijax.register_callback('update_imglink_image', self.update_imglink_image_handler)
        g.sijax.register_callback('del_imglink', self.del_imglink_handler)
        g.sijax.register_callback('add_desc_table', self.add_desc_content_handler)
        g.sijax.register_callback('del_desc_table', self.del_desc_content_handler)
        g.sijax.register_callback('update_avatar_url', self.update_avatar_url_handler)
        g.sijax.register_callback('change_color', self.change_color_handler)
        g.sijax.register_callback('change_font', self.change_font_handler)
        g.sijax.register_callback('add_image_to_description', self.add_image_to_description_handler)
def format_input(string):
    """Sanitize user-submitted HTML, then decode its character entities."""
    sanitized = sanitize_html(string)
    return convert_html_entities(sanitized)
# puts Profile constants into template rendering context
@profiles.context_processor
def inject_constants():
    """Expose the profile_constants mapping to every template rendered by
    this blueprint."""
    return pc
# Register the urls
# /<slug>/        -> public profile detail page
profiles.add_url_rule('/<slug>/', view_func=ProfileView.as_view('detail'))
# /<slug>/edit    -> owner-only editor (POST carries Sijax AJAX calls)
profiles.add_url_rule('/<slug>/edit', view_func=EditView.as_view('edit'), methods=['GET', 'POST'])
# /site/profiles/<slug> -> 'all' or 'examples' listing pages
profiles.add_url_rule('/site/profiles/<slug>', view_func=ListView.as_view('list'))
from PIL import Image
import StringIO
@app.route('/upload/', methods=('GET', 'POST'))
def upload():
    """Receive image uploads, downscale to at most 1920x1080, and store
    them in the user's Dropbox.

    GIFs are stored untouched (animation survives); PNGs are converted to
    JPEG; everything else is re-saved in its own format. Returns a JSON
    payload {'files': [metadata, ...]} where each metadata dict is the
    Dropbox put_file result plus a 'dimensions' tuple.
    """
    if not app.dropbox.is_authenticated:
        response = make_response('fail')
        response.mimetype = 'text/plain'
        return response
    if request.method == 'POST':
        data = dict((key, request.files.getlist(key)) for key in request.files.keys())
        out = []
        client = app.dropbox.client
        for k, file_list in data.iteritems():
            for file_obj in file_list:
                if not file_obj:
                    continue
                filename = secure_filename(file_obj.filename)
                fname, ext = os.path.splitext(filename)
                # Lowercase so .GIF/.JPG get the same treatment as .gif/.jpg.
                ext = ext[1:].lower()
                # allow gifs uncompressed cuz animated gifs are cool as hell
                if ext == 'gif':
                    metadata = client.put_file('/' + filename, file_obj.read(), overwrite=True)
                    metadata['dimensions'] = (0, 0)
                    out.append(metadata)
                    continue
                # BUG FIX: open the current file object, not request.files[k]
                # -- request.files[k] only ever yields the FIRST file uploaded
                # under a key, so later files in a multi-upload were wrong.
                img = Image.open(file_obj)
                # Scale down (never up) so the image fits within 1920x1080,
                # preserving aspect ratio.
                wpercent = (1920 / float(img.size[0]))
                hpercent = (1080 / float(img.size[1]))
                if wpercent < 1.0:
                    hsize = int((float(img.size[1]) * float(wpercent)))
                    img = img.resize((1920, hsize), Image.ANTIALIAS)
                elif hpercent < 1.0:
                    wsize = int((float(img.size[0]) * float(hpercent)))
                    img = img.resize((wsize, 1080), Image.ANTIALIAS)
                if ext == 'png':
                    # save all as jpg to make small files
                    filename = fname + '.jpg'
                # BUG FIX: PIL's format name is 'JPEG'; 'JPG' raises KeyError,
                # so normalize both the png conversion and native .jpg uploads.
                if ext in ('png', 'jpg'):
                    ext = 'jpeg'
                thumb_io = StringIO.StringIO()
                img.save(thumb_io, ext.upper())
                metadata = client.put_file('/' + filename, thumb_io.getvalue(), overwrite=True)
                metadata['dimensions'] = (img.size[0], img.size[1])
                out.append(metadata)
        return jsonify(**{'files': out})
    # BUG FIX: an authenticated GET previously fell through returning None
    # (a 500 in Flask); answer with an empty file list instead.
    return jsonify(files=[])
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Correctness tests for tf.keras using DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import tpu_strategy
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import random_seed
from tensorflow.python.keras.engine import distributed_training_utils
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras
from tensorflow.python.training import gradient_descent
# Seed for reproducible data generation (applied elsewhere in this file).
_RANDOM_SEED = 1337
# Number of batches consumed per evaluate() call in dataset mode.
_EVAL_STEPS = 20
# Global (across all replicas) batch size used throughout these tests.
_GLOBAL_BATCH_SIZE = 64
# Note: Please make sure the tests in this file are also covered in
# keras_backward_compat_test for features that are supported with both APIs.
# Every strategy flavor these correctness tests run against.
all_strategies = [
    combinations.default_strategy,
    combinations.one_device_strategy,
    combinations.mirrored_strategy_with_gpu_and_cpu,
    combinations.mirrored_strategy_with_two_gpus,
    combinations.core_mirrored_strategy_with_gpu_and_cpu,
    combinations.core_mirrored_strategy_with_two_gpus,
    combinations.tpu_strategy,  # steps_per_run=2
    combinations.tpu_strategy_one_step,
]
def all_strategy_combinations_with_eager_and_graph_modes():
  """Every strategy crossed with both graph and eager execution modes."""
  return combinations.combine(
      distribution=all_strategies, mode=['graph', 'eager'])
def all_strategy_combinations_with_graph_mode():
  """Every strategy, graph mode only."""
  return combinations.combine(
      distribution=all_strategies, mode=['graph'])
def strategy_and_input_combinations():
  """Full test matrix: strategy x mode x input flavor x model."""
  def cnn_model_with_batch_norm(**kwargs):
    return _create_cnn_model(with_batch_norm=True, **kwargs)

  strategy_combos = combinations.combine(distribution=all_strategies)
  mode_and_input_combos = combinations.combine(
      mode=['graph', 'eager'],
      use_numpy=[True, False],
      use_validation_data=[True, False])
  model_combos = combinations.combine(model_with_data=[
      ModelWithData('dnn', _create_dnn_model, _dnn_training_data),
      ModelWithData('cnn', _create_cnn_model, _cnn_training_data),
      ModelWithData('cnn_batch_norm',
                    cnn_model_with_batch_norm,
                    _cnn_training_data,
                    with_batch_norm=True),
  ])
  return combinations.times(strategy_combos, mode_and_input_combos,
                            model_combos)
class MaybeDistributionScope(object):
  """Context manager that enters a strategy scope only when one is given.

  With distribution=None this is a no-op context, letting callers use the
  same `with` block for distributed and non-distributed runs alike.
  """

  def __init__(self, distribution):
    self._distribution = distribution
    self._scope = None

  def __enter__(self):
    if not self._distribution:
      return
    self._scope = self._distribution.scope()
    self._scope.__enter__()

  def __exit__(self, exc_type, value, traceback):
    if not self._distribution:
      return
    self._scope.__exit__(exc_type, value, traceback)
    self._scope = None
class ModelWithData(object):
  """Named (model factory, data factory) pair for test combinations.

  `name` doubles as the repr shown in generated test-case names. The
  model_fn must take two arguments: initial_weights and distribution.
  """

  def __init__(self, name, model_fn, data_fn, with_batch_norm=False):
    self.name = name
    self.model_fn = model_fn
    self.data_fn = data_fn
    self.with_batch_norm = with_batch_norm

  def __repr__(self):
    return self.name
def _dnn_training_data():
# TODO(xiejw): Change this back to 10000, once we support final partial
# batch.
num_samples = 9984
x_train = np.random.rand(num_samples, 1)
y_train = 3 * x_train
x_train = x_train.astype('float32')
y_train = y_train.astype('float32')
x_predict = [[1.], [2.], [3.], [4.]]
return x_train, y_train, x_predict
def _create_dnn_model(initial_weights=None, distribution=None):
  """Build and compile a small non-linear regression DNN (MSE + SGD)."""
  with MaybeDistributionScope(distribution):
    # We add few non-linear layers to make it non-trivial.
    model = keras.Sequential([
        keras.layers.Dense(10, activation='relu', input_shape=(1,)),
        keras.layers.Dense(10, activation='relu'),
        keras.layers.Dense(10, activation='relu'),
        keras.layers.Dense(1),
    ])
    if initial_weights:
      model.set_weights(initial_weights)
    model.compile(
        loss=keras.losses.mean_squared_error,
        optimizer=gradient_descent_keras.SGD(0.5),
        metrics=['mse'])
    return model
def _cnn_training_data(count=_GLOBAL_BATCH_SIZE * _EVAL_STEPS,
shape=(28, 28, 3), num_classes=10):
centers = np.random.randn(num_classes, *shape)
features = []
labels = []
for _ in range(count):
label = np.random.randint(0, num_classes, size=1)[0]
offset = np.random.normal(loc=0, scale=0.1, size=np.prod(shape))
offset = offset.reshape(shape)
labels.append(label)
features.append(centers[label] + offset)
x_train = np.asarray(features, dtype=np.float32)
y_train = np.asarray(labels, dtype=np.float32).reshape((count, 1))
x_predict = x_train
return x_train, y_train, x_predict
def _create_cnn_model(initial_weights=None, distribution=None,
                      with_batch_norm=False):
  """Build and compile a tiny conv classifier, optionally with batch norm."""
  with MaybeDistributionScope(distribution):
    image = keras.layers.Input(shape=(28, 28, 3), name='image')
    conv = keras.layers.Conv2D(
        name='conv1', filters=16, kernel_size=(3, 3), strides=(4, 4))(image)
    if with_batch_norm:
      conv = keras.layers.BatchNormalization(name='bn1')(conv)
    pooled = keras.layers.MaxPooling2D(pool_size=(2, 2))(conv)
    flattened = keras.layers.Flatten()(pooled)
    logits = keras.layers.Dense(
        10, activation='softmax', name='pred')(flattened)
    model = keras.Model(inputs=[image], outputs=[logits])
    if initial_weights:
      model.set_weights(initial_weights)
    model.compile(
        optimizer=gradient_descent.GradientDescentOptimizer(learning_rate=0.1),
        loss='sparse_categorical_crossentropy',
        metrics=['sparse_categorical_accuracy'])
    return model
def batch_wrapper(dataset, batch_size, distribution, repeat=None):
  """Optionally repeat the dataset, then batch it for the strategy."""
  if repeat:
    dataset = dataset.repeat(repeat)
  # TPUs currently require fully defined input shapes, drop_remainder ensures
  # the input will have fully defined shapes.
  if isinstance(distribution, tpu_strategy.TPUStrategy):
    return dataset.batch(batch_size, drop_remainder=True)
  return dataset.batch(batch_size)
def get_batch_size(global_batch_size, distribution):
  """Return the batch size to pass to Keras for this strategy.

  Strategies that do not support global batch sizes get a per-replica
  share of the global batch instead.
  """
  # TODO(b/118776054): Use global batch size for Keras/DS support.
  needs_per_core = (
      distribution and
      not distributed_training_utils.global_batch_size_supported(distribution))
  if needs_per_core:
    return global_batch_size // distribution.num_replicas_in_sync
  return global_batch_size
def get_correctness_test_inputs(use_numpy, use_validation_data,
                                with_distribution, x_train, y_train, x_predict):
  """Generates the inputs for correctness check when enable Keras with DS.

  Returns (training_inputs, eval_inputs, predict_inputs): keyword-argument
  dicts for model.fit/evaluate/predict. eval_inputs is None when
  use_validation_data is True (validation happens inside fit instead).
  """
  training_epochs = 2
  global_batch_size = _GLOBAL_BATCH_SIZE
  batch_size = get_batch_size(global_batch_size, with_distribution)
  if use_numpy:
    # Numpy path: Keras receives raw arrays plus an explicit batch_size.
    training_inputs = {
        'batch_size': batch_size,
        'x': x_train,
        'y': y_train,
        'epochs': training_epochs,
        'shuffle': False,
    }
    if use_validation_data:
      eval_inputs = None
      training_inputs['validation_data'] = (x_train, y_train)
    else:
      eval_inputs = {
          'batch_size': batch_size,
          'x': x_train,
          'y': y_train,
      }
    predict_inputs = {
        'x': np.array(x_predict, dtype=np.float32),
    }
  else:
    if len(x_train) < _GLOBAL_BATCH_SIZE * _EVAL_STEPS:
      # Currently, we cannot detech the size of a dataset. So, the eval steps is
      # hard coded.
      raise ValueError('x_train must have at least '
                       '_GLOBAL_BATCH_SIZE * _EVAL_STEPS samples')
    # For dataset inputs, we do not pass batch_size to
    # keras.fit/evaluate/predict. The batch size is part of the dataset.
    train_dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))
    x = batch_wrapper(train_dataset, batch_size, with_distribution,
                      repeat=training_epochs)
    training_inputs = {
        'batch_size': None,
        'x': x,
        'y': None,
        'epochs': training_epochs,
        'shuffle': False,
        'steps_per_epoch': len(x_train) // global_batch_size,
    }
    if use_validation_data:
      eval_inputs = None  # Remove the eval_inputs
      eval_dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))
      x = batch_wrapper(eval_dataset, batch_size, with_distribution)
      training_inputs['validation_data'] = x
      training_inputs['validation_steps'] = 5
    else:
      eval_inputs = {
          'batch_size': None,
          'x': x,
          'y': None,
          'steps': _EVAL_STEPS,
      }
    # Predict on a single batch covering all of x_predict.
    predict_batch_size = get_batch_size(len(x_predict), with_distribution)
    predict_dataset = dataset_ops.Dataset.from_tensor_slices(x_predict)
    predict_dataset = batch_wrapper(predict_dataset, predict_batch_size,
                                    with_distribution)
    predict_inputs = {
        'steps': 1,
        'x': predict_dataset,
    }
  return training_inputs, eval_inputs, predict_inputs
def fit_eval_and_predict(
    initial_weights, input_fn, model_fn, distribution=None):
  """Train, evaluate, and predict twice; collect everything into a dict.

  Keys: training_history_{1,2}, eval_result_{1,2} (when eval inputs exist),
  weights_{1,2}, and predict_result_1 (when predict inputs exist).
  """
  model = model_fn(initial_weights=initial_weights, distribution=distribution)
  training_inputs, eval_inputs, predict_inputs = input_fn(distribution)

  outcome = {}
  # Two identical rounds to mimic a user's train/eval/train-again flow;
  # predict only runs after the first round.
  for attempt in ('1', '2'):
    outcome['training_history_' + attempt] = model.fit(
        **training_inputs).history
    if eval_inputs is not None:
      outcome['eval_result_' + attempt] = model.evaluate(**eval_inputs)
    outcome['weights_' + attempt] = model.get_weights()
    if attempt == '1' and predict_inputs is not None:
      outcome['predict_result_1'] = model.predict(**predict_inputs)
  return outcome
def compare_results(results_with_ds, results_without_ds, distribution,
                    testcase):
  """Asserts each distributed result is close to its non-distributed twin."""
  default_tolerance = 1e-5

  # Strategies whose weights are known to drift slightly get looser
  # tolerances on weight-dependent keys.
  relaxed_strategies = (
      mirrored_strategy.MirroredStrategy,
      mirrored_strategy.CoreMirroredStrategy,
      distribute_lib._DefaultDistributionStrategy)  # pylint: disable=protected-access
  if isinstance(distribution, relaxed_strategies):
    # TODO(b/119257215): Weights are not exactly the same, so use larger
    # tolerance for now. Predict should be related to weights.
    tol_table = {
        'weights_1': 1e-4,
        'weights_2': 1e-4,
        'predict_result_1': 1e-4,
    }
  else:
    tol_table = {}

  # TODO(b/119894254): Enable the training-history comparison for
  # multi-step-per-run TPU once the underlying bug is fixed.
  skip_training_history = (
      isinstance(distribution, tpu_strategy.TPUStrategy) and
      distribution.extended.steps_per_run > 1)

  for key in results_with_ds:
    if skip_training_history and key.startswith('training_history'):
      continue
    tolerance = tol_table.get(key, default_tolerance)
    testcase.assertAllClose(
        results_with_ds[key],
        results_without_ds[key],
        atol=tolerance,
        rtol=tolerance,
        msg='Fail to assert {}.'.format(key))
class LearningRateBatchScheduler(keras.callbacks.Callback):
  """Callback that derives the learning rate from the batch index.

  When `update_freq` is set, the learning rate is only updated on batches
  whose index is a multiple of `update_freq`; otherwise it is updated on
  every batch.
  """

  def __init__(self, update_freq=None):
    self._update_freq = update_freq

  def on_batch_begin(self, batch, logs=None):
    freq = self._update_freq
    if freq and batch % freq != 0:
      return
    # To avoid divergence, limit the value range.
    new_lr = 0.001 * (batch % 10)
    keras.backend.set_value(self.model.optimizer.lr, new_lr)
class TestDistributionStrategyCorrectness(test.TestCase,
                                          parameterized.TestCase):
  """Compares training results with and without a distribution strategy."""

  def _should_skip_tpu_with_eager(self, distribution):
    # TPUStrategy is not supported under eager execution; used as a skip
    # guard at the top of each test.
    return (context.executing_eagerly() and
            isinstance(distribution, tpu_strategy.TPUStrategy))

  @combinations.generate(all_strategy_combinations_with_eager_and_graph_modes())
  def test_metric_correctness(self, distribution):
    """Trains an identity model; binary accuracy must be exactly 1.0."""
    if self._should_skip_tpu_with_eager(distribution):
      self.skipTest('TPUStrategy does not support eager mode now.')

    with self.cached_session():
      keras.backend.set_image_data_format('channels_last')
      num_samples = 10000

      # Labels equal inputs, so a model initialized to the identity mapping
      # is already perfect.
      x_train = np.random.randint(0, 2, num_samples)
      x_train = np.reshape(x_train, (num_samples, 1))
      y_train = x_train
      x_train = x_train.astype('float32')
      y_train = y_train.astype('float32')

      # Create identity model.
      with distribution.scope():
        model = keras.Sequential()
        model.add(
            keras.layers.Dense(1, input_shape=(1,), kernel_initializer='ones'))
        model.compile(
            loss=keras.losses.mean_squared_error,
            optimizer=gradient_descent.GradientDescentOptimizer(0.5),
            metrics=[keras.metrics.BinaryAccuracy()])

      batch_size = 64
      batch_size = get_batch_size(batch_size, distribution)
      train_dataset = dataset_ops.Dataset.from_tensor_slices((x_train, y_train))
      train_dataset = batch_wrapper(train_dataset, batch_size, distribution)

      history = model.fit(x=train_dataset, epochs=2, steps_per_epoch=10)
      self.assertEqual(history.history['binary_accuracy'], [1.0, 1.0])

  @combinations.generate(all_strategy_combinations_with_eager_and_graph_modes())
  def test_eval_metrics_correctness(self, distribution):
    """Checks stateful and stateless metrics on all-ones / all-zeros labels."""
    if self._should_skip_tpu_with_eager(distribution):
      self.skipTest('TPUStrategy does not support eager mode now.')

    with self.cached_session():
      with distribution.scope():
        model = keras.Sequential()
        model.add(
            keras.layers.Dense(
                3, activation='relu', input_dim=4, kernel_initializer='ones'))
        model.add(
            keras.layers.Dense(
                1, activation='sigmoid', kernel_initializer='ones'))
        model.compile(
            loss='mae',
            metrics=['accuracy', keras.metrics.BinaryAccuracy()],
            optimizer=gradient_descent.GradientDescentOptimizer(0.001))

      # verify correctness of stateful and stateless metrics.
      # With all-ones labels, both metrics must report 1.0.
      x = np.ones((100, 4)).astype('float32')
      y = np.ones((100, 1)).astype('float32')
      dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).repeat()
      dataset = batch_wrapper(dataset, 4, distribution)
      outs = model.evaluate(dataset, steps=10)
      self.assertEqual(outs[1], 1.)
      self.assertEqual(outs[2], 1.)

      # With all-zeros labels, both metrics must report 0.0.
      y = np.zeros((100, 1)).astype('float32')
      dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).repeat()
      dataset = batch_wrapper(dataset, 4, distribution)
      outs = model.evaluate(dataset, steps=10)
      self.assertEqual(outs[1], 0.)
      self.assertEqual(outs[2], 0.)

  @combinations.generate(strategy_and_input_combinations())
  def test_correctness(self, distribution, use_numpy, use_validation_data,
                       model_with_data):
    """Full fit/eval/predict parity check between strategy and no-strategy."""
    if self._should_skip_tpu_with_eager(distribution):
      self.skipTest('TPUStrategy does not support eager mode now.')
    if context.executing_eagerly() and use_numpy:
      self.skipTest('Numpy as inputs is not supported with strategy in eager.')
    if context.executing_eagerly() and use_validation_data:
      self.skipTest('TODO')

    with self.cached_session():
      keras.backend.set_image_data_format('channels_last')
      # Fix seeds so both runs see identical data and initialization.
      np.random.seed(_RANDOM_SEED)
      random_seed.set_random_seed(_RANDOM_SEED)

      model_fn, data_fn = model_with_data.model_fn, model_with_data.data_fn

      # Train, eval, and predict datasets are created with the same input numpy
      # arrays.
      x_train, y_train, x_predict = data_fn()

      # The model is built once and the initial weights are saved.
      # This is used to initialize the model for both the distribution and
      # non-distribution run.
      model = model_fn()
      initial_weights = model.get_weights()

      def input_fn(dist):
        return get_correctness_test_inputs(
            use_numpy, use_validation_data, dist, x_train, y_train, x_predict)

      results_with_ds = fit_eval_and_predict(
          initial_weights, input_fn=input_fn, model_fn=model_fn,
          distribution=distribution)
      results_without_ds = fit_eval_and_predict(
          initial_weights, input_fn=input_fn, model_fn=model_fn,
          distribution=None)

      # First, special case, for multi-replica distributed training, batch norm
      # is not aggregated globally. So it is expected to have different weights.
      if (model_with_data.with_batch_norm and
          distribution.num_replicas_in_sync > 1):
        with self.assertRaises(AssertionError):
          compare_results(results_with_ds, results_without_ds, distribution,
                          testcase=self)
      else:
        compare_results(results_with_ds, results_without_ds, distribution,
                        testcase=self)

  @combinations.generate(all_strategy_combinations_with_graph_mode())
  def test_dynamic_lr(self, distribution):
    """Parity check when a callback mutates the learning rate per batch."""
    with self.cached_session():
      keras.backend.set_image_data_format('channels_last')
      np.random.seed(_RANDOM_SEED)
      random_seed.set_random_seed(_RANDOM_SEED)

      x_train, y_train, _ = _dnn_training_data()

      model = _create_dnn_model()
      initial_weights = model.get_weights()
      update_freq = None

      if (isinstance(distribution, tpu_strategy.TPUStrategy) and
          distribution.extended.steps_per_run > 1):
        # For TPUStrategy with steps_per_run > 1, the callback is not invoked
        # every step. So, to compare the CPU/TPU, we let the CPU to behave the
        # same as TPU.
        update_freq = distribution.extended.steps_per_run

      def input_fn(dist):
        training_epochs = 2
        global_batch_size = 64
        batch_size = get_batch_size(global_batch_size, dist)

        training_inputs = {
            'batch_size': batch_size,
            'x': x_train,
            'y': y_train,
            'epochs': training_epochs,
            'shuffle': False,
            'callbacks': [LearningRateBatchScheduler(update_freq)],
            'validation_data': (x_train, y_train)
        }
        # In this test case, we do not care eval and predict.
        eval_inputs, predict_inputs = None, None
        return training_inputs, eval_inputs, predict_inputs

      results_with_ds = fit_eval_and_predict(
          initial_weights, input_fn=input_fn, model_fn=_create_dnn_model,
          distribution=distribution)
      results_without_ds = fit_eval_and_predict(
          initial_weights, input_fn=input_fn, model_fn=_create_dnn_model,
          distribution=None)
      compare_results(results_with_ds, results_without_ds, distribution,
                      testcase=self)
# Standard test-runner entry point.
if __name__ == '__main__':
  test.main()
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
class IteratorTest(test.TestCase):
  """Tests for dataset iterator creation, sharing, and error behavior."""

  def testAttemptingGradientsRaiseExceptions(self):
    """Taking gradients through an iterator's output must raise LookupError."""
    component = constant_op.constant([1])
    side = constant_op.constant(0)
    add = lambda x: x + side
    dataset = dataset_ops.Dataset.from_tensor_slices(component).map(add)
    value = dataset.make_one_shot_iterator().get_next()
    # Gradients w.r.t. the dataset input, the captured tensor, and both.
    with self.assertRaisesRegexp(LookupError, "No gradient defined"):
      gradients_impl.gradients(value, component)
    with self.assertRaisesRegexp(LookupError, "No gradient defined"):
      gradients_impl.gradients(value, side)
    with self.assertRaisesRegexp(LookupError, "No gradient defined"):
      gradients_impl.gradients(value, [component, side])

  def testOneShotIterator(self):
    """A mapped, repeated dataset yields squared components 14 times over."""
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))

    def _map_fn(x, y, z):
      return math_ops.square(x), math_ops.square(y), math_ops.square(z)

    iterator = (dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
                .repeat(14).make_one_shot_iterator())
    get_next = iterator.get_next()

    # Static shapes of the outputs match the per-element component shapes.
    self.assertEqual([c.shape[1:] for c in components],
                     [t.shape for t in get_next])

    with self.test_session() as sess:
      for _ in range(14):
        for i in range(7):
          result = sess.run(get_next)
          for component, result_component in zip(components, result):
            self.assertAllEqual(component[i]**2, result_component)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testOneShotIteratorCaptureByValue(self):
    """Same as testOneShotIterator, but with pre-converted tensor inputs."""
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))
    tensor_components = tuple([ops.convert_to_tensor(c) for c in components])

    def _map_fn(x, y, z):
      return math_ops.square(x), math_ops.square(y), math_ops.square(z)

    iterator = (dataset_ops.Dataset.from_tensor_slices(tensor_components)
                .map(_map_fn).repeat(14).make_one_shot_iterator())
    get_next = iterator.get_next()

    self.assertEqual([c.shape[1:] for c in components],
                     [t.shape for t in get_next])

    with self.test_session() as sess:
      for _ in range(14):
        for i in range(7):
          result = sess.run(get_next)
          for component, result_component in zip(components, result):
            self.assertAllEqual(component[i]**2, result_component)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testOneShotIteratorInsideContainer(self):
    """Iterators built in distinct containers must not share resources."""
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))

    def within_container():
      def _map_fn(x, y, z):
        return math_ops.square(x), math_ops.square(y), math_ops.square(z)
      iterator = (dataset_ops.Dataset.from_tensor_slices(components)
                  .map(_map_fn).repeat(14).make_one_shot_iterator())
      return iterator.get_next()

    server = server_lib.Server.create_local_server()

    # Create two iterators within unique containers, and run them to
    # make sure that the resources aren't shared.
    #
    # The test below would fail if cname were the same across both
    # sessions.
    for i in range(2):
      with session.Session(server.target) as sess:
        cname = "iteration%d" % i
        with ops.container(cname):
          get_next = within_container()

        # Each container gets a fresh iterator, so the full 14 epochs are
        # available in each session.
        for _ in range(14):
          for i in range(7):
            result = sess.run(get_next)
            for component, result_component in zip(components, result):
              self.assertAllEqual(component[i]**2, result_component)
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next)

  def testOneShotIteratorNonBlocking(self):
    """One-shot iterator initialization must not deadlock or double-yield."""
    dataset = dataset_ops.Dataset.from_tensors([1, 2, 3]).map(lambda x: x * x)
    iterator = dataset.make_one_shot_iterator()
    next_element = iterator.get_next()

    # Create a session with a single thread to ensure that the
    # one-shot iterator initializer does not deadlock.
    config = config_pb2.ConfigProto(inter_op_parallelism_threads=1,
                                    use_per_session_threads=True)
    with session.Session(config=config) as sess:
      self.assertAllEqual([1, 4, 9], sess.run(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)

    # Test with multiple threads invoking the one-shot iterator concurrently.
    with session.Session(config=config) as sess:
      results = []

      def consumer_thread():
        try:
          results.append(sess.run(next_element))
        except errors.OutOfRangeError:
          results.append(None)

      num_threads = 8
      threads = [
          self.checkedThread(consumer_thread) for _ in range(num_threads)]
      for t in threads:
        t.start()
      for t in threads:
        t.join()

      # Exactly one thread receives the single element; the rest hit
      # OutOfRangeError and record None.
      self.assertEqual(num_threads, len(results))
      self.assertEqual(num_threads - 1,
                       len([None for r in results if r is None]))
      self.assertAllEqual([[1, 4, 9]], [r for r in results if r is not None])

  def testOneShotIteratorInitializerFails(self):
    """A failing initializer must fail consistently on every attempt."""
    # Define a dataset whose initialization will always fail.
    dataset = dataset_ops.Dataset.from_tensors(
        array_ops.check_numerics(
            constant_op.constant(1.0) / constant_op.constant(0.0), "oops"))
    iterator = dataset.make_one_shot_iterator()
    next_element = iterator.get_next()

    with self.test_session() as sess:
      with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
        sess.run(next_element)

      # Test that subsequent attempts to use the iterator also fail.
      with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
        sess.run(next_element)

    with self.test_session() as sess:

      def consumer_thread():
        with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
          sess.run(next_element)

      # Concurrent consumers must all observe the same failure.
      num_threads = 8
      threads = [
          self.checkedThread(consumer_thread) for _ in range(num_threads)]
      for t in threads:
        t.start()
      for t in threads:
        t.join()

  def testSimpleSharedResource(self):
    """An iterator shared by name is visible across sessions on one server."""
    components = (
        np.array(1, dtype=np.int64),
        np.array([1, 2, 3], dtype=np.int64),
        np.array(37.0, dtype=np.float64)
    )

    server = server_lib.Server.create_local_server()

    # Create two non-overlapping sessions that share the same iterator
    # resource on the same server, and verify that an action of the
    # first session (initializing the iterator) is visible in the
    # second session.
    with ops.Graph().as_default():
      iterator = (dataset_ops.Dataset.from_tensors(components)
                  .map(lambda x, y, z: (x, y, z)).make_initializable_iterator(
                      shared_name="shared_iterator"))
      init_op = iterator.initializer
      get_next = iterator.get_next()

      with session.Session(server.target) as sess:
        sess.run(init_op)
        results = sess.run(get_next)
        for component, result_component in zip(components, results):
          self.assertAllEqual(component, result_component)
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next)

        # Re-initialize the iterator in the first session.
        sess.run(init_op)

    with ops.Graph().as_default():
      # Re-define the iterator manually, without defining any of the
      # functions in this graph, to ensure that we are not
      # accidentally redefining functions with the same names in the
      # new graph.
      iterator = dataset_ops.Iterator.from_structure(
          shared_name="shared_iterator",
          output_types=(dtypes.int64, dtypes.int64, dtypes.float64),
          output_shapes=([], [3], []))
      get_next = iterator.get_next()

      with session.Session(server.target) as sess:
        # Use the iterator without re-initializing in the second session.
        results = sess.run(get_next)
        for component, result_component in zip(components, results):
          self.assertAllEqual(component, result_component)
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(get_next)

  def testNotInitializedError(self):
    """Using an uninitialized initializable iterator raises a clear error."""
    components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))
    iterator = (dataset_ops.Dataset.from_tensors(components)
                .make_initializable_iterator())
    get_next = iterator.get_next()

    with self.test_session() as sess:
      with self.assertRaisesRegexp(errors.FailedPreconditionError,
                                   "iterator has not been initialized"):
        sess.run(get_next)

  def testReinitializableIterator(self):
    """One iterator can be re-initialized against multiple datasets."""
    dataset_3 = dataset_ops.Dataset.from_tensors(
        constant_op.constant([1, 2, 3]))
    dataset_4 = dataset_ops.Dataset.from_tensors(
        constant_op.constant([4, 5, 6, 7]))
    iterator = dataset_ops.Iterator.from_structure(dataset_3.output_types,
                                                   [None])

    dataset_3_init_op = iterator.make_initializer(dataset_3)
    dataset_4_init_op = iterator.make_initializer(dataset_4)
    get_next = iterator.get_next()

    self.assertEqual(dataset_3.output_types, iterator.output_types)
    self.assertEqual(dataset_4.output_types, iterator.output_types)
    self.assertEqual([None], iterator.output_shapes.as_list())

    with self.test_session() as sess:
      # The iterator is initially uninitialized.
      with self.assertRaises(errors.FailedPreconditionError):
        sess.run(get_next)

      # Initialize with one dataset.
      sess.run(dataset_3_init_op)
      self.assertAllEqual([1, 2, 3], sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Initialize with a different dataset.
      sess.run(dataset_4_init_op)
      self.assertAllEqual([4, 5, 6, 7], sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Reinitialize with the first dataset.
      sess.run(dataset_3_init_op)
      self.assertAllEqual([1, 2, 3], sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testReinitializableIteratorStaticErrors(self):
    """Structure/type/shape mismatches are rejected at graph-build time."""
    # Non-matching structure for types and shapes.
    with self.assertRaises(TypeError):
      iterator = dataset_ops.Iterator.from_structure((dtypes.int64,
                                                      dtypes.float64), [None])

    # Test validation of dataset argument.
    iterator = dataset_ops.Iterator.from_structure((dtypes.int64,
                                                    dtypes.float64))

    # Incompatible structure.
    with self.assertRaises(ValueError):
      iterator.make_initializer(
          dataset_ops.Dataset.from_tensors(((constant_op.constant(
              [1, 2, 3], dtype=dtypes.int64),), (constant_op.constant(
                  [4., 5., 6., 7.], dtype=dtypes.float64),))))

    # Incompatible types.
    with self.assertRaises(TypeError):
      iterator.make_initializer(
          dataset_ops.Dataset.from_tensors((constant_op.constant(
              [1, 2, 3], dtype=dtypes.int32), constant_op.constant(
                  [4., 5., 6., 7.], dtype=dtypes.float32))))

    # Incompatible shapes.
    iterator = dataset_ops.Iterator.from_structure(
        (dtypes.int64, dtypes.float64), ([None], []))
    with self.assertRaises(TypeError):
      iterator.make_initializer(
          dataset_ops.Dataset.from_tensors((constant_op.constant(
              [1, 2, 3], dtype=dtypes.int64), constant_op.constant(
                  [4., 5., 6., 7.], dtype=dtypes.float64))))
# Standard test-runner entry point.
if __name__ == "__main__":
  test.main()
| |
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
from mock import patch, MagicMock, ANY, call
from nose.tools import assert_equal, assert_false, assert_raises
from nose_parameterized import parameterized
from pycalico.datastore_datatypes import Rule, Rules
from policy_parser import *
"""
Specifications for NetworkPolicies and the expected set of
Calico rules that should be generated as a result.
"""

# An empty NetworkPolicy.
network_policy_empty = {"kind": "networkpolicy",
                        "apiversion": "net.beta.kubernetes.io",
                        "metadata": {"namespace": "ns",
                                     "name": "test-policy"},
                        "spec": {}}
# No ingress section -> no rules generated.
network_policy_empty_result = []

# NetworkPolicy with only ports defined.
ports = [{"port": 80, "protocol": "TCP"},
         {"port": 443, "protocol": "UDP"}]
spec = {"ingress": [{"ports": ports}]}
network_policy_ports = {"kind": "networkpolicy",
                        "apiversion": "net.beta.kubernetes.io",
                        "metadata": {"namespace": "ns",
                                     "name": "test-policy"},
                        "spec": spec}
# One allow rule per port entry, with the protocol lower-cased.
network_policy_ports_result = [
    Rule(action="allow", dst_ports=[80], protocol="tcp"),
    Rule(action="allow", dst_ports=[443], protocol="udp")
]

# NetworkPolicy with only pods defined by labels.
froms = [{"podSelector": {"matchLabels": {"role": "diags", "tier": "db"}}}]
spec = {"ingress": [{"from": froms}]}
network_policy_froms = {"kind": "networkpolicy",
                        "apiversion": "net.beta.kubernetes.io",
                        "metadata": {"namespace": "ns",
                                     "name": "test-policy"},
                        "spec": spec}
# Labels become a Calico selector, scoped to the policy's namespace.
network_policy_froms_result = [
    Rule(action="allow",
         src_selector="tier == 'db' && role == 'diags' && calico/k8s_ns == 'ns'")
]

# NetworkPolicy with ports and pods defined by labels.
froms = [{"podSelector": {"matchLabels": {"role": "diags", "tier": "db"}}}]
ports = [{"port": 80, "protocol": "TCP"},
         {"port": 443, "protocol": "UDP"}]
spec = {"ingress": [{"from": froms, "ports": ports}]}
network_policy_both = {"kind": "networkpolicy",
                       "apiversion": "net.beta.kubernetes.io",
                       "metadata": {"namespace": "ns",
                                    "name": "test-policy"},
                       "spec": spec}
# Cross-product: each port entry is combined with the pod selector.
network_policy_both_result = [
    Rule(action="allow",
         src_selector="tier == 'db' && role == 'diags' && calico/k8s_ns == 'ns'",
         dst_ports=[80], protocol="tcp"),
    Rule(action="allow",
         src_selector="tier == 'db' && role == 'diags' && calico/k8s_ns == 'ns'",
         dst_ports=[443], protocol="udp")
]

# NetworkPolicy with pods and namespaces defined by labels.
froms = [{"namespaceSelector": {"matchLabels": {"role": "prod"}}},
         {"podSelector": {"matchLabels": {"tier": "db"}}}]
spec = {"ingress": [{"from": froms}]}
network_policy_from_pods_ns = {"kind": "networkpolicy",
                               "apiversion": "net.beta.kubernetes.io",
                               "metadata": {"namespace": "ns",
                                            "name": "test-policy"},
                               "spec": spec}
# Namespace selectors map to k8s_ns/label/<key> selectors; pod selectors
# stay namespace-scoped.
network_policy_from_pods_ns_result = [
    Rule(action="allow", src_selector="k8s_ns/label/role == 'prod'"),
    Rule(action="allow", src_selector="tier == 'db' && calico/k8s_ns == 'ns'")
]

# NetworkPolicy with pods and namespaces defined by expressions.
froms = [{"namespaceSelector": {"matchExpressions": [{"key": "role",
                                                      "operator": "NotIn",
                                                      "values": ["prod", "staging"]}]}},
         {"podSelector": {"matchExpressions": [{"key": "tier",
                                                "operator": "In",
                                                "values": ["db"]}]}}]
spec = {"ingress": [{"from": froms}]}
network_policy_from_pods_ns_expr = {"kind": "networkpolicy",
                                    "apiversion": "net.beta.kubernetes.io",
                                    "metadata": {"namespace": "ns",
                                                 "name": "test-policy"},
                                    "spec": spec}
# In/NotIn expressions become Calico "in"/"not in" set selectors.
network_policy_from_pods_ns_expr_result = [
    Rule(action="allow", src_selector="k8s_ns/label/role not in { \"prod\", \"staging\" }"),
    Rule(action="allow", src_selector="tier in { \"db\" } && calico/k8s_ns == 'ns'")
]

# NetworkPolicy all pods and all namespaces.
froms = [{"namespaceSelector": None},
         {"podSelector": None}]
spec = {"ingress": [{"from": froms}]}
network_policy_from_all = {"kind": "networkpolicy",
                           "apiversion": "net.beta.kubernetes.io",
                           "metadata": {"namespace": "ns",
                                        "name": "test-policy"},
                           "spec": spec}
# A null namespaceSelector matches every namespace; a null podSelector
# matches every pod in the policy's namespace.
network_policy_from_all_result = [
    Rule(action="allow", src_selector="has(calico/k8s_ns)"),
    Rule(action="allow", src_selector="calico/k8s_ns == 'ns'")
]

# Invalid: Cannot declare both namespaces and pods in same from.
froms = [{"namespaceSelector": None, "podSelector": None}]
spec = {"ingress": [{"from": froms}]}
network_policy_invalid_both = {"kind": "networkpolicy",
                               "apiversion": "net.beta.kubernetes.io",
                               "metadata": {"namespace": "ns",
                                            "name": "test-policy"},
                               "spec": spec}
network_policy_invalid_both_result = PolicyError

# No ingress rules - should allow all.
spec = {"ingress": [None]}
network_policy_empty_rule = {"kind": "networkpolicy",
                             "apiversion": "net.beta.kubernetes.io",
                             "metadata": {"namespace": "ns",
                                          "name": "test-policy"},
                             "spec": spec}
network_policy_empty_rule_result = [Rule(action="allow")]

# NetworkPolicy with podSelector defined by expressions.
ports = [{"port": 80, "protocol": "TCP"}]
selector = {"matchExpressions": [{"key": "name", "operator": "Exists"},
                                 {"key": "date", "operator": "DoesNotExist"}]}
spec = {"ingress": [{"ports": ports}], "podSelector": selector}
network_policy_pod_sel_expr = {"kind": "networkpolicy",
                               "apiversion": "net.beta.kubernetes.io",
                               "metadata": {"namespace": "ns",
                                            "name": "test-policy"},
                               "spec": spec}
network_policy_pod_sel_expr_result = "calico/k8s_ns == 'ns' && has(name) && ! has(date)"

# NetworkPolicy with podSelector defined by invalid expression.
ports = [{"port": 80, "protocol": "TCP"}]
selector = {"matchExpressions": [{"key": "name",
                                  "operator": "SoundsLike",
                                  "values": ["alice", "bob"]}]}
spec = {"ingress": [{"ports": ports}], "podSelector": selector}
network_policy_invalid_op = {"kind": "networkpolicy",
                             "apiversion": "net.beta.kubernetes.io",
                             "metadata": {"namespace": "ns",
                                          "name": "test-policy"},
                             "spec": spec}
# Unknown operators must be rejected by the parser.
network_policy_invalid_op_result = PolicyError
class PolicyParserTest(unittest.TestCase):
    """
    Test class for PolicyParser class.
    """
    @parameterized.expand([
        (network_policy_empty, network_policy_empty_result),
        (network_policy_ports, network_policy_ports_result),
        (network_policy_froms, network_policy_froms_result),
        (network_policy_both, network_policy_both_result),
        (network_policy_from_pods_ns, network_policy_from_pods_ns_result),
        (network_policy_from_pods_ns_expr, network_policy_from_pods_ns_expr_result),
        (network_policy_from_all, network_policy_from_all_result),
        (network_policy_invalid_both, network_policy_invalid_both_result),
        (network_policy_empty_rule, network_policy_empty_rule_result),
    ])
    def test_parse_policy(self, policy, expected):
        """
        Parses the given policy and compares the generated inbound rules
        against the expected result (or expected exception class).
        """
        # Parse it.
        self.parser = PolicyParser(policy)

        # If expected result is an exception, try to catch it.
        try:
            rules = self.parser.calculate_inbound_rules()
        # NOTE: was ``except Exception, e`` (Python 2-only syntax, a
        # SyntaxError on Python 3); ``as`` works on Python 2.6+ and 3.x.
        except Exception as e:
            if isinstance(e, expected):
                pass
            else:
                raise
        else:
            assert_equal(sorted(rules), sorted(expected))

    @parameterized.expand([
        (network_policy_pod_sel_expr, network_policy_pod_sel_expr_result),
        (network_policy_invalid_op, network_policy_invalid_op_result)
    ])
    def test_pod_selector(self, policy, expected):
        """
        Calculates the pod selector for the given policy and compares it
        against the expected selector string (or expected exception class).
        """
        # Parse it.
        self.parser = PolicyParser(policy)

        # If expected result is an exception, try to catch it.
        try:
            selector = self.parser.calculate_pod_selector()
        except Exception as e:
            if isinstance(e, expected):
                pass
            else:
                raise
        else:
            assert_equal(selector, expected)
| |
# -*- coding: utf-8 -*-
'''
Support for ``pkgng``, the new package manager for FreeBSD
.. warning::
This module has been completely rewritten. Up to and including version
0.17.x, it was available as the ``pkgng`` module, (``pkgng.install``,
``pkgng.delete``, etc.), but moving forward this module will no longer be
available as ``pkgng``, as it will behave like a normal Salt ``pkg``
provider. The documentation below should not be considered to apply to this
module in versions <= 0.17.x. If your minion is running a 0.17.x release or
older, then the documentation for this module can be viewed using the
:mod:`sys.doc <salt.modules.sys.doc>` function:
.. code-block:: bash
salt bsdminion sys.doc pkgng
This module provides an interface to ``pkg(8)``. It acts as the default
package provider for FreeBSD 10 and newer. For FreeBSD hosts which have
been upgraded to use pkgng, you will need to override the ``pkg`` provider
by setting the :conf_minion:`providers` parameter in your Minion config
file, in order to use this module to manage packages, like so:
.. code-block:: yaml
providers:
pkg: pkgng
'''
from __future__ import absolute_import
# Import python libs
import copy
import logging
import os
# Import salt libs
import salt.utils
from salt.exceptions import CommandExecutionError, MinionError
import salt.ext.six as six
# Module-level logger for this execution module.
log = logging.getLogger(__name__)

# Define the module's virtual name
__virtualname__ = 'pkg'
def __virtual__():
    '''
    Load as 'pkg' on FreeBSD 10 and greater.
    Load as 'pkg' on DragonFly BSD.
    Load as 'pkg' on FreeBSD 9 when config option
    ``providers:pkg`` is set to 'pkgng'.
    '''
    if __grains__['kernel'] == 'DragonFly':
        return __virtualname__
    if __grains__['os'] == 'FreeBSD':
        if float(__grains__['osrelease']) >= 10:
            return __virtualname__
        if float(__grains__['osmajorrelease']) == 9:
            # FreeBSD 9 only loads this module when explicitly configured.
            providers = __opts__['providers'] if 'providers' in __opts__ else {}
            log.debug('__opts__.providers: {0}'.format(providers))
            if providers and 'pkg' in providers and providers['pkg'] == 'pkgng':
                log.debug('Configuration option \'providers:pkg\' is set to '
                          '\'pkgng\', using \'pkgng\' in favor of \'freebsdpkg\'.')
                return __virtualname__
    return False
def _pkg(jail=None, chroot=None):
'''
Returns the prefix for a pkg command, using -j if a jail is specified, or
-c if chroot is specified.
'''
ret = 'pkg'
if jail:
ret += ' -j {0!r}'.format(jail)
elif chroot:
ret += ' -c {0!r}'.format(chroot)
return ret
def _get_pkgng_version(jail=None, chroot=None):
    '''
    return the version of 'pkg'
    '''
    # NOTE(review): _pkg() can return a multi-word string (e.g.
    # "pkg -j 'myjail'"), which is passed here as a SINGLE element of the
    # command list -- verify that cmd.run handles this correctly for the
    # jail/chroot cases rather than treating it as one literal program name.
    return __salt__['cmd.run']([_pkg(jail, chroot), '--version']).strip()
def _get_version(name, results):
'''
``pkg search`` will return all packages for which the pattern is a match.
Narrow this down and return the package version, or None if no exact match.
'''
for line in results.splitlines():
if not line:
continue
try:
pkgname, pkgver = line.rsplit('-', 1)
except ValueError:
continue
if pkgname == name:
return pkgver
return None
def _contextkey(jail=None, chroot=None, prefix='pkg.list_pkgs'):
'''
As this module is designed to manipulate packages in jails and chroots, use
the passed jail/chroot to ensure that a key in the __context__ dict that is
unique to that jail/chroot is used.
'''
if jail:
return str(prefix) + '.jail_{0}'.format(jail)
elif chroot:
return str(prefix) + '.chroot_{0}'.format(chroot)
return prefix
def parse_config(file_name='/usr/local/etc/pkg.conf'):
    '''
    Return dict of uncommented global variables.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.parse_config

    ``NOTE:`` not working properly right now
    '''
    ret = {}
    if not os.path.isfile(file_name):
        return 'Unable to find {0} on file system'.format(file_name)
    with salt.utils.fopen(file_name) as ifile:
        for line in ifile:
            # Skip comments and blank lines.
            if line.startswith('#') or line.startswith('\n'):
                continue
            # Previously ``line.split('\t')`` raised ValueError on any
            # non-comment line that did not contain exactly one tab; split
            # on the first tab and skip malformed lines instead of crashing.
            key, sep, value = line.partition('\t')
            if not sep:
                continue
            ret[key] = value
    ret['config_file'] = file_name
    return ret
def version(*names, **kwargs):
    '''
    Returns a string representing the package version or an empty string if not
    installed. If more than one package name is specified, a dict of
    name/version pairs is returned.

    .. note::

        This function can accessed using ``pkg.info`` in addition to
        ``pkg.version``, to more closely match the CLI usage of ``pkg(8)``.

    jail
        Get package version information for the specified jail

    chroot
        Get package version information for the specified chroot (ignored if
        ``jail`` is specified)

    with_origin : False
        Return a nested dictionary containing both the origin name and version
        for each specified package.

        .. versionadded:: 2014.1.0

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.version <package name>
        salt '*' pkg.version <package name> jail=<jail name or id>
        salt '*' pkg.version <package1> <package2> <package3> ...
    '''
    with_origin = kwargs.pop('with_origin', False)
    versions = __salt__['pkg_resource.version'](*names, **kwargs)
    if not salt.utils.is_true(with_origin):
        return versions
    # Normalize to a dict keyed by name before attaching the origin subdict
    # (a single name yields a bare version string).
    if len(names) == 1:
        versions = {names[0]: versions}
    origins = __context__.get('pkg.origin', {})
    return {
        pkgname: {'origin': origins.get(pkgname, ''), 'version': pkgver}
        for pkgname, pkgver in six.iteritems(versions)
    }
# Support pkg.info get version info, since this is the CLI usage
info = salt.utils.alias_function(version, 'info')
def refresh_db(jail=None, chroot=None, force=False):
    '''
    Refresh PACKAGESITE contents

    .. note::
        Also reachable as ``pkg.update``, matching the ``pkg(8)`` CLI.

    jail
        Refresh the pkg database within the specified jail

    chroot
        Refresh the pkg database within the specified chroot (ignored if
        ``jail`` is specified)

    force
        Force a full download of the repository catalog without regard to the
        respective ages of the local and remote copies of the catalog.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.refresh_db
        salt '*' pkg.refresh_db force=True
    '''
    flags = ' -f' if force else ''
    cmd = '{0} update{1}'.format(_pkg(jail, chroot), flags)
    # Success is a zero exit code from ``pkg update``
    return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
# ``pkg.update`` is an alias for ``pkg.refresh_db``, mirroring the pkg(8) CLI usage
update = salt.utils.alias_function(refresh_db, 'update')
def latest_version(*names, **kwargs):
    '''
    Return the latest version of the named package available for upgrade or
    installation. If more than one package name is specified, a dict of
    name/version pairs is returned.

    If the latest version of a given package is already installed, an empty
    string will be returned for that package.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.latest_version <package name>
        salt '*' pkg.latest_version <package name> jail=<jail name or id>
        salt '*' pkg.latest_version <package name> chroot=/path/to/chroot
    '''
    if not names:
        return ''
    jail = kwargs.get('jail')
    chroot = kwargs.get('chroot')
    # Start every requested name at '' ("nothing newer available")
    ret = dict.fromkeys(names, '')
    installed_pkgs = list_pkgs(versions_as_list=True, jail=jail, chroot=chroot)
    # pkg >= 1.6.0 supports quiet search output
    quiet = salt.utils.compare_versions(
        _get_pkgng_version(jail, chroot), '>=', '1.6.0')
    for pkgname in names:
        search_cmd = [_pkg(jail, chroot), 'search']
        if quiet:
            search_cmd.append('-q')
        search_cmd.append(pkgname)
        candidate = _get_version(
            pkgname,
            __salt__['cmd.run'](search_cmd,
                                python_shell=False,
                                output_loglevel='trace')
        )
        if candidate is None:
            continue
        current = installed_pkgs.get(pkgname, [])
        # Report the candidate unless an installed version already
        # satisfies it
        if not current or not any(
                salt.utils.compare_versions(ver1=ver,
                                            oper='>=',
                                            ver2=candidate)
                for ver in current):
            ret[pkgname] = candidate
    # A single requested name returns a bare string
    if len(names) == 1:
        return ret[names[0]]
    return ret
# ``pkg.available_version`` is a deprecated alias for ``pkg.latest_version``
available_version = salt.utils.alias_function(latest_version, 'available_version')
def list_pkgs(versions_as_list=False,
              jail=None,
              chroot=None,
              with_origin=False,
              **kwargs):
    '''
    List the packages currently installed as a dict::

        {'<package_name>': '<version>'}

    jail
        List the packages in the specified jail

    chroot
        List the packages in the specified chroot (ignored if ``jail`` is
        specified)

    with_origin : False
        Return a nested dictionary containing both the origin name and version
        for each installed package.

        .. versionadded:: 2014.1.0

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.list_pkgs
        salt '*' pkg.list_pkgs jail=<jail name or id>
        salt '*' pkg.list_pkgs chroot=/path/to/chroot
    '''
    # 'removed'/'purge_desired' are not yet implemented or not applicable
    if any([salt.utils.is_true(kwargs.get(x))
            for x in ('removed', 'purge_desired')]):
        return {}

    versions_as_list = salt.utils.is_true(versions_as_list)
    contextkey_pkg = _contextkey(jail, chroot)
    contextkey_origins = _contextkey(jail, chroot, prefix='pkg.origin')

    # Serve from the minion context cache when a previous call has already
    # listed packages for this jail/chroot.
    if contextkey_pkg in __context__:
        ret = copy.deepcopy(__context__[contextkey_pkg])
        if not versions_as_list:
            __salt__['pkg_resource.stringify'](ret)
        if salt.utils.is_true(with_origin):
            origins = __context__.get(contextkey_origins, {})
            return dict([
                (x, {'origin': origins.get(x, ''), 'version': y})
                for x, y in six.iteritems(ret)
            ])
        return ret

    ret = {}
    origins = {}
    # ``pkg info -ao`` prints one "<name>-<version> <origin>" pair per line
    cmd = '{0} info -ao'.format(_pkg(jail, chroot))
    out = __salt__['cmd.run_stdout'](
        cmd,
        python_shell=False,
        output_loglevel='trace')
    for line in out.splitlines():
        if not line:
            continue
        try:
            pkg, origin = line.split()
            # The version is everything after the last dash
            pkgname, pkgver = pkg.rsplit('-', 1)
        except ValueError:
            # Line did not match the expected two-field format; skip it
            continue
        __salt__['pkg_resource.add_pkg'](ret, pkgname, pkgver)
        origins[pkgname] = origin

    __salt__['pkg_resource.sort_pkglist'](ret)
    # Cache a deep copy so later mutation of the returned dict cannot
    # corrupt the cached listing.
    __context__[contextkey_pkg] = copy.deepcopy(ret)
    __context__[contextkey_origins] = origins
    if not versions_as_list:
        __salt__['pkg_resource.stringify'](ret)
    if salt.utils.is_true(with_origin):
        return dict([
            (x, {'origin': origins.get(x, ''), 'version': y})
            for x, y in six.iteritems(ret)
        ])
    return ret
def update_package_site(new_url):
    '''
    Updates remote package repo URL, PACKAGESITE var to be exact.

    Must use ``http://``, ``ftp://``, or ``https://`` protocol

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.update_package_site http://127.0.0.1/
    '''
    # NOTE(review): the protocol requirement stated above is not actually
    # validated here — any string is written verbatim; confirm whether
    # validation should be added.
    config_file = parse_config()['config_file']
    # Rewrite the PACKAGESITE line in place via the file.sed module
    __salt__['file.sed'](
        config_file, 'PACKAGESITE.*', 'PACKAGESITE\t : {0}'.format(new_url)
    )
    # add change return later
    return True
def stats(local=False, remote=False, jail=None, chroot=None):
    '''
    Return pkgng stats.

    local
        Display stats only for the local package database.

    remote
        Display stats only for the remote package database(s).

    jail
        Retrieve stats from the specified jail.

    chroot
        Retrieve stats from the specified chroot (ignored if ``jail`` is
        specified).

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.stats
        salt '*' pkg.stats local=True
        salt '*' pkg.stats jail=<jail name or id> remote=True
        salt '*' pkg.stats chroot=/path/to/chroot
    '''
    # Assemble the -l / -r flags from the boolean toggles
    flags = ''.join(ch for ch, enabled in (('l', local), ('r', remote))
                    if enabled)
    opts = '-' + flags if flags else ''
    output = __salt__['cmd.run'](
        '{0} stats {1}'.format(_pkg(jail, chroot), opts),
        python_shell=False,
        output_loglevel='trace'
    )
    # Strip the leading tab indentation from each output line
    return [line.strip("\t") for line in output.split("\n")]
def backup(file_name, jail=None, chroot=None):
    '''
    Export installed packages into yaml+mtree file

    file_name
        Path to write the backup to (relative to the jail/chroot root when
        one is used)

    jail
        Backup packages from the specified jail. Note that this will run the
        command within the jail, and so the path to the backup file will be
        relative to the root of the jail

    chroot
        Backup packages from the specified chroot (ignored if ``jail`` is
        specified). Note that this will run the command within the chroot,
        and so the path to the backup file will be relative to the root of
        the chroot.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.backup /tmp/pkg
        salt '*' pkg.backup /tmp/pkg jail=<jail name or id>
        salt '*' pkg.backup /tmp/pkg chroot=/path/to/chroot
    '''
    res = __salt__['cmd.run'](
        '{0} backup -d {1!r}'.format(_pkg(jail, chroot), file_name),
        python_shell=False,
        output_loglevel='trace'
    )
    # ``pkg backup`` reports progress followed by '...'; return what comes
    # after it.  The old unguarded ``split('...')[1]`` raised IndexError
    # whenever the command failed and printed no '...'; with maxsplit=1 and
    # [-1], error output is returned as-is instead.
    return res.split('...', 1)[-1]
def restore(file_name, jail=None, chroot=None):
    '''
    Reads archive created by pkg backup -d and recreates the database.

    file_name
        Path of the backup archive to restore from (relative to the
        jail/chroot root when one is used)

    jail
        Restore database to the specified jail. Note that this will run the
        command within the jail, so the path is relative to the jail root.

    chroot
        Restore database to the specified chroot (ignored if ``jail`` is
        specified). The path is relative to the chroot root.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.restore /tmp/pkg
        salt '*' pkg.restore /tmp/pkg jail=<jail name or id>
        salt '*' pkg.restore /tmp/pkg chroot=/path/to/chroot
    '''
    cmd = '{0} backup -r {1!r}'.format(_pkg(jail, chroot), file_name)
    return __salt__['cmd.run'](
        cmd,
        python_shell=False,
        output_loglevel='trace'
    )
def audit(jail=None, chroot=None):
    '''
    Audits installed packages against known vulnerabilities

    jail
        Audit packages within the specified jail

    chroot
        Audit packages within the specified chroot (ignored if ``jail`` is
        specified)

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.audit
        salt '*' pkg.audit jail=<jail name or id>
        salt '*' pkg.audit chroot=/path/to/chroot
    '''
    # -F fetches the vulnerability database before auditing
    cmd = '{0} audit -F'.format(_pkg(jail, chroot))
    return __salt__['cmd.run'](
        cmd,
        python_shell=False,
        output_loglevel='trace'
    )
def install(name=None,
            fromrepo=None,
            pkgs=None,
            sources=None,
            jail=None,
            chroot=None,
            orphan=False,
            force=False,
            glob=False,
            local=False,
            dryrun=False,
            quiet=False,
            reinstall_requires=False,
            regex=False,
            pcre=False,
            **kwargs):
    '''
    Install package(s) from a repository

    name
        The name of the package to install

    fromrepo
        In multi-repo mode, override the pkg.conf ordering and only attempt
        to download packages from the named repository.

    jail
        Install the package into the specified jail

    chroot
        Install the package into the specified chroot (ignored if ``jail`` is
        specified)

    orphan
        Mark the installed package as orphan. Will be automatically removed
        if no other packages depend on them. For more information please
        refer to ``pkg-autoremove(8)``.

    force
        Force the reinstallation of the package if already installed.

    glob
        Treat the package names as shell glob patterns.

    local
        Do not update the repository catalogs with ``pkg-update(8)``. A
        value of ``True`` here is equivalent to using the ``-U`` flag with
        ``pkg install``.

    dryrun
        Dry-run mode. The list of changes to packages is always printed,
        but no changes are actually made.

    quiet
        Force quiet output, except when dryrun is used, where pkg install
        will always show packages to be installed, upgraded or deleted.

    reinstall_requires
        When used with force, reinstalls any packages that require the
        given package.

        .. versionchanged:: 2014.7.0
            ``require`` kwarg renamed to ``reinstall_requires``

    regex
        Treat the package names as a regular expression

    pcre
        Treat the package names as extended regular expressions.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.install <package name>
        salt '*' pkg.install <package name> force=True fromrepo=repo
        salt '*' pkg.install <package name> orphan=True dryrun=True
    '''
    try:
        pkg_params, pkg_type = __salt__['pkg_resource.parse_targets'](
            name, pkgs, sources, **kwargs
        )
    except MinionError as exc:
        raise CommandExecutionError(exc)
    if pkg_params is None or len(pkg_params) == 0:
        return {}

    opts = ''
    repo_opts = ''
    if salt.utils.is_true(orphan):
        opts += 'A'
    if salt.utils.is_true(force):
        opts += 'f'
    if salt.utils.is_true(glob):
        opts += 'g'
    if salt.utils.is_true(local):
        opts += 'U'
    if salt.utils.is_true(dryrun):
        opts += 'n'
    # Only assume "yes" when something will actually be changed, matching
    # the logic in remove(); previously -y and -n could both appear in the
    # same command line during a dry run.
    if not salt.utils.is_true(dryrun):
        opts += 'y'
    if salt.utils.is_true(quiet):
        opts += 'q'
    if salt.utils.is_true(reinstall_requires):
        opts += 'R'
    if fromrepo:
        repo_opts += 'r {0}'.format(fromrepo)
    if salt.utils.is_true(regex):
        opts += 'x'
    if salt.utils.is_true(pcre):
        opts += 'X'
    if opts:
        opts = '-' + opts
    if repo_opts:
        repo_opts = '-' + repo_opts

    old = list_pkgs(jail=jail, chroot=chroot)
    if pkg_type == 'file':
        pkg_cmd = 'add'
        # pkg add has a smaller set of options (i.e. no -y or -n); keep
        # only the compatible flags, then restore the leading dash that
        # the character filter strips off (previously the dash was lost,
        # producing a broken command such as ``pkg add fq <file>``).
        opts = ''.join(opt for opt in opts if opt in 'AfIMq')
        if opts:
            opts = '-' + opts
        targets = pkg_params
    elif pkg_type == 'repository':
        pkg_cmd = 'install'
        if pkgs is None and kwargs.get('version') and len(pkg_params) == 1:
            # Only use the 'version' param if 'name' was not specified as a
            # comma-separated list
            pkg_params = {name: kwargs.get('version')}
        targets = []
        for param, version_num in six.iteritems(pkg_params):
            if version_num is None:
                targets.append(param)
            else:
                targets.append('{0}-{1}'.format(param, version_num))

    cmd = '{0} {1} {2} {3} {4}'.format(
        _pkg(jail, chroot), pkg_cmd, repo_opts, opts, ' '.join(targets)
    )
    if pkg_cmd == 'add' and salt.utils.is_true(dryrun):
        # pkg add doesn't have a dryrun mode, so echo out what will be run
        return cmd

    __salt__['cmd.run'](cmd, python_shell=False, output_loglevel='trace')
    # Invalidate the cached package/origin listings for this jail/chroot
    __context__.pop(_contextkey(jail, chroot), None)
    __context__.pop(_contextkey(jail, chroot, prefix='pkg.origin'), None)
    new = list_pkgs(jail=jail, chroot=chroot)
    return salt.utils.compare_dicts(old, new)
def remove(name=None,
           pkgs=None,
           jail=None,
           chroot=None,
           all_installed=False,
           force=False,
           glob=False,
           dryrun=False,
           recurse=False,
           regex=False,
           pcre=False,
           **kwargs):
    '''
    Remove a package from the database and system

    .. note::
        This function can accessed using ``pkg.delete`` in addition to
        ``pkg.remove``, to more closely match the CLI usage of ``pkg(8)``.

    name
        The package to remove

    jail
        Delete the package from the specified jail

    chroot
        Delete the package from the specified chroot (ignored if ``jail`` is
        specified)

    all_installed
        Deletes all installed packages from the system and empties the
        database. USE WITH CAUTION!

    force
        Forces packages to be removed despite leaving unresolved
        dependencies.

    glob
        Treat the package names as shell glob patterns.

    dryrun
        Dry run mode. The list of packages to delete is always printed, but
        no packages are actually deleted.

    recurse
        Delete all packages that require the listed package as well.

    regex
        Treat the package names as regular expressions.

    pcre
        Treat the package names as extended regular expressions.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.remove <package name>
        salt '*' pkg.remove <package name> force=True recurse=True
        salt '*' pkg.remove all all_installed=True force=True
    '''
    try:
        pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs)[0]
    except MinionError as exc:
        raise CommandExecutionError(exc)

    targets = []
    old = list_pkgs(jail=jail, chroot=chroot, with_origin=True)
    for pkg in pkg_params.items():
        # FreeBSD pkg supports both `openjdk` and `java/openjdk7` package
        # names; resolve an origin-style name to the installed package key.
        if pkg[0].find("/") > 0:
            origin = pkg[0]
            # Use six.iteritems (dict.iteritems was removed in PY3, and the
            # rest of this module already uses six), and keep the
            # (key, value) tuple so that pkg[0] below is the package name —
            # the old list-of-keys version rebound pkg to a plain string,
            # making pkg[0] its first character.
            matches = [(k, v) for k, v in six.iteritems(old)
                       if v['origin'] == origin]
            if not matches:
                # Nothing installed from this origin; skip instead of
                # raising IndexError on the unguarded [0]
                continue
            pkg = matches[0]
        if pkg[0] in old:
            targets.append(pkg[0])

    if not targets:
        return {}

    opts = ''
    if salt.utils.is_true(all_installed):
        opts += 'a'
    if salt.utils.is_true(force):
        opts += 'f'
    if salt.utils.is_true(glob):
        opts += 'g'
    if salt.utils.is_true(dryrun):
        opts += 'n'
    # Only assume "yes" when packages will actually be deleted
    if not salt.utils.is_true(dryrun):
        opts += 'y'
    if salt.utils.is_true(recurse):
        opts += 'R'
    if salt.utils.is_true(regex):
        opts += 'x'
    if salt.utils.is_true(pcre):
        opts += 'X'
    if opts:
        opts = '-' + opts

    cmd = '{0} delete {1} {2}'.format(
        _pkg(jail, chroot), opts, ' '.join(targets)
    )
    __salt__['cmd.run'](cmd, python_shell=False, output_loglevel='trace')
    # Invalidate the cached package/origin listings for this jail/chroot
    __context__.pop(_contextkey(jail, chroot), None)
    __context__.pop(_contextkey(jail, chroot, prefix='pkg.origin'), None)
    new = list_pkgs(jail=jail, chroot=chroot, with_origin=True)
    return salt.utils.compare_dicts(old, new)
# ``pkg.delete`` is an alias for ``pkg.remove``, mirroring the pkg(8) CLI usage
delete = salt.utils.alias_function(remove, 'delete')
# pkg(8) has no equivalent to "purge"; map it to remove as well
purge = salt.utils.alias_function(remove, 'purge')
def upgrade(*names, **kwargs):
    '''
    Upgrade named or all packages (run a ``pkg upgrade``). If <package name> is
    omitted, the operation is executed on all packages.

    jail
        Upgrade packages within the specified jail

    chroot
        Upgrade packages within the specified chroot (ignored if ``jail`` is
        specified)

    Any of the below options can also be used with ``jail`` or ``chroot``.

    force
        Force reinstalling/upgrading the whole set of packages.

    local
        Do not update the repository catalogs with ``pkg-update(8)``. A value
        of ``True`` here is equivalent to using the ``-U`` flag with ``pkg
        upgrade``.

    dryrun
        Dry-run mode: show what packages have updates available, but do not
        perform any upgrades. Repository catalogs will be updated as usual
        unless the local option is also given.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.upgrade <package name>
        salt '*' pkg.upgrade <package name> jail=<jail name or id>
        salt '*' pkg.upgrade <package name> chroot=/path/to/chroot force=True
    '''
    ret = {'changes': {},
           'result': True,
           'comment': '',
           }
    jail = kwargs.pop('jail', None)
    chroot = kwargs.pop('chroot', None)
    force = kwargs.pop('force', False)
    local = kwargs.pop('local', False)
    dryrun = kwargs.pop('dryrun', False)

    opts = ''
    if force:
        opts += 'f'
    if local:
        opts += 'L'
    if dryrun:
        opts += 'n'
    if not dryrun:
        opts += 'y'
    if opts:
        opts = '-' + opts

    # Honor jail/chroot when snapshotting the package list; previously the
    # host's package list was compared even when upgrading inside a jail or
    # chroot.
    old = list_pkgs(jail=jail, chroot=chroot)
    call = __salt__['cmd.run_all'](
        '{0} upgrade {1} {2}'.format(_pkg(jail, chroot), opts, ' '.join(names)),
        python_shell=False,
        output_loglevel='trace'
    )
    if call['retcode'] != 0:
        ret['result'] = False
        if 'stderr' in call:
            ret['comment'] += call['stderr']
        if 'stdout' in call:
            ret['comment'] += call['stdout']
    else:
        # Invalidate the cached listings under the same keys list_pkgs()
        # caches them, consistent with install()/remove().  The old code
        # popped the literal key 'pkg.list_pkgs', which is never set here.
        __context__.pop(_contextkey(jail, chroot), None)
        __context__.pop(_contextkey(jail, chroot, prefix='pkg.origin'), None)
        new = list_pkgs(jail=jail, chroot=chroot)
        ret['changes'] = salt.utils.compare_dicts(old, new)
    return ret
def clean(jail=None, chroot=None):
    '''
    Cleans the local cache of fetched remote packages

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.clean
        salt '*' pkg.clean jail=<jail name or id>
        salt '*' pkg.clean chroot=/path/to/chroot
    '''
    cmd = '{0} clean'.format(_pkg(jail, chroot))
    return __salt__['cmd.run'](
        cmd,
        python_shell=False,
        output_loglevel='trace'
    )
def autoremove(jail=None, chroot=None, dryrun=False):
    '''
    Delete packages which were automatically installed as dependencies and are
    not required anymore.

    dryrun
        Dry-run mode. The list of changes to packages is always printed,
        but no changes are actually made.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.autoremove
        salt '*' pkg.autoremove jail=<jail name or id>
        salt '*' pkg.autoremove dryrun=True
        salt '*' pkg.autoremove jail=<jail name or id> dryrun=True
    '''
    # -n previews the removals; -y confirms them non-interactively
    mode = '-n' if dryrun else '-y'
    return __salt__['cmd.run'](
        '{0} autoremove {1}'.format(_pkg(jail, chroot), mode),
        python_shell=False,
        output_loglevel='trace'
    )
def check(jail=None,
          chroot=None,
          depends=False,
          recompute=False,
          checksum=False):
    '''
    Sanity checks installed packages

    jail
        Perform the sanity check in the specified jail

    chroot
        Perform the sanity check in the specified chroot (ignored if ``jail``
        is specified)

    Of the below, at least one must be set to ``True``.

    depends
        Check for and install missing dependencies.

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.check depends=True

    recompute
        Recompute sizes and checksums of installed packages.

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.check recompute=True

    checksum
        Find invalid checksums for installed packages.

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.check checksum=True
    '''
    if not any((depends, recompute, checksum)):
        return 'One of depends, recompute, or checksum must be set to True'

    opts = ''
    if depends:
        # -d checks dependencies; the extra 'y' auto-confirms installing
        # any that are missing
        opts += 'dy'
    if recompute:
        opts += 'r'
    if checksum:
        opts += 's'
    if opts:
        opts = '-' + opts

    return __salt__['cmd.run'](
        '{0} check {1}'.format(_pkg(jail, chroot), opts),
        python_shell=False,
        output_loglevel='trace'
    )
def which(path, jail=None, chroot=None, origin=False, quiet=False):
    '''
    Displays which package installed a specific file

    path
        The file to look up

    jail
        Perform the check in the specified jail

    chroot
        Perform the check in the specified chroot (ignored if ``jail`` is
        specified)

    origin
        Shows the origin of the package instead of name-version.

    quiet
        Quiet output.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.which <file name>
        salt '*' pkg.which <file name> jail=<jail name or id>
        salt '*' pkg.which <file name> origin=True quiet=True
    '''
    flags = ''.join(ch for ch, enabled in (('q', quiet), ('o', origin))
                    if enabled)
    opts = '-' + flags if flags else ''
    return __salt__['cmd.run'](
        '{0} which {1} {2}'.format(_pkg(jail, chroot), opts, path),
        python_shell=False,
        output_loglevel='trace'
    )
def search(name,
           jail=None,
           chroot=None,
           exact=False,
           glob=False,
           regex=False,
           pcre=False,
           comment=False,
           desc=False,
           full=False,
           depends=False,
           size=False,
           quiet=False,
           origin=False,
           prefix=False):
    '''
    Searches in remote package repositories

    name
        The pattern to search for

    jail
        Perform the search using the ``pkg.conf(5)`` from the specified jail

    chroot
        Perform the search using the ``pkg.conf(5)`` from the specified chroot
        (ignored if ``jail`` is specified)

    exact
        Treat pattern as exact pattern.

    glob
        Treat pattern as a shell glob pattern.

    regex
        Treat pattern as a regular expression.

    pcre
        Treat pattern as an extended regular expression.

    comment
        Search for pattern in the package comment one-line description.

    desc
        Search for pattern in the package description.

    full
        Displays full information about the matching packages.

    depends
        Displays the dependencies of pattern.

    size
        Displays the size of the package.

    quiet
        Be quiet. Prints only the requested information without displaying
        many hints.

    origin
        Displays pattern origin.

    prefix
        Displays the installation prefix for each package matching pattern.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.search pattern
        salt '*' pkg.search pattern jail=<jail name or id>
        salt '*' pkg.search pattern chroot=/path/to/chroot
        salt '*' pkg.search pattern glob=True quiet=True
    '''
    # Map each boolean toggle to its pkg-search(8) flag, in the same order
    # the flags were previously appended.
    flag_table = (
        ('e', exact),
        ('g', glob),
        ('x', regex),
        ('X', pcre),
        ('c', comment),
        ('D', desc),
        ('f', full),
        ('d', depends),
        ('s', size),
        ('q', quiet),
        ('o', origin),
        ('p', prefix),
    )
    flags = ''.join(ch for ch, enabled in flag_table if enabled)
    opts = '-' + flags if flags else ''
    return __salt__['cmd.run'](
        '{0} search {1} {2}'.format(_pkg(jail, chroot), opts, name),
        python_shell=False,
        output_loglevel='trace'
    )
def fetch(name,
          jail=None,
          chroot=None,
          fetch_all=False,
          quiet=False,
          fromrepo=None,
          glob=True,
          regex=False,
          pcre=False,
          local=False,
          depends=False):
    '''
    Fetches remote packages

    name
        The package (or pattern) to fetch

    jail
        Fetch package in the specified jail

    chroot
        Fetch package in the specified chroot (ignored if ``jail`` is
        specified)

    fetch_all
        Fetch all packages.

    quiet
        Quiet mode. Show less output.

    fromrepo
        Fetches packages from the given repo if multiple repo support
        is enabled. See ``pkg.conf(5)``.

    glob
        Treat pkg_name as a shell glob pattern.

    regex
        Treat pkg_name as a regular expression.

    pcre
        Treat pkg_name as an extended regular expression.

    local
        Skip updating the repository catalogs with pkg-update(8). Use the
        local cache only.

    depends
        Fetch the package and its dependencies as well.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.fetch <package name>
        salt '*' pkg.fetch <package name> jail=<jail name or id>
        salt '*' pkg.fetch <package name> fromrepo=repo depends=True
    '''
    # Map each boolean toggle to its pkg-fetch(8) flag, in the same order
    # the flags were previously appended.
    flag_table = (
        ('a', fetch_all),
        ('q', quiet),
        ('g', glob),
        ('x', regex),
        ('X', pcre),
        ('L', local),
        ('d', depends),
    )
    flags = ''.join(ch for ch, enabled in flag_table if enabled)
    opts = '-' + flags if flags else ''
    repo_opts = '-r {0}'.format(fromrepo) if fromrepo else ''
    # -y auto-confirms the download
    return __salt__['cmd.run'](
        '{0} fetch -y {1} {2} {3}'.format(
            _pkg(jail, chroot), opts, repo_opts, name
        ),
        python_shell=False,
        output_loglevel='trace'
    )
def updating(name,
             jail=None,
             chroot=None,
             filedate=None,
             filename=None):
    '''
    Displays UPDATING entries of software packages

    name
        The package whose UPDATING entries should be shown

    jail
        Perform the action in the specified jail

    chroot
        Perform the action in the specified chroot (ignored if ``jail`` is
        specified)

    filedate
        Only entries newer than date are shown. Use a YYYYMMDD date format.

    filename
        Defines an alternative location of the UPDATING file.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.updating foo
        salt '*' pkg.updating foo jail=<jail name or id>
        salt '*' pkg.updating foo chroot=/path/to/chroot
        salt '*' pkg.updating foo filedate=20130101
        salt '*' pkg.updating foo filename=/tmp/UPDATING
    '''
    # (The previous docstring opened with four quotes, leaving a stray
    # apostrophe at the start of the rendered help text.)
    #
    # Build the options as a list so that combining filedate and filename
    # works; the old string concatenation produced a mangled command line
    # such as ``-d 20130101f /tmp/UPDATING`` when both were supplied.
    opts = []
    if filedate:
        opts.append('-d {0}'.format(filedate))
    if filename:
        opts.append('-f {0}'.format(filename))
    return __salt__['cmd.run'](
        '{0} updating {1} {2}'.format(_pkg(jail, chroot), ' '.join(opts), name),
        python_shell=False,
        output_loglevel='trace'
    )
| |
# -*- coding: utf-8 -*-
'''
____ ___ ____________ ___ ___ ____ _________________
/ __ \/ _ | / __/ _/ __/ / _ \/ _ \/ __ \__ / / __/ ___/_ __/
/ /_/ / __ |_\ \_/ /_\ \ / ___/ , _/ /_/ / // / _// /__ / /
\____/_/ |_/___/___/___/ /_/ /_/|_|\____/\___/___/\___/ /_/
Operational Aid Source for Infra-Structure
Created on 2020. 3. 9..
@author: Hye-Churn Jang, CMBU Specialist in Korea, VMware [jangh@vmware.com]
'''
import urllib3
urllib3.disable_warnings()
from builtins import staticmethod
import requests
from sqlalchemy import create_engine
from sqlalchemy.orm import mapper
from sqlalchemy.orm import Session
from sqlalchemy import Table as saTable, MetaData
from inspect import isclass
from .common import dumpJson, logDebug
from .struct import PygObj, singleton
from .task import Lock, sleep
class Driver(PygObj):
    """
    Base class for pygics drivers.

    NOTE(review): ``system`` is a no-op here; it appears intended as a hook
    for concrete driver subclasses to override — confirm against callers.
    """
    #===========================================================================
    # End User Interface
    #===========================================================================
    def system(self, *args, **kwargs):
        """No-op end-user interface hook (accepts and ignores any arguments)."""
        pass
def sdk(client):
    # Register *client* in the 'SDK' singleton namespace keyed by its class
    # name, returning whatever singleton() yields for that registration.
    return singleton(client, st_namespace='SDK', st_name=client.__name__)
#===============================================================================
# Pygics Component Client SDK
#===============================================================================
class ClientPath(PygObj):
    """
    Builder for a single REST call.

    Attribute access appends URL segments (``client.GET.a.b(...)`` targets
    ``<base>/a/b``) and calling the object fires the request and returns
    the decoded JSON response.

    NOTE(review): ``__getattr__`` mutates ``self.__path__`` in place, so an
    instance is effectively single-use; ``Client`` constructs a fresh one
    per request through its verb properties.
    """
    def __init__(self, session, method, init_path):
        # session: HTTP session shared with the owning Client
        # method: HTTP verb name ('GET', 'POST', 'PUT' or 'DELETE')
        # init_path: fully-qualified base URL to extend
        self.__session__ = session
        self.__method__ = method
        self.__path__ = init_path

    def __getattr__(self, path):
        # Append one path segment and return self to allow chaining
        self.__path__ = '%s/%s' % (self.__path__, path)
        return self

    def __call__(self, *args, **kwargs):
        # Positional args become trailing path segments; keyword args are
        # sent as the JSON body for POST/PUT (they are ignored for
        # GET/DELETE).
        if args:
            for arg in args:
                self.__path__ = '%s/%s' % (self.__path__, str(arg))
        if self.__method__ == 'GET':
            resp = self.__session__.get(self.__path__)
        elif self.__method__ == 'POST':
            resp = self.__session__.post(self.__path__, json=kwargs)
        elif self.__method__ == 'PUT':
            resp = self.__session__.put(self.__path__, json=kwargs)
        elif self.__method__ == 'DELETE':
            resp = self.__session__.delete(self.__path__)
        else:
            raise Exception('can not execute unsupported method %s' % self.__method__)
        # Raise on HTTP error status, otherwise decode the JSON payload
        resp.raise_for_status()
        return resp.json()
class Client(Driver):
    """
    Minimal HTTP client driver.

    Exposes one property per HTTP verb (GET/POST/PUT/DELETE); each access
    yields a fresh ClientPath rooted at the configured base URL, which is
    then extended via attribute chaining and invoked to send the request.
    """
    def __init__(self, host, port, base_url):
        self.init_path = 'http://%s:%d%s' % (host, port, base_url)
        self.session = requests.Session()

    def __verb__(self, method):
        # A new ClientPath per access, because ClientPath mutates its own
        # path state while chaining.
        return ClientPath(self.session, method, self.init_path)

    @property
    def GET(self):
        return self.__verb__('GET')

    @property
    def POST(self):
        return self.__verb__('POST')

    @property
    def PUT(self):
        return self.__verb__('PUT')

    @property
    def DELETE(self):
        return self.__verb__('DELETE')
#===============================================================================
# Rest API SDK
#===============================================================================
class ModelList(list):
    """
    List of Model instances for one model type.

    Calling the list with keyword arguments instantiates and appends a new
    model populated from those values.
    """
    def __init__(self, model, api=None, parent=None):
        list.__init__(self)
        # model: the Model subclass whose instances this list holds
        self.model = model
        # api: owning API object, if any (defaults to None)
        self.api = api
        # parent: parent model instance for nested models, if any
        self.parent = parent
        # Mirror the model's meta layer information on the list itself
        self.list_layer = model.__meta__.list_layer
        self.layer = model.__meta__.layer

    def __call__(self, **kwargs):
        # Create the instance without running __init__, then populate it
        # from kwargs via __data__; link it to the parent (when present)
        # and append it to this list.
        data = self.model.__new__(self.model)
        data.__data__(**kwargs)
        if self.parent:
            data.__parent__ = self.parent
        self.append(data)
class Model(dict):
#===========================================================================
# Custom Filter Interface
#===========================================================================
@staticmethod
def __create_filter__(model, intent): pass
@staticmethod
def __list_filter__(model, clause): pass
@staticmethod
def __get_filter__(model, args, keys): pass
@staticmethod
def __update_filter__(model): pass
@staticmethod
def __delete_filter__(model): pass
    #===========================================================================
    # Class Wrapper for Class Status
    #===========================================================================
    @classmethod
    def __class_create_wrapper__(cls, **intent):
        # Let the create filter adjust *intent* in place, then delegate
        cls.__create_filter__(cls, intent)
        return Rest.__create_wrapper__(cls, **intent)

    @classmethod
    def __class_list_wrapper__(cls, **clause):
        cls.__list_filter__(cls, clause)
        # Class-level listing: ModelList bound to the class api, no parent
        return Rest.__list_wrapper__(ModelList(cls, cls.__meta__.api), **clause)

    @classmethod
    def __class_get_wrapper__(cls, *args, **keys):
        # Copy args to a list so the get filter can mutate it in place
        args = list(args)
        cls.__get_filter__(cls, args, keys)
        return Rest.__get_wrapper__(cls, *args, **keys)
    #===========================================================================
    # Init Wrapper for Class Status
    #===========================================================================
    def __init_create_wrapper__(self, **intent):
        self.__create_filter__(self, intent)
        Rest.__create_wrapper__(self, **intent)

    def __init_get_wrapper__(self, *args, **keys):
        # NOTE(review): unlike __class_get_wrapper__, *args* is not copied
        # to a list here, so a filter cannot mutate it — confirm intended.
        self.__get_filter__(self, args, keys)
        Rest.__get_wrapper__(self, *args, **keys)

    def __init_wrapper__(self, *args, **kwargs):
        # Plain data initialisation: populate from kwargs, no REST call
        self.__data__(**kwargs)
    #===========================================================================
    # Inst Wrapper for Instance Status
    #===========================================================================
    def __inst_create_wrapper__(self, **intent):
        self.__create_filter__(self, intent)
        # Create a sibling instance under the same parent rather than
        # mutating self
        data = self.__class__.__new__(self.__class__)
        data.__parent__ = self.__parent__
        return Rest.__create_wrapper__(data, **intent)

    def __inst_list_wrapper__(self, **clause):
        self.__list_filter__(self, clause)
        # Instance-level listing is scoped by this instance's api and parent
        data = ModelList(self.__class__, self.__api__(), self.__parent__)
        return Rest.__list_wrapper__(data, **clause)

    def __inst_get_wrapper__(self, *args, **keys):
        # Copy args to a list so the get filter can mutate it in place
        args = list(args)
        self.__get_filter__(self, args, keys)
        data = self.__class__.__new__(self.__class__)
        data.__parent__ = self.__parent__
        return Rest.__get_wrapper__(data, *args, **keys)

    def __inst_update_wrapper__(self):
        self.__update_filter__(self)
        return Rest.__update_wrapper__(self)

    def __inst_delete_wrapper__(self):
        self.__delete_filter__(self)
        return Rest.__delete_wrapper__(self)
#===========================================================================
# Call Wrapper for Instance Status
#===========================================================================
def __call_create_wrapper__(self, **intent):
return self.create(**intent)
def __call_get_wrapper__(self, *args, **keys):
return self.get(*args, **keys)
def __call_wrapper__(self, *args, **kwargs):
return self.__data__(**kwargs)
#===========================================================================
# Internal Data Actions
#===========================================================================
@classmethod
def __model__(cls, **kwargs):
model = cls.__new__(cls)
return model.__data__(**kwargs)
@classmethod
def __help__(cls):
call = None
life = []
prop = []
subm = []
func = []
for name, attr in cls.__dict__.items():
attr_type = str(type(attr))
attr_addr = str(attr)
if 'property' in attr_type:
prop.append(name)
elif 'function' in attr_type:
if '__inst_' in attr_addr:
if '__inst_' not in name:
life.append(name + '()')
elif '__' not in name:
func.append(name + '()')
elif '__init_wrapper__' == name:
if '__init_get_wrapper__' in attr_addr:
call = 'get()'
elif '__init_create_wrapper__' in attr_addr:
call = 'create()'
elif 'type' in attr_type and issubclass(attr, Model):
if hasattr(attr.__meta__, 'property'):
prop.append(name)
else:
subm.append(name)
elif 'method' in attr_type:
if '__class_' in attr_addr:
if '__class_' not in name:
life.append(name + '()')
return '''{name}
CRUD Actions (self calling action is "{call}"){life}
Properties:{prop}
Sub-Models:{subm}
Defined Actions:{func}{intent}'''.format(
name=cls.__name__,
call=call,
life='\n - ' + '\n - '.join(life) if life else '\n N/A',
prop='\n - ' + '\n - '.join(prop) if prop else '\n N/A',
subm='\n - ' + '\n - '.join(subm) if subm else '\n N/A',
func='\n - ' + '\n - '.join(func) if func else '\n N/A',
intent='\n Model Intent\n {}'.format(cls.__doc__) if cls.__doc__ else '')
def __init__(self, *args, **kwargs):
self.__init_wrapper__(*args, **kwargs)
def __call__(self, *args, **kwargs):
return self.__call_wrapper__(*args, **kwargs)
def __data__(self, **kwargs):
dict.__init__(self, **kwargs)
return self
def __keys__(self, keys=None):
if keys == None: keys = {}
if hasattr(self, '__parent__'):
keys = self.__parent__.__keys__(keys)
for k, v in self.__meta__.keys.items():
if k not in keys and v in self:
keys[k] = self[v]
return keys
def __api__(self, sub_url=None, keys=None):
api = self.__meta__.api
if sub_url:
api = api + sub_url
return api.format(**(self.__keys__(keys)))
def __url__(self, sub_url=None, keys=None):
url = self.__meta__.url
if sub_url:
url = url + sub_url
return url.format(**(self.__keys__(keys)))
def __repr__(self):
return '<%s>%s' % (self.__class__.__name__, dumpJson(dict(**self), indent=2))
def __neg__(self):
if hasattr(self, '__parent__'):
return self.__parent__
else: self
def __getattribute__(self, name):
if name in self: return self[name]
else:
attr = dict.__getattribute__(self, name)
if isclass(attr) and issubclass(attr, Model) and attr != dict.__getattribute__(self, '__class__'):
model = attr.__new__(attr)
model.__parent__ = self
if hasattr(attr, 'create'):
model.create = model.__inst_create_wrapper__
if model.__meta__.def_func == 'create':
model.__call_wrapper__ = model.__call_create_wrapper__
if hasattr(attr, 'list'):
model.list = model.__inst_list_wrapper__
if hasattr(attr, 'get'):
model.get = model.__inst_get_wrapper__
if model.__meta__.def_func == 'get':
model.__call_wrapper__ = model.__call_get_wrapper__
if hasattr(model.__meta__, 'property'):
model.__meta__.driver.__get__(model.__url__(), model)
dict.__setattr__(self, name, model)
return model
return attr
class RestUser:
    """
    One authenticated account/session against a REST endpoint.

    Construction temporarily installs this object as the SDK's active
    account (guarded by the SDK lock) so the auth handshake can reuse the
    normal HTTP plumbing, then restores the SDK's default account.
    """
    def __init__(self, sdk, url, username, password, retry=3):
        # retry: how many times HTTP calls retry on a 401 before giving up
        self.sdk = sdk
        self.url = url
        self.username = username
        self.password = password
        self.retry = retry
        # Serialize the handshake: the SDK routes requests through
        # sdk.account, so it must point at us while we authenticate.
        self.sdk.lock.on()
        self.sdk.account = self
        self.__session__()
        self.__headers__()
        self.__cookies__()
        self.sdk.account = self.sdk.default
        self.sdk.lock.off()
    def __session__(self):
        # NOTE(review): TLS verification is disabled for every request
        # made on this session -- confirm that is intentional.
        self.session = requests.Session()
        self.session.verify = False
    def __headers__(self, **kwargs):
        # Merge extra headers into the shared session.
        for k, v in kwargs.items():
            self.session.headers[k] = v
    def __cookies__(self, **kwargs):
        # Merge extra cookies into the shared session.
        for k, v in kwargs.items():
            self.session.cookies[k] = v
    def __refresh__(self):
        # Re-run the header/cookie steps (called by the driver after a 401).
        self.__headers__()
        self.__cookies__()
    def __enter__(self):
        # Context manager: make this account the SDK's active one.
        self.sdk.lock.on()
        self.sdk.account = self
        return self
    def __exit__(self, type, value, traceback):
        # Restore the default account and release the SDK lock.
        self.sdk.account = self.sdk.default
        self.sdk.lock.off()
class Rest(Driver):
    """
    REST driver.  Subclass per service; register model classes with the
    decorator methods at the bottom and they gain create/list/get/update/
    delete behavior backed by the HTTP methods of this driver.
    """
    def __init__(self, user_model=RestUser, list_layer=None, layer=None):
        # Bug fix: list_layer/layer used mutable default arguments ([]),
        # which were shared across all driver instances.
        self.user_model = user_model
        self.list_layer = list_layer if list_layer is not None else []
        self.layer = layer if layer is not None else []
        self.account = None
        self.default = None
        self.lock = Lock()
    def system(self, url, username, password):
        """Create and store the default (system) account session."""
        self.default = self.user_model(self, url, username, password)
        self.account = self.default
        return self.default
    def login(self, username, password, url=None):
        """Create an additional user session; url falls back to the system account's."""
        if not url and not self.default:
            raise Exception('could not find rest api host')
        elif not url: url = self.default.url
        return self.user_model(self, url, username, password)
    #===========================================================================
    # Interface for Rest Data Handling
    #===========================================================================
    @staticmethod
    def __unwrap__(resp, layers, empty=None):
        # Parse the response JSON and drill through the configured wrapper
        # layers; return *empty* ({} by default) when the body is blank.
        if not resp.text:
            return {} if empty is None else empty
        json_data = resp.json()
        for layer in layers:
            json_data = json_data[layer]
        return json_data
    def __create__(self, api, data, intent):
        resp = self.doPostMethod(api, intent)
        data.__data__(**self.__unwrap__(resp, data.__meta__.layer))
    def __list__(self, api, list_of_data, clause):
        # Optional clause dict becomes a query string.
        if clause:
            query = '&'.join(['%s=%s' % (k, str(v)) for k, v in clause.items()])
            api = '%s?%s' % (api, query)
        resp = self.doGetMethod(api)
        for json_data in self.__unwrap__(resp, list_of_data.list_layer, empty=[]):
            for layer in list_of_data.layer:
                json_data = json_data[layer]
            list_of_data(**json_data)
    def __get__(self, url, data):
        resp = self.doGetMethod(url)
        data.__data__(**self.__unwrap__(resp, data.__meta__.layer))
    def __update_using_put__(self, url, data):
        resp = self.doPutMethod(url, data)
        data.__data__(**self.__unwrap__(resp, data.__meta__.layer))
    def __update_using_patch__(self, url, data):
        resp = self.doPatchMethod(url, data)
        data.__data__(**self.__unwrap__(resp, data.__meta__.layer))
    def __update_using_post__(self, url, data):
        resp = self.doPostMethod(url, data)
        data.__data__(**self.__unwrap__(resp, data.__meta__.layer))
    def __delete__(self, url, data):
        self.doDeleteMethod(url)
        data.__deleted__ = True
    #===========================================================================
    # Rest CRUD Wrapper
    #===========================================================================
    @staticmethod
    def __create_wrapper__(model, **intent):
        # Works on both an instance (fill in place) and a class (new instance).
        if isinstance(model, Model):
            api = model.__api__()
            data = model
        else:
            api = model.__meta__.api
            data = model.__new__(model)
        model.__meta__.driver.__create__(api, data, intent)
        return data
    @staticmethod
    def __list_wrapper__(list_of_data, **clause):
        list_of_data.model.__meta__.driver.__list__(list_of_data.api, list_of_data, clause)
        return list_of_data
    @staticmethod
    def __get_wrapper__(model, *args, **keys):
        # A single positional arg is shorthand for the first declared key.
        if len(args) == 1:
            keys[model.__meta__.keys_path[0]] = args[0]
        if isinstance(model, Model):
            url = model.__url__(keys=keys)
            data = model
        else:
            url = model.__meta__.url.format(**keys)
            data = model.__new__(model)
        model.__meta__.driver.__get__(url, data)
        return data
    @staticmethod
    def __update_wrapper__(model):
        # Dispatch on the HTTP verb chosen at registration time.
        url = model.__url__()
        if model.__meta__.def_update == 'put':
            model.__meta__.driver.__update_using_put__(url, model)
        elif model.__meta__.def_update == 'patch':
            model.__meta__.driver.__update_using_patch__(url, model)
        elif model.__meta__.def_update == 'post':
            model.__meta__.driver.__update_using_post__(url, model)
        else:
            raise Exception('undefined default update driver')
        return model
    @staticmethod
    def __delete_wrapper__(model):
        url = model.__url__()
        model.__meta__.driver.__delete__(url, model)
        return model
    #===========================================================================
    # HTTP Methods
    #===========================================================================
    def __request__(self, verb, url, **kwargs):
        # Shared retry loop: on HTTP 401, refresh the session credentials and
        # retry with a linear backoff; any other error status raises.
        method = getattr(self.account.session, verb)
        resp = None
        for delay in range(0, self.account.retry):
            resp = method(self.account.url + url, **kwargs)
            if resp.status_code == 401:
                # Bug fix: the original logged '[%s SDK] Refresh Session'
                # with an unfilled %s placeholder (no format arguments).
                logDebug('[%s SDK] Refresh Session' % self.__class__.__name__)
                sleep(delay)
                self.account.__refresh__()
                continue
            resp.raise_for_status()
            break
        return resp
    def doGetMethod(self, url):
        logDebug('[%s SDK] GET > %s' % (self.__class__.__name__, url))
        return self.__request__('get', url)
    def doPostMethod(self, url, data=None):
        if data is None: data = {}
        logDebug('[%s SDK] POST > %s < %s' % (self.__class__.__name__, url, dumpJson(data)))
        return self.__request__('post', url, json=data)
    def doPutMethod(self, url, data=None):
        if data is None: data = {}
        logDebug('[%s SDK] PUT > %s < %s' % (self.__class__.__name__, url, dumpJson(data)))
        return self.__request__('put', url, json=data)
    def doPatchMethod(self, url, data=None):
        if data is None: data = {}
        logDebug('[%s SDK] PATCH > %s < %s' % (self.__class__.__name__, url, dumpJson(data)))
        return self.__request__('patch', url, json=data)
    def doDeleteMethod(self, url):
        logDebug('[%s SDK] DELETE > %s' % (self.__class__.__name__, url))
        return self.__request__('delete', url)
    #===========================================================================
    # Schema Decorators
    #===========================================================================
    def __call__(self, api=None, url=None, list_layer=None, layer=None, **keys):
        """Class decorator registering a model schema (endpoints + key mapping)."""
        def wrapper(model):
            class Meta(object):
                def __repr__(self):
                    ret = ['KEY : %s' % self.keys, ]
                    if hasattr(self, 'api'):
                        ret.append('API : %s' % self.api)
                    if hasattr(self, 'url'):
                        ret.append('URL : %s' % self.url)
                    return '\n'.join(ret)
            # create metadata
            model.__meta__ = Meta()
            model.__meta__.driver = self
            model.__meta__.def_func = None
            model.__meta__.keys = keys
            model.__meta__.keys_count = len(keys)
            model.__meta__.keys_path = list(keys.keys())
            model.__meta__.keys_data = list(keys.values())
            model.__meta__.list_layer = list_layer if list_layer is not None else self.list_layer
            model.__meta__.layer = layer if layer is not None else self.layer
            if api is not None: model.__meta__.api = api
            if url is not None: model.__meta__.url = url
            logDebug('[SDK.%s] Register > %s' % (self.__class__.__name__, str(model).split("'")[1]))
            return model
        return wrapper
    def entry(self, model):
        """Expose the model as an attribute of the driver itself."""
        model.__meta__.entry = True
        self.__setattr__(model.__name__, model)
        logDebug('[SDK.%s] Register Entry > SDK.%s.%s' % (self.__class__.__name__, self.__class__.__name__, model.__name__))
        return model
    def create(self, def_model=None):
        """Enable create(); used bare it only adds the method, used as
        @driver.create() it also becomes the model's default action."""
        def register(model):
            if not def_model:
                model.__meta__.def_func = 'create'
                model.__init_wrapper__ = model.__init_create_wrapper__
            model.create = model.__class_create_wrapper__
            return model
        if def_model:
            return register(def_model)
        else:
            return register
    def list(self, model):
        """Enable list() on the model class."""
        model.list = model.__class_list_wrapper__
        return model
    def get(self, def_model=None):
        """Enable get(); used as @driver.get() it also becomes the model's
        default action."""
        def register(model):
            if not def_model:
                model.__meta__.def_func = 'get'
                model.__init_wrapper__ = model.__init_get_wrapper__
            model.get = model.__class_get_wrapper__
            return model
        if def_model:
            return register(def_model)
        else:
            return register
    def update(self, def_method='put'):
        """Enable update() backed by the given HTTP verb (put/patch/post)."""
        def register(model):
            def_update = def_method.lower()
            if def_update not in ['post', 'put', 'patch']:
                raise Exception('unsupport http method for update')
            model.__meta__.def_update = def_update
            model.update = model.__inst_update_wrapper__
            return model
        return register
    def delete(self, model):
        """Enable delete() on model instances."""
        model.delete = model.__inst_delete_wrapper__
        return model
    def property(self, model):
        """Mark a sub-model as a property: fetched eagerly on attribute access."""
        model.__meta__.property = True
        return model
#===============================================================================
# Database SDK
#===============================================================================
class Table(object):
    """Declarative base for driver-mapped tables.

    Subclasses fill __schema__ with column definitions; the Database driver
    decorator maps the class onto a table and binds __driver__.
    """
    __schema__ = []
    @classmethod
    def list(cls, *clause, **kwargs):
        """Run query() and fetch every matching row."""
        return cls.query(*clause, **kwargs).all()
    @classmethod
    def query(cls, *clause, **kwargs):
        """Build a session query, applying filter clauses and optional ordering.

        'order' may be a single criterion or a list of criteria.
        """
        ordering = kwargs.get('order', [])
        if not isinstance(ordering, list):
            ordering = [ordering]
        result = cls.__driver__.__query__(cls)
        for condition in clause:
            result = result.filter(condition)
        for criterion in ordering:
            result = result.order_by(criterion)
        return result
    def __init__(self, **fields):
        """Assign column values, then persist through the bound driver."""
        for field, value in fields.items():
            self.__setattr__(field, value)
        self.__driver__.__create__(self)
    def update(self):
        """Flush pending changes via the driver."""
        self.__driver__.__update__()
    def delete(self):
        """Remove this row via the driver."""
        self.__driver__.__delete__(self)
class Database(Driver):
    """
    SQLAlchemy-backed driver: decorate Table subclasses to map them onto
    tables named after the class, in a database named after this driver
    subclass (lowercased).
    """
    def __init__(self, proto):
        # proto: SQLAlchemy dialect prefix, e.g. 'mysql' or 'postgresql'
        self.proto = proto
        self.metadata = MetaData()
    def system(self, host, username=None, password=None):
        """Connect to the backing database and prepare engine/session/schema."""
        self.host = host
        if username and password: self.account = '%s:%s' % (username, password)
        elif username: self.account = username
        else: self.account = ''
        # Bug fix: without credentials the original built 'proto://@host/db'
        # (stray '@' separator); only insert '@' when an account is present.
        auth = self.account + '@' if self.account else ''
        self.url = '%s://%s%s/%s' % (self.proto, auth, self.host, self.__class__.__name__.lower())
        self.engine = create_engine(self.url)
        self.session = Session(self.engine)
        self.metadata.create_all(self.engine)
        # autocommit=True commits after every CRUD call; the context manager
        # below batches work into a single transaction instead.
        self.autocommit = True
        self.lock = Lock()
    def __call__(self, model):
        """Class decorator: map *model* onto a table and bind this driver."""
        name = model.__name__
        args = [name, self.metadata] + model.__schema__
        table = saTable(*args)
        mapper(model, table)
        model.__driver__ = self
        self.__setattr__(name, model)
        return model
    def __enter__(self):
        # Batch mode: suspend per-call commits until __exit__.
        self.autocommit = False
        self.lock.on()
        return self
    def __exit__(self, type, value, traceback):
        self.session.commit()
        self.lock.off()
        self.autocommit = True
    def __create__(self, data):
        self.session.add(data)
        if self.autocommit:
            self.session.commit()
    def __query__(self, cls):
        return self.session.query(cls)
    def __update__(self):
        if self.autocommit:
            self.session.commit()
    def __delete__(self, data):
        self.session.delete(data)
        if self.autocommit:
            self.session.commit()
| |
import requests
import sys
import time
from pymongo import MongoClient
# Number of match ids to process per run of this script.
NUM_MATCH_IDS = 1000
# One Riot API key per line in .env.  Bug fix: the original left the file
# handle open for the life of the process; close it with a context manager.
api_keys = []
with open('.env') as api_file:
    for line in api_file:
        api_keys.append(line.strip('\n'))
# Whitelist of major ("finished") items tracked by the seeder; purchases of
# anything not in this list are ignored when recording a participant's build.
item_list = [
    "Abyssal Scepter",
    "Archangel's Staff",
    "Ardent Censer",
    "Athene's Unholy Grail",
    "Banner of Command",
    "Banshee's Veil",
    "Blade of the Ruined King",
    "The Black Cleaver",
    "The Bloodthirster",
    "Dead Man's Plate",
    "Essence Reaver",
    "Face of the Mountain",
    "Frost Queen's Claim",
    "Frozen Heart",
    "Frozen Mallet",
    "Guardian Angel",
    "Guinsoo's Rageblade",
    "Hextech Gunblade",
    "Iceborn Gauntlet",
    "Infinity Edge",
    "Last Whisper",
    "Liandry's Torment",
    "Lich Bane",
    "Locket of the Iron Solari",
    "Luden's Echo",
    "Manamune",
    "Maw of Malmortius",
    "Mejai's Soulstealer",
    "Mercurial Scimitar",
    "Mikael's Crucible",
    "Morellonomicon",
    "Nashor's Tooth",
    "Ohmwrecker",
    "Phantom Dancer",
    "Rabadon's Deathcap",
    "Randuin's Omen",
    "Ravenous Hydra (Melee Only)",
    "Righteous Glory",
    "Rod of Ages",
    "Runaan's Hurricane (Ranged Only)",
    "Rylai's Crystal Scepter",
    "Sightstone",
    "Spirit Visage",
    "Statikk Shiv",
    "Sterak's Gage",
    "Sunfire Cape",
    "Sword of the Occult",
    "Talisman of Ascension",
    "Thornmail",
    "Titanic Hydra",
    "Trinity Force",
    "Twin Shadows",
    "Void Staff",
    "Warmog's Armor",
    "Will of the Ancients",
    "Wit's End",
    "Youmuu's Ghostblade",
    "Zeke's Harbinger",
    "Zeke's Herald",
    "Zephyr",
    "Zhonya's Hourglass",
    "Zz'Rot Portal",
    "Boots of Swiftness",
    "Mercury's Treads",
    "Sorcerer's Shoes",
    "Boots of Mobility",
    "Berserker's Greaves",
    "Ionian Boots of Lucidity",
    "Perfect Hex Core",
    "Enchantment: Warrior",
    "Enchantment: Magus",
    "Enchantment: Runeglaive",
    "Enchantment: Juggernaut",
    "Enchantment: Cinderhulk",
    "Enchantment: Devourer"
]
def seed(match_ids):
# get static champion data
champions_URL = 'https://global.api.pvp.net/api/lol/static-data/na/v1.2/champion'
champions_payload = {'api_key': api_keys[0], 'dataById': True, 'champData': 'tags'}
champions_request = requests.get(champions_URL, champions_payload)
champions_json = champions_request.json()['data']
#get static item data
items_URL = 'https://global.api.pvp.net/api/lol/static-data/na/v1.2/item'
items_payload = {'api_key': api_keys[0], 'itemListData': 'stats'}
items_request = requests.get(items_URL, items_payload)
items_json = items_request.json()['data']
# iterate over matches
for index, match_id in enumerate(match_ids):
print "INDEX IS: " + str(index)
try:
# get match data
api_tries = 0
while api_tries < 10:
try:
time.sleep(0.01)
match_URL = 'https://na.api.pvp.net/api/lol/na/v2.2/match/' + match_id
match_payload = {'api_key': api_keys[index%1], 'includeTimeline': True}
match_request = requests.get(match_URL, match_payload)
match_json = match_request.json()
break
except:
time.sleep(0.3)
print "API error:", sys.exc_info()[0]
print match_request
# get list of participants
match_participants = match_json['participants']
# get teams
match_teams = match_json['teams']
# dictionary that maps participant ids to final data objects
# data objects created for each participant to track:
# 1. champion used
# 2. major items purchased, in order of purchase
# 3. whether or not they won
data_object_map = {}
for participant in match_participants:
data_object = {}
data_object['champion'] = champions_json[str(participant['championId'])]['name']
data_object['items'] = []
data_object['win'] = next((team['winner'] for team in match_teams if team['teamId'] == participant['teamId']))
data_object_map[str(participant['participantId'])] = data_object
# loop through timeline frames and
# save lists of item related events
item_events = []
item_purchased_events = []
item_sold_events = []
item_undo_events = []
for frame in match_json['timeline']['frames']:
# loop through frame events
if 'events' in frame:
for event in frame['events']:
# save relevant events
if 'itemId' in event:
# TODO handle ITEM_UNDO?
item_events.append(event)
if event['eventType'] == 'ITEM_PURCHASED':
item_purchased_events.append(event)
elif event['eventType'] == 'ITEM_SOLD':
item_sold_events.append(event)
elif event['eventType'] == 'ITEM_UNDO':
item_undo_events.append(event)
for event in item_purchased_events:
data_object = data_object_map[str(event['participantId'])]
if str(event['itemId']) in items_json:
item_name = items_json[str(event['itemId'])]['name']
# TODO check for sightstone
if item_name in item_list and item_name not in data_object['items']:
data_object['items'].append(item_name);
client = MongoClient()
db = client['riot_challenge']
match_data = db['514']
for data_object in data_object_map.values():
match_data.insert_one(data_object)
except KeyError:
print "Unexpected error:", sys.exc_info()[0]
if __name__ == '__main__':
    # usage: seed.py <match_id_csv> <starting_index>
    match_id_file = open(sys.argv[1])
    starting_index = int(sys.argv[2])
    match_ids = []
    try:
        match_id_file.readline()  # skip the CSV header row
        for i in range(starting_index):
            match_id_file.readline()
        for i in range(NUM_MATCH_IDS):
            line = match_id_file.readline()
            if not line:
                # Bug fix: stop at EOF instead of appending empty ids.
                break
            match_ids.append(line.split(',')[0])
    finally:
        # Bug fix: the file handle was never closed.
        match_id_file.close()
    seed(match_ids)
| |
from django.core.exceptions import FieldDoesNotExist
from django.apps import apps
# import the logging library
import warnings
import logging
import collections
import persisting_theory
# Get an instance of a logger
logger = logging.getLogger(__name__)
#: The package where autodiscover will try to find preferences to register
from .managers import PreferencesManager
from .settings import preferences_settings
from .exceptions import NotFoundInRegistry
from .types import StringPreference
from .preferences import EMPTY_SECTION, Section
class MissingPreference(StringPreference):
    """
    Used as a fallback when the preference object is not found in registries.
    This can happen for example when you delete a preference in the code,
    but don't remove the corresponding entries in the database.
    """
    # Intentionally empty: behaves as a plain StringPreference; concrete
    # section/name/default are filled in by PreferenceRegistry._fallback.
    pass
class PreferenceModelsRegistry(persisting_theory.Registry):
    """Store relationships between preference models and preference registries."""
    look_into = preferences_settings.REGISTRY_MODULE
    def register(self, preference_model, preference_registry):
        """Bind a preference model to its registry and wire both directions."""
        self[preference_model] = preference_registry
        preference_registry.preference_model = preference_model
        if not hasattr(preference_model, 'registry'):
            preference_model.registry = preference_registry
        self.attach_manager(preference_model, preference_registry)
    def attach_manager(self, model, registry):
        """Expose a preferences manager property on the related instance model."""
        if not hasattr(model, 'instance'):
            # global preferences have no per-instance relation
            return
        def instance_getter(inst):
            return registry.manager(instance=inst)
        target_class = model._meta.get_field('instance').remote_field.model
        setattr(target_class, preferences_settings.MANAGER_ATTRIBUTE, property(instance_getter))
    def get_by_preference(self, preference):
        """Resolve the registry for a preference model instance (proxy-aware)."""
        model = preference.__class__
        if preference._meta.proxy:
            model = preference._meta.proxy_for_model
        return self[model]
    def get_by_instance(self, instance):
        """Return a preference registry using a model instance."""
        # walk registered preference models and match the instance FK target
        for model, registry in self.items():
            try:
                related = model._meta.get_field('instance').remote_field.model
                if isinstance(instance, related):
                    return registry
            except FieldDoesNotExist:  # global preferences
                pass
        return None
# Module-level singleton mapping preference models to their registries.
preference_models = PreferenceModelsRegistry()
class PreferenceRegistry(persisting_theory.Registry):
    """
    Registries are special dictionaries that are used by dynamic-preferences to register and access your preferences.
    dynamic-preferences has one registry per Preference type:
    - :py:const:`user_preferences`
    - :py:const:`site_preferences`
    - :py:const:`global_preferences`
    In order to register preferences automatically, you must call :py:func:`autodiscover` in your URLconf.
    """
    look_into = preferences_settings.REGISTRY_MODULE
    #: a name to identify the registry
    name = "preferences_registry"
    preference_model = None
    #: used to reverse urls for sections in form views/templates
    section_url_namespace = None
    def __init__(self, *args, **kwargs):
        super(PreferenceRegistry, self).__init__(*args, **kwargs)
        # section name -> Section object, in registration order
        self.section_objects = collections.OrderedDict()
    def register(self, preference_class):
        """
        Store the given preference class in the registry.
        :param preference_class: a :py:class:`prefs.Preference` subclass
        """
        preference = preference_class(registry=self)
        self.section_objects[preference.section.name] = preference.section
        try:
            self[preference.section.name][preference.name] = preference
        except KeyError:
            # first preference of this section: create its mapping
            self[preference.section.name] = collections.OrderedDict()
            self[preference.section.name][preference.name] = preference
        return preference_class
    def _fallback(self, section_name, pref_name):
        """
        Create a fallback preference object,
        This is used when you have model instances that do not match
        any registered preferences, see #41
        """
        message = (
            'Creating a fallback preference with ' +
            'section "{}" and name "{}".' +
            'This means you have preferences in your database that ' +
            'don\'t match any registered preference. ' +
            'If you want to delete these entries, please refer to the ' +
            'documentation: https://django-dynamic-preferences.readthedocs.io/en/latest/lifecycle.html')  # NOQA
        warnings.warn(message.format(section_name, pref_name))
        class Fallback(MissingPreference):
            section = Section(name=section_name) if section_name else None
            name = pref_name
            default = ''
            help_text = 'Obsolete: missing in registry'
        return Fallback()
    def get(self, name, section=None, fallback=False):
        """
        Returns a previously registered preference
        :param section: The section name under which the preference is registered
        :type section: str.
        :param name: The name of the preference. You can use dotted notation 'section.name' if you want to avoid providing section param
        :type name: str.
        :param fallback: Should we return a dummy preference object instead of raising an error if no preference is found?
        :type name: bool.
        :return: a :py:class:`prefs.BasePreference` instance
        """
        # try dotted notation; ValueError means the name did not split into
        # exactly (section, name), so fall back to the explicit params
        try:
            section, name = name.split(
                preferences_settings.SECTION_KEY_SEPARATOR)
        except ValueError:
            pass
        # Bug fix: the dotted-notation branch previously returned
        # self[_section][name] directly, so a missing dotted preference raised
        # a raw KeyError and ignored fallback=True.  Both lookups now share
        # the same fallback/NotFoundInRegistry handling.
        try:
            return self[section][name]
        except KeyError:
            if fallback:
                return self._fallback(section_name=section, pref_name=name)
            raise NotFoundInRegistry("No such preference in {0} with section={1} and name={2}".format(
                self.__class__.__name__, section, name))
    def get_by_name(self, name):
        """Get a preference by name only (no section)"""
        for section in self.values():
            for preference in section.values():
                if preference.name == name:
                    return preference
        raise NotFoundInRegistry("No such preference in {0} with name={1}".format(
            self.__class__.__name__, name))
    def manager(self, **kwargs):
        """Return a preference manager that can be used to retrieve preference values"""
        return PreferencesManager(registry=self, model=self.preference_model, **kwargs)
    def sections(self):
        """
        :return: a list of apps with registered preferences
        :rtype: list
        """
        return self.keys()
    def preferences(self, section=None):
        """
        Return a list of all registered preferences
        or a list of preferences registered for a given section
        :param section: The section name under which the preference is registered
        :type section: str.
        :return: a list of :py:class:`prefs.BasePreference` instances
        """
        if section is None:
            return [self[section][name] for section in self for name in self[section]]
        else:
            return [self[section][name] for name in self[section]]
class PerInstancePreferenceRegistry(PreferenceRegistry):
    # Registry for preferences attached to a model instance (e.g. per-user);
    # behavior is inherited unchanged from PreferenceRegistry.
    pass
class GlobalPreferenceRegistry(PreferenceRegistry):
    # URL namespace used to reverse per-section URLs in global preference views.
    section_url_namespace = 'dynamic_preferences:global.section'
    def populate(self, **kwargs):
        # NOTE(review): delegates to self.models(), which is not defined in
        # this file -- presumably provided by persisting_theory.Registry or a
        # manager mixin; confirm before relying on it.
        return self.models(**kwargs)
# Module-level singleton for global (site-wide) preferences.
global_preferences_registry = GlobalPreferenceRegistry()
| |
'''
Framework for parsing geolocated satellite files
Instantiates appropriate class with filename and
optional strings specifying the subtype of file and
an override extension.
All classes store the following parameters:
name - the full pathname of the file
ext - the extension of the file
sub - the subtype of the file
Variables in the file are accessible through the get method
which optionally retrieves individual indices. The optional
parameter indices should specify which indices to retrieve
in a tuple. Multidimensional variables are retrieved as
slices along the "common" dimensions (not those unique to the
variable). Not passing indices will result in the retrieval
of all the data from that variable, though the dimensionality
of that data is not guaranteed. Returns nan's for
missing data values ONLY FOR TYPES THAT START AS FLOATS. Int types
keep their original fill value, but it is scaled and offset along
with everything else (so if you want to test for it, apply the
scale and offset to the fill value before testing)
Parser is responsible for properly applying scale-offset
to retrieved data.
Parsers should throw IOError if passed an invalid file
There are several special methods that must return specific
data regardless of filetype. These methods are used
by mapping functions, so they only have to be implemented
if the desired mapping function requires them.
get_geo_corners() - returns a record array with three fields.
The first field, lat, contains 4 floats,
the latitudes of the 4 corners. The field
lon contains 4 floats, the longitudes of
the 4 corners. The field ind is as
large as it needs to be to contain the
indices. each ind should be castable
to a tuple that can be fed into get.
get_geo_centers() - returns a record array with 3 fields. The
first field, lat, contains the latitude of
the pixel center. The lon field contains
the longitude of the pixel center. The
field ind is as large as it needs to be
to contain the indices. If cast to a
tuple and fed into the get() function,
it should retrieve the same pixel
The following functions may be implemented or not in any class.
They duplicate the functionality of the get function but in
some cases may allow for much more efficient operation. If
they are not implemented, they are expected to throw a
NotImplementedError when called.
__enter__() - enter method for context manager
__exit__() - exit method for context manager
get_cm(key, ind=(:)) - an alternate form of the get statement meant to be
used inside of a context manager (with statement).
Requiring use of a context manager enables
any files to be left open, improving efficiency.
While it is not required, it is recommended that this
method throw some kind of error when called outside a
context manager. Must operate exactly the same as
the get function in terms of inputs and output.
This framework can be extended by adding classes for particular (sub)class
'''
import os
import sys
import string
import pdb
import tables
import numpy
import pyhdf.HDF
import pyhdf.V
import pyhdf.VS
import pyhdf.SD
import filetypes
def SupportedFileTypes():
    '''Return a list of supported file types'''
    suffix = "_filetype"
    supported = []
    # every "<type>_filetype" attribute in the filetypes module names a type
    for entry in dir(filetypes):
        if entry.endswith(suffix):
            supported.append(entry[:-len(suffix)])
    return supported
def getOrbitNumber(fPath):
    '''Takes in the path to a nasa omi hdf file and returns the orbit number'''
    fid = tables.openFile(fPath)
    try:
        try:
            node = fid.getNode('/', 'HDFEOS INFORMATION/CoreMetadata')
        except tables.exceptions.NoSuchNodeError:
            node = fid.getNode('/', 'HDFEOS INFORMATION/CoreMetadata.0')
        bigString = str(list(node)[0])
        strings = bigString.split('\n')
        for i in range(len(strings)):
            if 'ORBITNUMBER' in strings[i]:
                break
        # the VALUE line sits three lines below the ORBITNUMBER object line
        line = strings[i+3]
        numArray = [int(el) for el in line.split() if el.isdigit()]
        return numArray[0]
    finally:
        # Bug fix: close the file even when metadata parsing raises
        # (the original leaked the handle on any error).
        fid.close()
def getLongName(fPath):
    '''Retrieve the long name of an HDFEOS file'''
    fid = tables.openFile(fPath)
    try:
        try:
            node = fid.getNode('/', 'HDFEOS INFORMATION/ArchiveMetadata')
        except tables.exceptions.NoSuchNodeError:
            node = fid.getNode('/', 'HDFEOS INFORMATION/ArchiveMetadata.0')
        bigString = str(list(node)[0])
        strings = bigString.split('\n')
        for i in range(len(strings)):
            if 'LONGNAME' in strings[i]:
                break
        # the quoted VALUE line sits two lines below the LONGNAME object line
        line = strings[i+2]
        chunks = line.split('"')
        return chunks[-2]
    finally:
        # Bug fix: close the file even when metadata parsing raises
        # (the original leaked the handle on any error).
        fid.close()
def get_parser(file, filetype, parserParms):
    """Retrieve appropriate instantiated parser for a file"""
    module = sys.modules[GeoFile.__module__]
    parserClass = getattr(module, '{0}_File'.format(filetype))
    # split the filetype token: the leading run of uppercase letters names
    # the extension, everything after that is the subtype
    extension = ''
    subtype = ''
    for ch in filetype:
        if not subtype and ch in string.ascii_uppercase:
            extension += ch
        else:
            subtype += ch
    return parserClass(file, subtype, extension, **parserParms)
class GeoFile():
    """
    Abstract base class defining the geofile parser interface.

    Concrete subclasses implement the accessors (get/get_cm, the
    geolocation methods) and the context-manager protocol; the base
    class only records the filename, subtype and extension.
    """
    def __init__(self, filename, subtype='', extension=None):
        self.name = filename
        # default the extension to the filename's own extension, with
        # the leading dot removed
        self.ext = extension or os.path.splitext(filename)[1][1:]
        self.sub = subtype
    def get(self, key, indices=None):
        raise NotImplementedError
    def get_geo_corners(self):
        raise NotImplementedError
    def get_geo_centers(self):
        raise NotImplementedError
    def __enter__(self):
        raise NotImplementedError
    def __exit__(self, exc_type, exc_value, traceback):
        # accept the standard context-manager arguments (the original
        # signature took none, so the interpreter raised TypeError
        # instead of the intended NotImplementedError); subclasses
        # already use this three-argument form
        raise NotImplementedError
    def get_cm(self, key, indices=None):
        raise NotImplementedError
class HDF4File(GeoFile):
    """
    Provide a generic interface for HDF 4 files.

    Concrete subclasses must define two class attributes:
      _nameExpMap - Dictionary.  Keys are field names available to the
                    user; values are the full path to that field in
                    the HDF file.
      _indexMap   - Dictionary.  Keys are field names available to the
                    user; values are functions that, when passed
                    (var, ind) where ind is an n-element tuple, return
                    the proper slice.  Must contain a 'default' entry.
    """
    def __init__(self, filename, subtype='', extension=None):
        GeoFile.__init__(self, filename, subtype=subtype, extension=extension)
        # sanity check: refuse to treat non-HDF4 files as HDF4
        if not pyhdf.HDF.ishdf(self.name):
            raise IOError('Attempt to read non HDF4 file as HDF4')
    def walkHDF4(self, fid, pathList, vInt, vsInt, sdInt):
        """
        Retrieve a variable or variable group from an HDF4 file.
        Requires the file handle fid and a list that contains each
        element of the path to the leaf that we want.  Returns the leaf,
        which may be of any type.
        Assumes that the leaf we want is a Vgroup or Vdata datatype
        (or a scientific dataset, handled as a shortcut).
        Arguments vInt and vsInt are the VG and VS interfaces
        as defined by pyhdf.  If passed, they WILL NOT
        be closed.
        To repeat, if the interfaces are passed in they will NOT be
        safely closed.
        """
        leafName = pathList[-1]  # name of the leaf we want
        # shortcut: scientific datasets can be selected directly
        sciData = sdInt.datasets()
        if leafName in sciData:
            return sdInt.select(leafName)
        # it must not be a scientific dataset, so walk the file to find it
        pList = list(pathList)  # shallow copy (consumed with pop below)
        parent = vInt.attach(vInt.getid(-1))
        pName = pList.pop(0)
        if parent._name != pName:
            raise AttributeError("Bad data path (did not start at root).")
        while parent._name != leafName:
            cName = pList.pop(0)
            children = parent.tagrefs()
            for (childType, childRef) in children:
                if childType == pyhdf.HDF.HC.DFTAG_VG:
                    child = vInt.attach(childRef)
                elif childType == pyhdf.HDF.HC.DFTAG_VH:
                    child = vsInt.attach(childRef)
                elif childType == pyhdf.HDF.HC.DFTAG_NDG:
                    # we know this can't be it so keep looking
                    continue
                else:
                    raise IOError('Unknown data format. Check data structure.')
                if child._name == cName:
                    # found the next path element: descend into it
                    parent.detach()
                    parent = child
                    break
                else:
                    child.detach()
            if parent is not child:
                # the loop finished without descending one level
                raise AttributeError('Bad data path. Check parser/data structure.')
        return parent
    def get(self, key, indices=None, missingValue=None):
        """
        Provide get functionality for HDF 4 files.
        Assumes absolutely no attributes present.
        If missingValue is provided, it will be used to mask floating
        point data properly with NaN's.  If it is not provided, the data
        will be returned as is, -9999.0's and all.
        Requires that the parser be set up with the _nameExpMap and
        _indexMap class attributes (see the class docstring).
        """
        fid = pyhdf.HDF.HDF(self.name)
        try:
            vInt = fid.vgstart()
            vsInt = fid.vstart()
            sdInt = pyhdf.SD.SD(self.name)
            path = self._nameExpMap[key]
            pathList = [el for el in path.split('/') if el]  # path list with no empty strings
            vNode = self.walkHDF4(fid, pathList, vInt, vsInt, sdInt)
            vData = numpy.array(vNode[:])
        except AttributeError:
            raise IOError("No field %s. May be attempt to read non-MOPPIT file as such." % self._nameExpMap[key])
        except KeyError:
            raise IOError("Attempt to use fieldname not associated with this filetype.")
        finally:
            # clean up from the bottom up; a NameError means that
            # variable was never bound because an earlier step failed
            try:
                vNode.detach()
            except NameError:
                pass
            except AttributeError:
                # must have been a scientific dataset
                vNode.endaccess()
            try:
                sdInt.end()
            except NameError:
                pass
            try:
                vsInt.end()
            except NameError:
                pass
            try:
                vInt.end()
            except NameError:
                pass
            # BUG FIX: the original read "fid.close" (no call), which
            # silently left the file handle open
            fid.close()
        # convert missing values if appropriate
        if missingValue and vData.dtype in ['float32', 'float64']:
            vData = numpy.where(vData == missingValue, numpy.NaN, vData)
        # use indices if we have them
        if indices is not None:
            # we want specific indices, use _indexMap
            indFunc = self._indexMap.get(key, self._indexMap['default'])
            return indFunc(vData, indices)
        else:
            # just fetch everything
            return vData
    def __enter__(self):
        '''Open up file and interfaces and leave them open.'''
        self._fid = pyhdf.HDF.HDF(self.name)
        self._open_vars = dict()  # cache: field name -> numpy array
        self._vsInt = self._fid.vstart()
        self._vInt = self._fid.vgstart()
        self._sdInt = pyhdf.SD.SD(self.name)
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        '''Close file and delete references to file object and nodes.'''
        self._sdInt.end()
        self._vsInt.end()
        self._vInt.end()
        self._fid.close()
        del self._open_vars
        del self._fid
        del self._vsInt
        del self._vInt
        return False  # never suppress exceptions
    def get_cm(self, key, indices=None, missingValue=None):
        """
        Provide get_cm function for HDF files.
        get_cm works the same as get, but relies on a context manager to
        speed up access by allowing it to safely leave open variables in
        memory.
        Assumes absolutely no attributes present.
        If missingValue is provided, it will be used to mask floating
        point data properly with NaN's.  If it is not provided, the data
        will be returned as is, -9999.0's and all.
        Requires the _nameExpMap and _indexMap class attributes (see the
        class docstring).
        """
        # read and cache the variable if it isn't cached already
        if key not in self._open_vars.keys():
            try:
                path = self._nameExpMap[key]
                pathList = [el for el in path.split('/') if el]
                vNode = self.walkHDF4(self._fid, pathList, self._vInt, self._vsInt, self._sdInt)
                self._open_vars[key] = numpy.array(vNode[:])
                try:
                    vNode.detach()
                except AttributeError:
                    # must have been a scientific dataset
                    vNode.endaccess()
            except AttributeError:
                raise IOError("No field %s. May be attempt to read non-MOPPIT file as such." % self._nameExpMap[key])
            except KeyError:
                raise IOError("Attempt to use fieldname %s, which is not associated with this filetype." % key)
            # convert missing values if appropriate (only needed on
            # first load; numpy.where is idempotent here anyway)
            if missingValue and self._open_vars[key].dtype in ['float32', 'float64']:
                self._open_vars[key] = numpy.where(self._open_vars[key] == missingValue,
                                                   numpy.NaN, self._open_vars[key])
        # retrieve value of interest from the (newly?) cached variable
        if indices is not None:
            # we want specific indices, use _indexMap
            indFunc = self._indexMap.get(key, self._indexMap['default'])
            return indFunc(self._open_vars[key], indices)
        else:
            # just fetch everything
            return self._open_vars[key]
class HDFFile(GeoFile):
    """
    Provide generic interface for HDF 5 files.

    Subclasses must define the _nameExpMap and _indexMap class
    attributes documented on the get method.
    """
    def __init__(self, filename, subtype='', extension=None):
        GeoFile.__init__(self, filename, subtype=subtype, extension=extension)
        if tables.isHDF5File(self.name): # sanity check
            pass
        else:
            raise IOError('Attempt to read non-HDF 5 file as HDF 5.')
    def get(self, key, indices=None):
        """
        Provide get function for HDF Files.
        Requires that parser be set up with _nameExpMap and _indexMap
        variables. These must be defined as:
        _nameExpMap - Dictionary. Keys are field names available to user.
                      Values are the full path to that field in the HDF file.
        _indexMap - Dictionary. Keys are field names available to user.
                    Values are functions that when passed (var, ind) where
                    ind is a n-element tuple will return the proper slice.
                    n is the number of fundamental dimensions of the
                    file type.
        """
        fid = tables.openFile(self.name)
        try:
            var = fid.getNode('/', self._nameExpMap[key])
            varAtts = var._v_attrs
            # fall back to NaN when the file carries no fill value
            missing = getattr(varAtts, '_FillValue', numpy.nan)
            # because attributes are single element arrays
            # and not zero-element arrays, they change the
            # rank of return values when applied. We take
            # the first element to get zero-rank arrays
            scale = getattr(varAtts,'ScaleFactor', [1.0])[0]
            offset = getattr(varAtts, 'Offset', [0.0])[0]
            if var[:].dtype in ['float32', 'float64']:
                # only cast if we have a type that features nans;
                # note this rebinds var from the pytables node to an array
                var = numpy.where(var == missing, numpy.NaN, var)
            if indices is not None:
                # we want specific indices, use _indexMap
                indFunc = self._indexMap.get(key, self._indexMap['default']) # fetch default if not in index map
            else:
                # don't bother with _indexMap, just fetch everything
                indFunc = lambda var, ind: var[:]
            if (scale != 1) or (offset != 0): # avoids casting if we don't need to
                return indFunc(var[:], indices)*scale + offset
            else:
                return indFunc(var[:], indices)
        except (tables.exceptions.NoSuchNodeError, AttributeError):
            raise IOError("No field %s. May be attempt to read non-KNMI Aura OMI file as such." % self._nameExpMap[key])
        except KeyError:
            raise KeyError("Attempt to use fieldname not associated with this filetype.")
        finally:
            # the file is closed either way; any array returned above
            # was already pulled into memory
            fid.close()
    def get_cm(self, key, indices=None):
        """
        Provide get_cm function for HDF files.
        get_cm works the same as get, but relies on a context manager to speed
        up access to the underlying files. Just as get, it requires that
        the parser have _nameExpMap and _indexMap variables. These must be
        defined as above.
        """
        # open the var if it isn't open already
        if key not in self._open_vars.keys():
            try:
                var = self._fid.getNode('/', self._nameExpMap[key])
                varAtts = var._v_attrs
                missing = getattr(varAtts, '_FillValue', numpy.nan)
                # because attributes are single element arrays
                # and not zero-element arrays, they change the
                # rank of return values when applied. We take
                # the first element to get zero-rank arrays
                scale = getattr(varAtts, 'ScaleFactor', [1.0])[0]
                offset = getattr(varAtts, 'Offset', [0.0])[0]
                if var[:].dtype in ['float32', 'float64']:
                    # only do nan sub if we don't have to cast;
                    # rebinds var from the pytables node to an array
                    var = numpy.where(var == missing, numpy.NaN, var)
                # cache the (node or array) plus its scale/offset so
                # later calls skip the file walk entirely
                self._open_vars[key] = var
                self._scales[key] = scale
                self._offsets[key] = offset
            except(KeyError):
                raise KeyError("No variable " + key + " in file " + self.name)
            except(tables.exceptions.NoSuchNodeError, AttributeError):
                raise IOError("No field %s. May be attempt to read non-KNMI Aura OMI file as such." % self._nameExpMap[key])
        # return the values from the open var
        if indices is not None:
            # we have indices, use _indexMap
            indFunc = self._indexMap.get(key, self._indexMap['default']) # fetch default if not index map
        else:
            # we want everything, don't bother with _indexMap
            indFunc = lambda var, ind: var[:]
        if (self._scales[key] != 1) or (self._offsets[key] != 0):
            return (indFunc(self._open_vars[key], indices)
                    *self._scales[key]+self._offsets[key])
        else:
            return indFunc(self._open_vars[key], indices)
    def __enter__(self):
        '''Open up file and leave open.'''
        self._fid = tables.openFile(self.name, mode='r')
        self._open_vars = dict()   # cache: field name -> node or array
        self._scales = dict()      # cache: field name -> scale factor
        self._offsets = dict()     # cache: field name -> offset
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        '''Close file and delete references to file object and nodes.'''
        self._fid.close()
        del self._open_vars
        del self._scales
        del self._offsets
        del self._fid
        return False
class HDFknmiomil2_File(HDFFile):
    """
    Provide interface to KNMI OMI L2 NRT product.

    Field keys map to paths under the DominoNO2 swath; indexing treats
    the last two axes of most fields as the (row, column) pixel axes.
    """
    # user-visible field name -> full path of the node in the HDF 5 file
    _nameExpMap = {"AirMassFactor" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/AirMassFactor",
                   "AirMassFactorGeometric" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/AirMassFactorGeometric",
                   "AirMassFactorTropospheric" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/AirMassFactorTropospheric",
                   "AssimilatedStratosphericSlantColumn" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/AssimilatedStratosphericSlantColumn",
                   "AssimilatedStratosphericVerticalColumn" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/AssimilatedStratosphericVerticalColumn",
                   "AveragingKernel" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/AveragingKernel",
                   "CloudFraction" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/CloudFraction",
                   "CloudFractionStd" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/CloudFractionStd",
                   "CloudPressure" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/CloudPressure",
                   "CloudPressureStd" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/CloudPressureStd",
                   "CloudRadianceFraction" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/CloudRadianceFraction",
                   "GhostColumn" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/GhostColumn",
                   "InstrumentConfigurationId" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/InstrumentConfigurationId",
                   "MeasurementQualityFlags" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/MeasurementQualityFlags",
                   "SlantColumnAmountNO2" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/SlantColumnAmountNO2",
                   "SlantColumnAmountNO2Std" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/SlantColumnAmountNO2Std",
                   "SurfaceAlbedo" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/SurfaceAlbedo",
                   "TM4PressurelevelA" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/TM4PressurelevelA",
                   "TM4PressurelevelB" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/TM4PressurelevelB",
                   "TM4SurfacePressure" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/TM4SurfacePressure",
                   "TM4TerrainHeight" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/TM4TerrainHeight",
                   "TM4TropoPauseLevel" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/TM4TropoPauseLevel",
                   "TerrainHeight" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/TerrainHeight", # random fact: _FillValue attribute is inaccurate for this field
                   "TotalVerticalColumn" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/TotalVerticalColumn",
                   "TotalVerticalColumnError" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/TotalVerticalColumnError",
                   "TroposphericColumnFlag" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/TroposphericColumnFlag",
                   "TroposphericVerticalColumn" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/TroposphericVerticalColumn",
                   "TroposphericVerticalColumnError" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/TroposphericVerticalColumnError",
                   "TroposphericVerticalColumnModel" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/TroposphericVerticalColumnModel",
                   "VCDErrorUsingAvKernel" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/VCDErrorUsingAvKernel",
                   "VCDTropErrorUsingAvKernel" : "/HDFEOS/SWATHS/DominoNO2/Data Fields/VCDTropErrorUsingAvKernel",
                   "GroundPixelQualityFlags" : "/HDFEOS/SWATHS/DominoNO2/Geolocation Fields/GroundPixelQualityFlags",
                   "Latitude" : "/HDFEOS/SWATHS/DominoNO2/Geolocation Fields/Latitude",
                   "LatitudeCornerpoints" : "/HDFEOS/SWATHS/DominoNO2/Geolocation Fields/LatitudeCornerpoints",
                   "Longitude" : "/HDFEOS/SWATHS/DominoNO2/Geolocation Fields/Longitude",
                   "LongitudeCornerpoints" : "/HDFEOS/SWATHS/DominoNO2/Geolocation Fields/LongitudeCornerpoints",
                   "SolarAzimuthAngle" : "/HDFEOS/SWATHS/DominoNO2/Geolocation Fields/SolarAzimuthAngle",
                   "SolarZenithAngle" : "/HDFEOS/SWATHS/DominoNO2/Geolocation Fields/SolarZenithAngle",
                   "Time" : "/HDFEOS/SWATHS/DominoNO2/Geolocation Fields/Time",
                   "ViewingAzimuthAngle" : "/HDFEOS/SWATHS/DominoNO2/Geolocation Fields/ViewingAzimuthAngle",
                   "ViewingZenithAngle" : "/HDFEOS/SWATHS/DominoNO2/Geolocation Fields/ViewingZenithAngle"
                   }
    # per-field slicing functions; the default expects the last two
    # axes to be the pixel axes, others index the leading axis only or
    # return the field whole
    _indexMap = {"default" : lambda var, ind: var[..., ind[0], ind[1]],
                 "InstrumentConfigurationId" : lambda var, ind: var[ind[0]],
                 "MeasurementQualityFlags" : lambda var, ind: var[ind[0]],
                 "TM4PressurelevelA" : lambda var, ind: var[:],
                 "TM4PressurelevelB" : lambda var, ind: var[:],
                 "Time" : lambda var, ind: var[ind[0]]
                 }
    def get_geo_corners(self):
        """
        Return a structured array with per-pixel corner coordinates.

        Fields: 'lat' and 'lon' (4 corner values each) and 'ind' (the
        2-element pixel index).
        """
        lat = self.get('LatitudeCornerpoints')
        lon = self.get('LongitudeCornerpoints')
        # move the corner axis from first to last: (row, col, corner)
        lat = numpy.transpose(lat, (1,2,0))
        lon = numpy.transpose(lon, (1,2,0))
        ind = numpy.indices(lat.shape[0:2]).transpose((1,2,0))
        protoDtype = [('lat', lat.dtype, 4), ('lon', lon.dtype, 4), ('ind', ind.dtype, 2)]
        struct = numpy.zeros(lat.shape[0:2], dtype=protoDtype)
        (struct['lat'], struct['lon'], struct['ind']) = (lat, lon, ind)
        return struct
    def get_geo_centers(self):
        """
        Return a structured array with per-pixel center coordinates.

        Fields: scalar 'lat' and 'lon' plus 'ind' (the 2-element pixel
        index).
        """
        lat = self.get('Latitude')
        lon = self.get('Longitude')
        ind = numpy.indices(lat.shape).transpose((1,2,0))
        protoDtype = [('lat', lat.dtype), ('lon', lon.dtype), ('ind', ind.dtype, 2)]
        struct = numpy.zeros(lat.shape,dtype=protoDtype)
        (struct['lat'], struct['lon'], struct['ind']) = (lat, lon, ind)
        return struct
class HDFnasaomil2_File(HDFFile):
"""
Provide interface to NASA OMI L2 product, with pixel corners
Pixel corners are retrieved from an extra file that must be accessible
in cornerDir. The files listed in cornerFileList, if any, will be checked
first, followed by any files in cornerDir. If no valid pixel corner file
is found that matches the orbit number of the input file, the parser will
instantiage but get_geo_corners will fail with an IOError.
The corners retrieved are for the visible channel
used by the NO2 algorithm- using this parser for other products may
require altering the parser to use a different channel if
appropriate.
The keys to retrieve variables are the names of the variables within actual
files. Note that the NASA product documentation has the wrong name for
the "SlantColumnAmountH20Std" variable due to a case typo.
Does not support the fields "UnpolFldCoefficients" or "SmallPixelRadiance"
because the dimensionality of these fields changes between files and there
is no way to deal with fields with variable dimension size in the current
framework.
"""
OMIAURANO2_FILE_NAME = "OMI/Aura Nitrogen Dioxide (NO2) Total & "\
"Troposph.Column 1-Orbit L2 Swath 13x24km"
OMIAURANO2_CORNER_FILE_NAME = "OMI/Aura Global Ground Pixel Corners "\
"1-Orbit L2 Swath 13x24km"
def __init__(self, filename, subtype='', extension=None, cornerDir=None,
cornerFileList=None):
HDFFile.__init__(self, filename, subtype, extension)
# make sure filename is actually an input file
if getLongName(filename) != HDFnasaomil2_File.OMIAURANO2_FILE_NAME:
raise IOError('Attempt to read non-NASA OMI L2 file as such.')
# start by assuming we aren't going to find anything
self.pixCorners = None
# see if the corner directory even exists. If it doesn't, we obviously can't
# find a corner file
if os.path.isdir(cornerDir):
# convert the corner files into full pathnames
# unless we were given null string (signal to search directory)
if cornerFileList != ['']:
cornerFileList = [os.path.join(cornerDir, f) for f in cornerFileList]
# get orbit number of file for matching
forbitnumber = getOrbitNumber(filename)
# try using the list
for f in cornerFileList:
if f != '' and getLongName(f) == HDFnasaomil2_File.OMIAURANO2_CORNER_FILE_NAME:
try:
if getOrbitNumber(f) == forbitnumber:
self.pixCorners = f
break
except:
pass
# if necessary, search entire corner file directory
if self.pixCorners == None:
allPossible = [os.path.join(cornerDir, f) for f in os.listdir(cornerDir)]
for f in allPossible:
try:
if tables.isHDF5File(f) \
and getLongName(f) == HDFnasaomil2_File.OMIAURANO2_CORNER_FILE_NAME \
and getOrbitNumber(f) == forbitnumber:
self.pixCorners = f
break
except:
pass
if self.pixCorners == None:
print "No valid corner file found for {0}.".format(filename)
__dataPath = '/HDFEOS/SWATHS/ColumnAmountNO2/Data Fields/'
__geoPath = '/HDFEOS/SWATHS/ColumnAmountNO2/Geolocation Fields/'
_nameExpMap = {'AMFInitial' : __dataPath+'AMFInitial',
'AMFInitialClear' : __dataPath+'AMFInitialClear',
'AMFInitialClearStd' : __dataPath+'AMFInitialClearStd',
'AMFInitialCloudy' : __dataPath+'AMFInitialCloudy',
'AMFInitialCloudyStd' : __dataPath+'AMFInitialCloudyStd',
'AMFInitialStd' : __dataPath+'AMFInitialStd',
'AMFPolluted' : __dataPath+'AMFPolluted',
'AMFPollutedClear' : __dataPath+'AMFPollutedClear',
'AMFPollutedClearStd' : __dataPath+'AMFPollutedClearStd',
'AMFPollutedCloudy' : __dataPath+'AMFPollutedCloudy',
'AMFPollutedCloudyStd' : __dataPath+'AMFPollutedCloudyStd',
'AMFPollutedStd' : __dataPath+'AMFPollutedStd',
'AMFPollutedToGround' : __dataPath+'AMFPollutedToGround',
'AMFPollutedToGroundStd' : __dataPath+'AMFPollutedToGroundStd',
'AMFQualityFlags' : __dataPath+'AMFQualityFlags',
'AMFUnpolluted' : __dataPath+'AMFUnpolluted',
'AMFUnpollutedClear' : __dataPath+'AMFUnpollutedClear',
'AMFUnpollutedClearStd' : __dataPath+'AMFUnpollutedClearStd',
'AMFUnpollutedCloudy' : __dataPath+'AMFUnpollutedCloudy',
'AMFUnpollutedCloudyStd' : __dataPath+'AMFUnpollutedCloudyStd',
'AMFUnpollutedStd' : __dataPath+'AMFUnpollutedStd',
'ChiSquaredOfFit' : __dataPath+'ChiSquaredOfFit',
'CloudFraction' : __dataPath+'CloudFraction',
'CloudFractionStd' : __dataPath+'CloudFractionStd',
'CloudPressure' : __dataPath+'CloudPressure',
'CloudPressureStd' : __dataPath+'CloudPressureStd',
'CloudRadianceFraction' : __dataPath+'CloudRadianceFraction',
'ColumnAmountNO2' : __dataPath+'ColumnAmountNO2',
'ColumnAmountNO2Std' : __dataPath+'ColumnAmountNO2Std',
'ColumnAmountNO2BelowCloud' : __dataPath+'ColumnAmountNO2BelowCloud',
'ColumnAmountNO2BelowCloudStd' : __dataPath+'ColumnAmountNO2BelowCloudStd',
'ColumnAmountNO2Initial' : __dataPath+'ColumnAmountNO2Initial',
'ColumnAmountNO2InitialStd' : __dataPath+'ColumnAmountNO2InitialStd',
'ColumnAmountNO2Polluted' : __dataPath+'ColumnAmountNO2Polluted',
'ColumnAmountNO2PollutedStd' : __dataPath+'ColumnAmountNO2PollutedStd',
'ColumnAmountNO2Trop' : __dataPath+'ColumnAmountNO2Trop',
'ColumnAmountNO2TropStd' : __dataPath+'ColumnAmountNO2TropStd',
'ColumnAmountNO2Unpolluted' : __dataPath+'ColumnAmountNO2Unpolluted',
'ColumnAmountNO2UnpollutedStd' : __dataPath+'ColumnAmountNO2UnpollutedStd',
'FitQualityFlags' : __dataPath+'FitQualityFlags',
'InstrumentConfigurationId' : __dataPath+'InstrumentConfigurationId',
'MeasurementQualityFlags' : __dataPath+'MeasurementQualityFlags',
'PolynomialCoefficients' : __dataPath+'PolynomialCoefficients',
'PolynomialCoefficientsStd' : __dataPath+'PolynomialCoefficientsStd',
'RingCoefficient' : __dataPath+'RingCoefficient',
'RingCoefficientStd' : __dataPath+'RingCoefficientStd',
'RootMeanSquareErrorOfFit' : __dataPath+'RootMeanSquareErrorOfFit',
'SlantColumnAmountH2O' : __dataPath+'SlantColumnAmountH2O',
'SlantColumnAmountH2OStd' : __dataPath+'SlantColumnAmountH2OStd',
'SlantColumnAmountNO2' : __dataPath+'SlantColumnAmountNO2',
'SlantColumnAmountNO2Std' : __dataPath+'SlantColumnAmountNO2Std',
'SlantColumnAmountO2O2' : __dataPath+'SlantColumnAmountO2O2',
'SlantColumnAmountO2O2Std' : __dataPath+'SlantColumnAmountO2O2Std',
'SlantColumnAmountO3' : __dataPath+'SlantColumnAmountO3',
'SlantColumnAmountO3Std' : __dataPath+'SlantColumnAmountO3Std',
'SmallPixelRadiance' : __dataPath+'SmallPixelRadiance',
'SmallPixelRadiancePointer' : __dataPath+'SmallPixelRadiancePointer',
'TerrainHeight' : __dataPath+'TerrainHeight',
'TerrainPressure' : __dataPath+'TerrainPressure',
'TerrainReflectivity' : __dataPath+'TerrainReflectivity',
'TropFractionUnpolluted' : __dataPath+'TropFractionUnpolluted',
'TropFractionUnpollutedStd' : __dataPath+'TropFractionUnpollutedStd',
'UnpolFldLatBandQualityFlags' : __dataPath+'UnpolFldLatBandQualityFlags',
'WavelengthRegistrationCheck' : __dataPath+'WavelengthRegistrationCheck',
'WavelengthRegistrationCheckStd' : __dataPath+'WavelengthRegistrationCheckStd',
'XTrackQualityFlags' : __dataPath+'XTrackQualityFlags',
'vcdQualityFlags' : __dataPath+'vcdQualityFlags',
'GroundPixelQualityFlags' : __geoPath+'GroundPixelQualityFlags',
'Latitude' : __geoPath+'Latitude',
'Longitude' : __geoPath+'Longitude',
'SolarAzimuthAngle' : __geoPath+'SolarAzimuthAngle',
'SolarZenithAngle' : __geoPath+'SolarZenithAngle',
'SpacecraftAltitude' : __geoPath+'SpacecraftAltitude',
'SpacecraftLatitude' : __geoPath+'SpacecraftLatitude',
'SpacecraftLongitude' : __geoPath+'SpacecraftLongitude',
'Time' : __geoPath+'Time',
'ViewingAzimuthAngle' : __geoPath+'ViewingAzimuthAngle',
'ViewingZenithAngle' : __geoPath+'ViewingZenithAngle'}
_indexMap = {'default' : lambda var, ind: var[ind[0], ind[1], ...],
'SmallPixelRadiance' : lambda var, ind: var[:, ind[1]],
'SmallPixelRadiancePointer' : lambda var, ind: var[ind[0], :],
'InstrumentConfigurationId' : lambda var, ind: var[ind[0]],
'MeasurementQualityFlags' : lambda var, ind: var[ind[0]],
'WavelengthRegistrationCheck' : lambda var, ind: var[ind[0], :],
'WavelengthRegistrationCheckStd' : lambda var, ind: var[ind[0], :],
'UnpolFldLatBandQualityFlags' : lambda var, ind: var[:],
'Time' : lambda var, ind: var[ind[0]],
'SpacecraftLatitude' : lambda var, ind: var[ind[0]],
'SpacecraftLongitude' : lambda var, ind: var[ind[0]],
'SpacecraftAltitude' : lambda var, ind: var[ind[0]]}
def get_geo_corners(self):
'''
Retrieves array of the corners of the pixels.
Throws IOError if no pixel corner file specified
'''
latNodeName = '/HDFEOS/SWATHS/OMI Ground Pixel Corners VIS/Data Fields/FoV75CornerLatitude'
lonNodeName = '/HDFEOS/SWATHS/OMI Ground Pixel Corners VIS/Data Fields/FoV75CornerLongitude'
try:
pxFid = tables.openFile(self.pixCorners)
except AttributeError:
raise IOError('Unable to open pixel corners file. Need pixel corners file to use corners')
try:
latNode = pxFid.getNode('/', latNodeName)
lonNode = pxFid.getNode('/', lonNodeName)
# Note: it is assumed that there are no missing values.
lat = latNode[:].transpose((1,2,0))
lon = lonNode[:].transpose((1,2,0))
finally:
pxFid.close()
ind = numpy.indices(lat.shape[0:2]).transpose((1,2,0))
protoDtype = [('lat', lat.dtype, 4), ('lon', lon.dtype, 4), ('ind', ind.dtype, 2)]
struct = numpy.zeros(lat.shape[0:2], dtype=protoDtype)
(struct['lat'], struct['lon'], struct['ind']) = (lat, lon, ind)
return struct
def get_geo_centers(self):
lat = self.get('Latitude')
lon = self.get('Longitude')
ind = numpy.indices(lat.shape).transpose((1,2,0))
protoDtype = [('lat', lat.dtype), ('lon', lon.dtype), ('ind', ind.dtype, 2)]
struct = numpy.zeros(lat.shape, dtype=protoDtype)
(struct['lat'], struct['lon'], struct['ind']) = (lat, lon, ind)
return struct
class HDFmopittl2_File(HDF4File):
    """
    Provide interface to MOPITT level 2 V5 product.
    Automatically sets the missing value for the data to
    -9999.0, as this is the missing value used (but not
    documented) within the data.
    """
    # user-visible field name -> full path of the node in the HDF 4 file
    _nameExpMap = {'Time' : '/MOP02/Geolocation Fields/Time',
                   'Latitude' : '/MOP02/Geolocation Fields/Latitude',
                   'Longitude' : '/MOP02/Geolocation Fields/Longitude',
                   'Seconds in Day' : '/MOP02/Data Fields/Seconds in Day',
                   'Pressure Grid' : '/MOP02/Data Fields/Pressure Grid',
                   'Solar Zenith Angle' : '/MOP02/Data Fields/Solar Zenith Angle',
                   'Satellite Zenith Angle' : '/MOP02/Data Fields/Satellite Zenith Angle',
                   'Surface Pressure' : '/MOP02/Data Fields/Surface Pressure',
                   'Retrieved Surface Temperature' : '/MOP02/Data Fields/Retrieved Surface Temperature',
                   'Retrieved Surface Emissivity' : '/MOP02/Data Fields/Retrieved Surface Emissivity',
                   'Retrieved CO Mixing Ratio Profile' : '/MOP02/Data Fields/Retrieved CO Mixing Ratio Profile',
                   'Retrieved CO Surface Mixing Ratio' : '/MOP02/Data Fields/Retrieved CO Surface Mixing Ratio',
                   'Retrieved CO Total Column' : '/MOP02/Data Fields/Retrieved CO Total Column',
                   'Retrieved CO Total Column Diagnostics' : '/MOP02/Data Fields/Retrieved CO Total Column Diagnostics',
                   'Retrieval Averaging Kernel Matrix' : '/MOP02/Data Fields/Retrieval Averaging Kernel Matrix',
                   'Retrieval Error Covariance Matrix' : '/MOP02/Data Fields/Retrieval Error Covariance Matrix',
                   'A Priori Surface Temperature' : '/MOP02/Data Fields/A Priori Surface Temperature',
                   'A Priori Surface Emissivity' : '/MOP02/Data Fields/A Priori Surface Emissivity',
                   'A Priori CO Mixing Ratio Profile' : '/MOP02/Data Fields/A Priori CO Mixing Ratio Profile',
                   'A Priori CO Surface Mixing Ratio' : '/MOP02/Data Fields/A Priori CO Surface Mixing Ratio',
                   'Level 1 Radiances and Errors' : '/MOP02/Data Fields/Level 1 Radiances and Errors',
                   'Degrees of Freedom for Signal' : '/MOP02/Data Fields/Degrees of Freedom for Signal',
                   'Surface Index' : '/MOP02/Data Fields/Surface Index',
                   'DEM Altitude' : '/MOP02/Data Fields/DEM Altitude',
                   'Cloud Description' : '/MOP02/Data Fields/Cloud Description',
                   'MODIS Cloud Diagnostics' : '/MOP02/Data Fields/MODIS Cloud Diagnostics',
                   'Water Vapor Climatology Content' : '/MOP02/Data Fields/Water Vapor Climatology Content',
                   'Retrieval Iterations' : '/MOP02/Data Fields/Retrieval Iterations',
                   'Information Content Index' : '/MOP02/Data Fields/Information Content Index',
                   'Signal Chi2' : '/MOP02/Data Fields/Signal Chi2',
                   'Swath Index' : '/MOP02/Data Fields/Swath Index'}
    # per-field slicing functions; MOPITT L2 pixels are indexed by a
    # single leading axis, and 'Pressure Grid' is returned whole
    _indexMap = {'default' : lambda var, ind: var[ind[0], ...],
                 'Pressure Grid' : lambda var, ind: var[:]}
    def get(self, key, indices=None):
        '''Overloaded version of get that applies the correct missing value.'''
        return HDF4File.get(self, key, indices, missingValue=-9999.0)
    def get_cm(self, key, indices=None):
        '''Overloaded version of get_cm that applies the correct missing value.'''
        return HDF4File.get_cm(self, key, indices, missingValue=-9999.0)
    def get_geo_centers(self):
        '''Retrieve a structured array of pixel center lat/lon/index.'''
        lat = self.get('Latitude').squeeze()
        lon = self.get('Longitude').squeeze()
        # pixels are addressed by a single flat index here
        ind = numpy.arange(lat.size).reshape(lat.size,1)
        protoDtype = [('lat', lat.dtype), ('lon', lon.dtype), ('ind', ind.dtype, (1,))]
        struct = numpy.zeros(lat.size, dtype = protoDtype)
        (struct['lat'], struct['lon'], struct['ind']) = (lat, lon, ind)
        return struct
# ---- concatenation artifact removed: a second module (Tkinter UI for StaSh) follows ----
"""
Tkinter UI for StaSh
"""
import six
from six.moves import tkinter, tkinter_messagebox, tkinter_scrolledtext, queue
from ..shscreens import ShChar
from ..shcommon import K_CC, K_CD, K_HUP, K_HDN, K_LEFT, K_RIGHT, K_CU, K_TAB, K_HIST, K_CZ, K_KB
from .base import ShBaseUI, ShBaseTerminal, ShBaseSequentialRenderer
class ShUI(ShBaseUI):
"""
An UI using the Tkinter module.
"""
def __init__(self, *args, **kwargs):
ShBaseUI.__init__(self, *args, **kwargs)
# ui
self.tk = tkinter.Tk()
self.tk.title("StaSh")
self.tk.protocol("WM_DELETE_WINDOW", self.on_close)
# fullscreen logic
# from: https://stackoverflow.com/a/23840010
self._fullscreen = False
self.tk.bind_all("<F11>", self._toggle_fullscreen)
# terminal
self.terminal = ShTerminal(self.stash, self)
# right click menu
self._rc_menu = tkinter.Menu(self.tk, tearoff=0)
self._rc_menu.add_command(label="Copy", command=self._rc_copy)
self._rc_menu.add_command(label="Paste", command=self._rc_paste)
self._rc_menu.add_command(label="Toggle Fullscreen", command=self._toggle_fullscreen)
self._rc_menu.add_command(label="Quit", command=self.stash.close)
self.tk.bind("<Button-3>", self._popup_rc_menu) # TODO: check <Button-3> portability
def show(self):
self.tk.mainloop()
def close(self):
self.on_exit() # not on_close()
self._close_ui()
def on_close(self):
"""
Called when the window will be closed
"""
if tkinter_messagebox.askokcancel(u"Quit", u"Are you sure you want to quit?"):
self.on_exit()
self._close_ui()
def _close_ui(self):
"""
Actually close the UI.
"""
self.stash.renderer._stop_rendering()
self.tk.destroy()
def history_present(self, history):
window = tkinter.Toplevel(self.tk)
listbox = tkinter.Listbox(window)
listbox.pack(side=tkinter.LEFT, fill=tkinter.BOTH, expand=1)
scrollbar = tkinter.Scrollbar(window, orient=tkinter.VERTICAL)
scrollbar.config(command=listbox.yview)
scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
listbox.config(yscrollcommand=scrollbar.set)
# insert data
items = history.getlist()
for line in items:
listbox.insert(tkinter.END, line)
listbox.bind("<Double-Button-1>", lambda e: self._history_selected(window, items, listbox.curselection()))
listbox.bind("<Return>", lambda e: self._history_selected(window, items, listbox.curselection()))
listbox.focus_set()
def _history_selected(self, window, items, idx):
"""
Called when a line was selected from the history popover.
:param window: the history popover window
:type window: tkinter.Toplevel
:param items: list of lines in the history
:type items: list of str
:param idx: selected index
:type idx: int
"""
i = idx[0]
window.destroy()
line = items[i]
self.history_selected(line, i)
def _popup_rc_menu(self, event):
"""
Show self._rc_menu as a popup.
:param event: tkinter event
"""
try:
self._rc_menu.post(event.x_root, event.y_root)
finally:
self._rc_menu.grab_release()
def _rc_copy(self):
"""
Called on "Copy" in rc_menu. Copy selected content to clipboard.
"""
sr = self.terminal.selected_range
selected_text = self.terminal.text[sr[0]:sr[1]]
self.stash.libdist.clipboard_set(selected_text)
def _rc_paste(self):
"""
Called on "Paste" in rc_menu. Paste text from clipboard.
"""
text = self.stash.libdist.clipboard_get()
rng = self.terminal.selected_range
self.stash.user_action_proxy.tv_responder.textview_should_change(None, rng, text)
def _toggle_fullscreen(self, event=None):
    """
    Toggle the fullscreen mode of the window.
    :param event: ignored tkinter event (allows direct use as a key binding)
    """
    new_state = not self._fullscreen
    self._fullscreen = new_state
    self.tk.attributes("-fullscreen", new_state)
    return "break"
class ShTerminal(ShBaseTerminal):
    """
    A Terminal using the Tkinter module
    """
    # delay (in ms) between two polls of the output queue in _loop()
    _LOOP_DELAY = 5
    # mapping of raw control characters to StaSh virtual keys,
    # dispatched in _on_key_press()
    _keymapping = { # tkinter event -> StaSh key
        "\x03": K_CC, # ctrl-c
        "\t": K_TAB, # tab
        "\x08": K_HIST, # ctrl-h
        "\x1a": K_CZ, # ctrl-z
        "\x15": K_CU, # ctrl-u
    }
def __init__(self, stash, parent):
    """
    Initialize the Tkinter terminal widget.
    :param stash: the StaSh core object
    :param parent: the parent window (provides .tk, the tk root)
    """
    ShBaseTerminal.__init__(self, stash, parent)
    # text updates are funneled through a StringVar so that writes coming
    # from other threads are applied on the tk mainloop (see _loop())
    self._txtvar_out = tkinter.StringVar(self.parent.tk)
    self._txtvar_out.trace("w", self._update_text)
    self._txt = tkinter_scrolledtext.ScrolledText(
        self.parent.tk,
        wrap=tkinter.CHAR,
        bg=self._color_from_tuple(self.background_color),
        fg=self._color_from_tuple(self.text_color),
        insertbackground=self._color_from_tuple(self.tint_color),
        selectbackground=self._color_from_tuple(self.tint_color),
    )
    self._txt.pack(fill=tkinter.BOTH, expand=1)
    # bindings
    self._txt.bind("<Key>", self._on_key_press)
    self._txt.bind("<FocusIn>", self._on_focus)
    # BUG FIX: <FocusOut> was bound to self._on_focus, so losing the focus
    # incorrectly reported "did begin editing"; bind the loss handler
    # (previously defined but never used) instead.
    self._txt.bind("<FocusOut>", self._on_focus_loss)
    self._txt.bind("<Left>", self._arrow_key_pressed)
    self._txt.bind("<Right>", self._arrow_key_pressed)
    self._txt.bind("<Up>", self._arrow_key_pressed)
    self._txt.bind("<Down>", self._arrow_key_pressed)
    # we can not yet initialize the color system, so we need to do this later
    self._colors_initialized = False
    # output queue, drained periodically by _loop() on the tk thread
    self._q = queue.Queue()
    self.parent.tk.after(self._LOOP_DELAY, self._loop)
def _loop(self):
    """
    Periodic callback on the tk mainloop: apply at most one pending text
    update from the queue, then reschedule itself.
    """
    try:
        # FIX: was self._q.get(0), which only works because 0 is a falsy
        # value for the 'block' argument; get_nowait() is the explicit,
        # equivalent call.
        value = self._q.get_nowait()
    except queue.Empty:
        # nothing queued; just reschedule
        pass
    else:
        self._txtvar_out.set(value)
    self.parent.tk.after(self._LOOP_DELAY, self._loop)
@property
def text(self):
    # The tkinter Text widget always appends a trailing "\n" to its
    # content; normalize all line endings to "\n" and strip that final
    # newline with [:-1].
    return self._txt.get("1.0", tkinter.END).replace("\r\n", "\n").replace("\r", "\n")[:-1]
@text.setter
def text(self, value):
    # Do not write to the widget directly: queue the value so that the
    # tk mainloop thread applies it (see _loop()).
    self._q.put(value)
def _on_key_press(self, event):
    """
    Called when a key was pressed.
    Translates the raw tkinter key event into either a StaSh virtual key
    (control combinations) or a text replacement that is offered to the
    responder via textview_should_change().
    :param event: the event which fired this callback
    :type event: six.moves.tkinter.Event
    :return: "break" to stop tkinter from processing the event further, else None
    """
    # get the current position
    # NOTE(review): cp is computed but never used below -- verify intent
    cp = self._get_cursor_position() # TODO: check if this must be calculated before or after the keypress
    rng = self.selected_range
    replacement = event.char
    skip_should_change = False # if true, skip should_change
    if self.debug:
        self.logger.debug("key {!r} pressed (symbol: {!r}; selected: {!r})".format(replacement, event.keysym, rng))
    if replacement in ("\r", "\r\n"):
        # normalize CR / CRLF to a single newline
        replacement = "\n"
    elif replacement == "\x08" and event.keysym != "h":
        # backspace (for some reason, same code as ctrl-h)
        replacement = u""
        if rng[0] == rng[1]:
            # no selection: extend the range to delete the char left of the cursor
            rng = (rng[0] - 1, rng[1])
    elif replacement == "\x7f":
        # del
        replacement = u""
        skip_should_change = True
        if rng[0] == rng[1]:
            rng = (rng[0], rng[1])
    elif replacement in self._keymapping:
        # a control combination mapped to a StaSh virtual key
        self.stash.user_action_proxy.vk_tapped(self._keymapping[replacement])
        return "break"
    if skip_should_change or self.stash.user_action_proxy.tv_responder.textview_should_change(None, rng, replacement):
        # accepted: notify StaSh asynchronously, let tkinter apply the edit
        self.parent.tk.after(0, self._notify_change)
        #self.parent.tk.after(0, self._notify_cursor_move)
    else:
        # break event
        return "break"
    # TODO: the cursor probably moved
def _arrow_key_pressed(self, event):
    """
    Called when an arrow key was pressed; forwards the matching StaSh
    virtual key to the user action proxy.
    :param event: the event which fired this callback
    """
    key_to_vk = {
        "left": K_LEFT,
        "right": K_RIGHT,
        "up": K_HUP,
        "down": K_HDN,
    }
    direction = event.keysym.lower()
    if direction not in key_to_vk:
        raise ValueError("Unknown key: {!r}".format(direction))
    self.stash.user_action_proxy.vk_tapped(key_to_vk[direction])
    return "break"
def _notify_change(self):
"""
Notify StaSh that the text changed.
"""
self.stash.user_action_proxy.tv_responder.textview_did_change(None)
def _set_text(self, text):
"""
Set the text.
:param text: text to set
:type text: str
"""
self.text = text
def _on_focus(self, event):
    """
    Called when the widget received the focus; reports
    textview_did_begin_editing to the responder.
    (The previous docstring said "focus was lost", which contradicts
    the begin-editing call below.)
    :param event: the event which fired this callback
    :type event: six.moves.tkinter.Event
    """
    self.stash.user_action_proxy.tv_responder.textview_did_begin_editing(None)
def _on_focus_loss(self, event):
    """
    Called when the focus was lost; reports textview_did_end_editing
    to the responder.
    :param event: the event which fired this callback
    :type event: six.moves.tkinter.Event
    """
    self.stash.user_action_proxy.tv_responder.textview_did_end_editing(None)
def _get_cursor_position(self):
    """
    Return the cursor position as an offset from the start of the text.
    :return: the cursor position
    :rtype: int
    """
    absolute = self._get_absolute_cursor_position()
    return self._abs_cursor_pos_to_rel_pos(absolute)
def _get_absolute_cursor_position(self):
    """
    Return the actual cursor position as a tuple of (row, column).
    :return: (row, column) of the insert mark
    :rtype: tuple of (int, int)
    """
    # see https://stackoverflow.com/questions/30000368/how-to-get-current-cursor-position-for-text-widget
    index = self._txt.index(tkinter.INSERT)
    return self._tk_index_to_tuple(index)
def _abs_cursor_pos_to_rel_pos(self, value, lines=None):
"""
Convert an absolute cursor position (tuple of (int, int)) into a index relative to the start (int).
'lines' are optional and specify a list of lines on which these calculations should be made.
:param value: value to convert
:type value: tuple of (int, int)
:param lines: alternative lines to calculate position from (default: current lines)
:type lines: list of str
"""
if lines is None:
# get lines
lines = self.text.split("\n")
row, column = value
n = 0
# first, add all lines before the current one
for i in range(row):
line = lines[i]
n += len(line) + 1 # 1 for linebreak
# add column
n += column
# done
return n
def _rel_cursor_pos_to_abs_pos(self, value, lines=None):
    """
    Convert a cursor position relative to the start (int) to a tuple of (row, column).
    'lines' are optional and specify a list of lines on which these calculations should be made.
    :param value: value to convert
    :type value: int
    :param lines: alternative lines to calculate position from (default: current lines)
    :type lines: list of str
    :return: the position as (row, column)
    :rtype: tuple of (int, int)
    """
    if lines is None:
        # get lines
        lines = self.text.split("\n")
    n = value
    row = 0
    # walk the lines, consuming n until it fits into the current row
    while True:
        if row >= len(lines):
            # for some reason, we are at the end of the text. this is probably a bug, but lets return an approximate value to the end
            return (len(lines) - 1, len(lines[len(lines) - 1]) - 1)
        ll = len(lines[row])
        if n <= ll:
            # n fits in line
            return row, n
        else:
            # n must be in next line
            n -= (ll + 1) # 1 for newline
            row += 1
def _tk_index_to_tuple(self, value):
"""
Convert a tkinter index to a tuple of (row, column), starting at 0
:param value: value to convert
:type value: str
:return: the converted value as (row, column), both starting at 0
:rtype: tuple of (int, int)
"""
splitted = value.split(".")
row = int(splitted[0]) - 1
column = int(splitted[1])
return (row, column)
def _tuple_to_tk_index(self, value):
"""
Convert a (row, column) tuple to a tk index.
:param value: value to convert
:type value: tuple of (int, int)
:return: the converted value
:rtype: str
"""
row, column = value
return str(row + 1) + "." + str(column)
def _get_selection_range(self):
    """
    Return the start and end offsets of the currently selected text.
    :return: start and end offset of the selection, or (None, None) if
             nothing is selected
    :rtype: tuple of (int, int)
    """
    # based on: https://stackoverflow.com/questions/4073468/how-do-i-get-a-selected-string-in-from-a-tkinter-text-box
    if not self._txt.tag_ranges(tkinter.SEL):
        # nothing selected
        return None, None
    start_tuple = self._tk_index_to_tuple(self._txt.index(tkinter.SEL_FIRST))
    end_tuple = self._tk_index_to_tuple(self._txt.index(tkinter.SEL_LAST))
    start_offset = self._abs_cursor_pos_to_rel_pos(start_tuple)
    end_offset = self._abs_cursor_pos_to_rel_pos(end_tuple)
    return start_offset, end_offset
def _leftmost(self):
    """
    Return True if the cursor is at (or before) the first modifiable character.
    """
    cursor = self._get_cursor_position()
    return cursor <= self.stash.main_screen.x_modifiable
def _update_text(self, *args):
    """
    Callback for the output StringVar: replace the whole widget content.
    """
    new_content = self._txtvar_out.get()
    self._txt.delete("1.0", tkinter.END)
    self._txt.insert("1.0", new_content)
def _tag_for_char(self, c):
    """
    Return the tag describing the style of the given character.
    :param c: character to get tag for
    :type c: stash.system.shscreens.ShChar
    :return: the tag used for this char
    :rtype: str
    """
    options = dict(
        fg=c.fg,
        bg=c.bg,
        bold=c.bold,
        italics=c.italics,
        underscore=c.underscore,
        strikethrough=c.strikethrough,
        reverse=c.reverse,
    )
    return self._tag_for_options(**options)
def _tag_for_options(self,
fg="default",
bg="default",
bold=False,
italics=False,
underscore=False,
strikethrough=False,
reverse=False,
):
"""
Return a tag which described the given options.
:param fg: fg color
:type fg: str
:bg: bg color
:type bg: str
:param bold: boldness
:type bold: bool
:param italics: toogle italics
:type italics: bool
:param underscore: toogle underscore
:type underscore: bool
:param striketrough: toogle striketrough
:type striketrough: bool
:param reverse: no idea
:type reverse: bool
:return: a tag which identifies this style
:rtype: str
"""
s = "{}-{}".format(fg, bg)
if bold:
s += "-bold"
if italics:
s += "italics"
if underscore:
s += "-underscore"
if strikethrough:
s += "-strikethrough"
if reverse:
s += "-reverse"
return s
def _add_color_tags(self):
    """
    Add the color tags.
    Registers one widget tag per combination of fg/bg color and style
    flags, using _tag_for_options() for the tag names so that lookups in
    _tag_for_char() match.
    """
    # TODO: surely there is a better way of doing this.
    self.logger.info("Initializing color system...")
    for fg in self.stash.renderer.FG_COLORS:
        for bg in self.stash.renderer.BG_COLORS:
            for bold in (False, True):
                for italics in (False, True):
                    for underscore in (False, True):
                        for strikethrough in (False, True):
                            # striketrough is implemented in replace_in_range()
                            for reverse in (False, True):
                                # reverse does not actually seem to be used anywhere
                                tag = self._tag_for_options(
                                    fg=fg,
                                    bg=bg,
                                    bold=bold,
                                    italics=italics,
                                    underscore=underscore,
                                    strikethrough=strikethrough,
                                    reverse=reverse,
                                )
                                # build the tkinter tag configuration for this combination
                                kwargs = {}
                                fontattrs = []
                                if fg != "default":
                                    kwargs["foreground"] = self.stash.renderer.FG_COLORS[fg]
                                if bg != "default":
                                    kwargs["background"] = self.stash.renderer.BG_COLORS[bg]
                                if underscore:
                                    kwargs["underline"] = True
                                if bold:
                                    fontattrs.append("bold")
                                if italics:
                                    fontattrs.append("italic")
                                font = ("Menlo-regular", self.font_size, " ".join(fontattrs))
                                kwargs["font"] = font
                                self._txt.tag_config(
                                    tag,
                                    **kwargs
                                )
                                # TODO: support for reverse
    self._colors_initialized = True
    self.logger.info("Color system initialized.")
def _color_from_tuple(self, value):
"""
Convert an rgb color tuple to a hex color
:param value: value to convert
:type value: tuple of (int, int, int)
:return: hexcode of color
:rtype: str
"""
r, g, b = value
r = int(255 * r)
g = int(255 * g)
b = int(255 * b)
hexcode = "#{:02X}{:02X}{:02X}".format(r, g, b)
return hexcode
# ============= api implementation ============
@property
def selected_range(self):
    # (start, end) offsets of the current selection; collapses to
    # (cursor, cursor) when nothing is selected
    start, end = self._get_selection_range()
    if (start is None) or (end is None):
        cp = self._get_cursor_position()
        return (cp, cp)
    else:
        return (start, end)
@selected_range.setter
def selected_range(self, value):
    assert isinstance(value, tuple)
    assert len(value) == 2
    assert isinstance(value[0], int) and isinstance(value[1], int)
    if value == self.selected_range:
        # do nothing
        pass
    else:
        # set cursor synced to false
        self.cursor_synced = False
        # set tag
        start = self._tuple_to_tk_index(self._rel_cursor_pos_to_abs_pos(value[0]))
        end = self._tuple_to_tk_index(self._rel_cursor_pos_to_abs_pos(value[1]))
        self._txt.tag_add(tkinter.SEL, start, end)
        # place the insert mark at the end of the selection
        self._txt.mark_set(tkinter.INSERT, end)
        # set focus
        self.set_focus()
def scroll_to_end(self):
    """Scroll the view to the end of the text."""
    self._txt.see(tkinter.END)
def set_focus(self):
    """Give keyboard focus to the terminal text widget."""
    self._txt.focus_set()
def lose_focus(self):
    """Give keyboard focus back to the parent window."""
    self.parent.tk.focus_set()
def replace_in_range(self, rng, text):
    """
    Replace the text in the given range
    :param rng: range to replace (start, length)
    :type rng: tuple of (int, int)
    :param text: text to insert
    :type text: iterable of str or ShChar
    """
    rstart, length = rng
    start, end = self._rel_cursor_pos_to_abs_pos(rstart), self._rel_cursor_pos_to_abs_pos(rstart + length)
    tkstart, tkend = self._tuple_to_tk_index(start), self._tuple_to_tk_index(end)
    # remember the selection/cursor so it can be restored after the edit
    saved = self.selected_range
    self._txt.delete(tkstart, tkend)
    # cp: insertion offset, advanced by the width of each inserted item
    cp = rstart
    for c in text:
        # a: number of characters this item adds to the widget
        a = 1
        ctkp = self._tuple_to_tk_index(self._rel_cursor_pos_to_abs_pos(cp))
        if isinstance(c, (six.binary_type, six.text_type)):
            # plain string: insert without a style tag
            self._txt.insert(ctkp, c)
        elif isinstance(c, ShChar):
            # styled character: make sure the style tags exist (lazy init)
            if not self._colors_initialized:
                self._add_color_tags()
            ch = c.data
            if c.strikethrough:
                # strikethrough is rendered by prepending the combining
                # long-stroke-overlay character (U+0336)
                ch = u"\u0336" + ch
                a += 1
            self._txt.insert(ctkp, ch, self._tag_for_char(c))
        else:
            raise TypeError("Unknown character type {!r}!".format(type(c)))
        cp += a
    self.selected_range = saved # restore cursor position
def get_wh(self):
    """
    Return the number of columns and rows of the terminal.
    :return: number of columns and rows.
    :rtype: tuple of (int, int)
    """
    # Text.config(key) returns a 5-tuple; index 4 holds the current value
    width = self._txt.config("width")[4]
    height = self._txt.config("height")[4]
    return (width, height)
class ShSequentialRenderer(ShBaseSequentialRenderer):
    """
    ShSequentialBaseRenderer for Tkinter
    """
    # interval (in ms) between two iterations of the render loop
    RENDER_INTERVAL = 1
    # mapping of StaSh color names to tk color names (foreground)
    FG_COLORS = {
        'black': "black",
        'red': "red",
        'green': "green",
        'brown': "brown",
        'blue': "blue",
        'magenta': "magenta",
        'cyan': "cyan",
        'white': "white",
        'gray': "gray",
        'yellow': "yellow",
        'smoke': "gray64",
        'default': "white",
    }
    # mapping of StaSh color names to tk color names (background)
    BG_COLORS = {
        'black': "black",
        'red': "red",
        'green': "green",
        'brown': "brown",
        'blue': "blue",
        'magenta': "magenta",
        'cyan': "cyan",
        'white': "white",
        'gray': "gray",
        'yellow': "yellow",
        'smoke': "gray64",
        # NOTE(review): a red default background looks like a debug
        # leftover -- confirm this is intended
        'default': "red",
    }
def __init__(self, *args, **kwargs):
    """
    Initialize the renderer and start the periodic render loop on the
    tk mainloop.
    """
    ShBaseSequentialRenderer.__init__(self, *args, **kwargs)
    # flag set by render() and consumed by _renderer_loop()
    self.should_render = False
    # while True, _renderer_loop() keeps rescheduling itself
    self._render_loop_active = True
    self.stash.ui.tk.after(0, self._renderer_loop)
def _renderer_loop(self):
    """
    Internal render loop: render when a render was requested, then
    reschedule. Stops rescheduling once _render_loop_active is False.
    """
    if self._render_loop_active:
        if self.should_render:
            # consume the request before rendering
            self.should_render = False
            self._render()
        self.stash.ui.tk.after(self.RENDER_INTERVAL, self._renderer_loop)
def render(self, no_wait=False):
    """
    Request a render. The actual rendering happens asynchronously in
    _renderer_loop(); 'no_wait' is accepted for API compatibility but
    not used here.
    """
    self.should_render = True
def _stop_rendering(self):
    """
    Stop the render loop.
    """
    self._render_loop_active = False
def _render(self, no_wait=False):
    """
    Render the screen buffer into the terminal, re-rendering only the
    range that changed since the last render.
    """
    # Lock screen to get atomic information
    with self.screen.acquire_lock():
        intact_left_bound, intact_right_bound = self.screen.get_bounds()
        screen_buffer_length = self.screen.text_length
        cursor_xs, cursor_xe = self.screen.cursor_x
        renderable_chars = self.screen.renderable_chars
        self.screen.clean()
    # First remove any leading texts that are rotated out
    if intact_left_bound > 0:
        self.terminal.replace_in_range((0, intact_left_bound), '')
    tv_text_length = self.terminal.text_length # tv_text_length = tvo_texts.length()
    # Second (re)render any modified trailing texts
    # When there are contents beyond the right bound, either on screen
    # or on terminal, the contents need to be re-rendered.
    if intact_right_bound < max(tv_text_length, screen_buffer_length):
        if len(renderable_chars) > 0:
            self.terminal.replace_in_range(
                (intact_right_bound,
                 tv_text_length - intact_right_bound),
                renderable_chars,
            )
        else: # empty string, pure deletion
            self.terminal.replace_in_range(
                (intact_right_bound,
                 tv_text_length - intact_right_bound),
                '',
            )
    # Set the cursor position. This makes terminal and main screen cursors in sync
    self.terminal.selected_range = (cursor_xs, cursor_xe)
    # Ensure cursor line is visible by scroll to the end of the text
    self.terminal.scroll_to_end()
| |
from threeML.minimizer import minimization
import collections
import numpy
import scipy.optimize
import scipy.stats
import sys
import matplotlib.pyplot as plt
class JointLikelihood(object):
    """
    Joint likelihood analysis: fit one likelihood model to a list of
    datasets and explore the result (profile-likelihood fit, MCMC,
    multinest nested sampling).
    NOTE(review): several methods below use self.modelManager, which is
    never assigned in this __init__ -- verify against the full class/file.
    """
    def __init__(self,likelihoodModel,**kwargs):
        """
        :param likelihoodModel: the likelihood model shared by all datasets
        Optional keyword arguments:
        - verbose: print every likelihood evaluation (default False)
        - minimizer: name of the minimizer to use (default "MINUIT")
        """
        #Process optional keyword parameters
        self.verbose = False
        defaultMinimizer = "MINUIT"
        for k,v in kwargs.iteritems():
            if(k.lower()=="verbose"):
                self.verbose = bool(kwargs["verbose"])
            elif(k.lower()=="minimizer"):
                defaultMinimizer = v.upper()
                pass
            pass
        self.likelihoodModel = likelihoodModel
        self.dataSets = likelihoodModel.dataList.datasets.values()
        for ds in self.dataSets:
            #The following is to ensure the proper set of some
            #datasets (for example, for the LAT datasets this
            #generate the new XML model which allow the user to set
            #a prior for the effective area correction after instanciating
            #this class)
            ds.set_model(self.likelihoodModel)
            # return value intentionally discarded: get_log_like() is
            # called here only for its setup side effects
            dumb = ds.get_log_like()
            pass
        self._buildGlobalLikelihoodFunctions()
        self.sampler = None
        #These will store the best fit results
        self.bestFitValues = collections.OrderedDict()
        self.approxErrors = collections.OrderedDict()
        #Default minimizer is MINUIT
        self.setMinimizer(defaultMinimizer)
        pass
def _buildGlobalLikelihoodFunctions(self):
    """
    Build and store the two global (joint) likelihood closures:
    - minusLogLikeProfile: -logLike with dataset nuisance parameters
      profiled out through each dataset's inner_fit()
    - minusLogLike: plain -logLike of the current model state
    Both take the free-parameter values as a flat sequence, in the order
    of self.freeParameters.
    """
    # counter of profile-likelihood evaluations
    self.ncalls = 0
    #Global likelihood function, profiling out nuisance parameters
    def minusLogLikeProfile(args):
        self.ncalls += 1
        #Assign the new values to the parameters
        for i,parname in enumerate(self.freeParameters.keys()):
            self.likelihoodModel.parameters[parname].setValue(args[i])
            pass
        valuesString = self.modelManager.printParamValues(False)
        #Now profile out nuisance parameters and compute the new value
        #for the likelihood
        globalLogLike = 0
        for dataset in self.dataSets:
            dataset.inner_fit()
            globalLogLike += dataset.get_log_like()
            pass
        # string comparison catches NaN; a large penalty keeps the
        # minimizer away from invalid parameter regions
        if("%s" % globalLogLike=='nan'):
            print("Warning: these parameters returned a logLike = Nan: %s" %(valuesString))
            return 1e6
        if(self.verbose):
            print("Trying with parameters %s, resulting in logL = %s" %(valuesString,globalLogLike))
        return globalLogLike*(-1)
        pass
    #Global likelihood function
    def minusLogLike(args):
        #Assign the new values to the parameters of the model
        values = []
        for i,par in enumerate(self.freeParameters.keys()):
            self.modelManager[par].setValue(args[i])
            values.append(args[i])
        #Now compute the new value for the likelihood
        globalLogLike = 0
        for dataset in self.dataSets:
            globalLogLike += dataset.get_log_like()
            pass
        if(self.verbose):
            print("Trying with parameters %s, resulting in logL = %s" %(",".join(map(lambda x:str(x),values)),globalLogLike))
        return globalLogLike*(-1)
        pass
    #Store it
    self.minusLogLike = minusLogLike
    self.minusLogLikeProfile = minusLogLikeProfile
    pass
def explore(self, nwalkers, nsamplesPerWalker, burn=None):
    """
    Sample the posterior distribution with the emcee parallel-tempering sampler.
    :param nwalkers: number of walkers per temperature
    :param nsamplesPerWalker: number of samples to draw per walker
    :param burn: number of burn-in iterations (default: nsamplesPerWalker/10)
    Stores the sampler in self.sampler and the flattened chain in self.samples.
    """
    import emcee
    self.freeParameters = self.modelManager.getFreeParameters()
    # Default burn-in is nsamples/10 (was compared with '==None')
    if burn is None:
        burn = int(nsamplesPerWalker / 10.0)
        print("Using default burn of nsamples/10 = %i" % (burn))

    def lnprior(pars):
        # Sum of the log-priors of all free parameters
        globalLnPrior = 0
        for i, p in enumerate(self.freeParameters.keys()):
            prior = self.modelManager[p].prior
            globalLnPrior += prior(pars[i])
        return globalLnPrior

    def lnprob(theta):
        # Full log-posterior: log-prior + log-likelihood
        lp = lnprior(theta)
        if not numpy.isfinite(lp):
            return -numpy.inf
        return lp + self.minusLogLike(theta) * (-1)

    def lnprob2(theta):
        # Log-likelihood only (PTSampler applies the prior separately)
        return self.minusLogLike(theta) * (-1)

    # Get some init values from the profile likelihood fit
    if len(self.freeParameters.keys()) < 20:
        print("Performing profile-likelihood minimization to get init values...")
        res = self.fit()
        print("\nNow sampling posterior distribution with MCMC...")
        allValues = res[0].values()
    else:
        res = self.fit(False, True)
        # FIX: these three were Python2-only 'print x' statements,
        # inconsistent with the print(...) calls used elsewhere
        print(res)
        # list(map(...)) keeps the result indexable on both py2 and py3
        allValues = list(map(lambda x: x.value, self.freeParameters.values()))
        print(allValues)
        print(self.minusLogLike(allValues))
    ntemps = 20
    ndim = len(allValues)
    # random starting positions within +-10% of the best-fit values
    p0 = numpy.random.uniform(0.9, 1.1, size=(ntemps, nwalkers, ndim)) * numpy.array(allValues)
    self.sampler = emcee.PTSampler(ntemps, nwalkers, ndim, lnprob2, lnprior)
    self.sampler.reset()
    if burn > 0:
        # Burn-in phase; keep the final state to seed the real run.
        # Loop variables renamed so they no longer shadow lnprob().
        for p, lnprob0, lnlike0 in self.sampler.sample(p0, iterations=burn):
            pass
        self.sampler.reset()
        sample_iter = self.sampler.sample(p, lnprob0=lnprob0, lnlike0=lnlike0, iterations=nsamplesPerWalker)
    else:
        # FIX: the original passed lnprob0=lnprob (the function object!)
        # and the undefined name lnlike here; without burn-in there is no
        # previous state to pass along.
        sample_iter = self.sampler.sample(p0, iterations=nsamplesPerWalker)
    for p, lnprob0, lnlike0 in sample_iter:
        pass
    print("done")
    ndim = self.sampler.chain.shape[-1]
    self.samples = self.sampler.chain[:, :, :, :].reshape((-1, ndim))
def multinest(self,*args,**kwargs):
    """
    Sample the posterior with pymultinest nested sampling.
    Extra args/kwargs are forwarded to pymultinest.run(); defaults are
    supplied for verbose, resume and outputfiles_basename.
    Stores the equal-weighted posterior samples in self.samples and the
    corresponding posterior values in self.posteriors.
    """
    import pymultinest
    self.freeParameters = self.modelManager.getFreeParameters()
    def prior(cube, ndim, nparams):
        # transform the unit hypercube in-place through each parameter's prior
        for i,p in enumerate(self.freeParameters.values()):
            cube[i] = p.prior.multinestCall(cube[i])
            pass
        pass
    def loglike(cube, ndim, nparams):
        logL = self.minusLogLike(cube)*(-1)
        if(numpy.isnan(logL)):
            # heavily penalize invalid parameter combinations
            logL = -1e10
        return logL
        pass
    # fill in defaults without overriding caller-provided values
    if('verbose' not in kwargs):
        kwargs['verbose'] = True
    if('resume' not in kwargs):
        kwargs['resume'] = False
    if('outputfiles_basename' not in kwargs):
        kwargs['outputfiles_basename'] = '_1_'
        pass
    kwargs['log_zero'] = -1e9
    pymultinest.run(loglike, prior, len(self.freeParameters), *args, **kwargs)
    print("done")
    #Collect the samples
    analyzer = pymultinest.Analyzer(n_params=len(self.freeParameters),outputfiles_basename=kwargs['outputfiles_basename'])
    eqw = analyzer.get_equal_weighted_posterior()
    self.samples = eqw[:,:-1]
    self.posteriors = eqw[:,-1]
    pass
def getPercentiles(self, burnout=0, **kwargs):
    '''
    Get percentiles from the current MCMC chain (self.samples).
    Optional keyword arguments:
    - printout: print the percentiles to stdout (default True)
    - levels: percentile levels to compute (default [50, 16, 84])
    :return: OrderedDict mapping parameter name -> array of percentiles
    '''
    # Process optional parameters.
    # FIX: kwargs.iteritems() is Python2-only; items() behaves the same
    # there and also works on Python 3.
    printout = True
    levels = [50, 16, 84]
    for k, v in kwargs.items():
        if k.lower() == "printout":
            printout = bool(v)
        elif k.lower() == "levels":
            levels = list(v)
    parnames = self.freeParameters.keys()
    percentiles = collections.OrderedDict()
    # column i of the chain corresponds to the i-th free parameter
    for i, p in enumerate(parnames):
        percentiles[p] = numpy.percentile(self.samples[:, i], levels)
    if printout:
        print("Percentiles: %s" % (levels))
        for k, v in percentiles.items():
            print("%-40s = %.4g %.4g %.4g" % (k, v[0], v[1] - v[0], v[2] - v[0]))
    return percentiles
def setMinimizer(self, minimizer):
    """
    Select the minimizer implementation used by fit().
    :param minimizer: one of "MINUIT", "SCIPY" or "BOBYQA" (case-insensitive)
    :raises ValueError: for an unknown minimizer name
    """
    name = minimizer.upper()
    if name == "MINUIT":
        self.Minimizer = minimization.MinuitMinimizer
    elif name == "SCIPY":
        self.Minimizer = minimization.ScipyMinimizer
    elif name == "BOBYQA":
        self.Minimizer = minimization.BOBYQAMinimizer
    else:
        raise ValueError("Do not know minimizer %s" % (minimizer))
def fit(self,minos=False,normOnly=False):
    """
    Perform the joint maximum-likelihood fit.
    :param minos: request MINOS-style error estimation from the minimizer
    :param normOnly: fit only the normalization parameters
    NOTE(review): the 'if(1==0)' block below is disabled dead code; as a
    consequence normOnly currently has no effect, and the early-return
    path inside it would use logLmin before assignment if re-enabled.
    :return: (bestFitValues, logLmin)
    """
    if(1==0):
        #Fit the normalizations of the spectral model first, otherwise, if they are too far off, they will
        #prevent the minimizer to find a solution
        self.freeParameters = self.modelManager.getFreeNormalizationParameters()
        if(len(self.freeParameters.values())>0):
            minimizer = self.Minimizer(self.minusLogLikeProfile,self.freeParameters)
            xs,xserr,logLmin = minimizer.minimize(False,False)
            pass
        if(normOnly):
            self.freeParameters = self.modelManager.getFreeParameters()
            return self.modelManager.getFreeNormalizationParameters(),logLmin
        #Now, assuming that we have a decent normalization, constrain it to remain within 1/100th and 100 times
        #the current value (it improves A LOT the convergence speed, especially with MINUIT)
        for k,v in self.freeParameters.iteritems():
            value = v.value
            v.set_bounds(value/100.0,value*100.0)
            v.setDelta(value/10.0)
            pass
        pass
    #Now perform the real fit
    #Get and store the parameters from the model manager,
    #keeping only the non-nuisance ones
    freeParameters = self.modelManager.getFreeParameters()
    self.freeParameters = collections.OrderedDict()
    for k,v in freeParameters.iteritems():
        if(v.isNuisance()):
            continue
        else:
            self.freeParameters[k] = v
            pass
        pass
    minimizer = self.Minimizer(self.minusLogLikeProfile,self.freeParameters)
    xs,xserr,logLmin = minimizer.minimize(minos,False)
    print("Minimum of -logLikelihood is: %s" %(logLmin))
    print("Contributions to the -logLikelihood at the minimum:")
    for dataset in self.dataSets:
        print("%-50s: %s" %(dataset.get_name(),dataset.get_log_like()*(-1)))
        pass
    #Print and store results for future use
    print("\nValues for the parameters at the minimum are:")
    for i,(k,v) in enumerate(self.modelManager.getFreeParameters().iteritems()):
        if(v.isNuisance()):
            msg = "(nuisance)"
        else:
            msg = ''
            pass
        print("%-50s = %6.3g %s" %(k,v.value,msg))
        self.bestFitValues[k] = v.value
        # nuisance parameters get no approximate error from the minimizer
        if(v.isNuisance()):
            self.approxErrors[k] = 0
        else:
            self.approxErrors[k] = xserr[i]
        pass
    self.logLmin = logLmin
    return self.bestFitValues,logLmin
    pass
def _restoreBestFitValues(self):
#Restore best fit values
for k in self.freeParameters.keys():
self.freeParameters[k].setValue(self.bestFitValues[k])
self.modelManager[k].setValue(self.bestFitValues[k])
pass
pass
def getErrors(self, confidenceLevel=0.68268949213708585, **kwargs):
    '''
    Compute asymptotic errors using the Likelihood Ratio Test. Usage:
    computeErrors(0.68)
    will compute the 1-sigma error region, while:
    computeErrors(0.99)
    will compute the 99% c.l. error region, and so on. Alternatively, you
    can specify the number of sigmas corresponding to the desired c.l., as:
    computeErrors(sigma=1)
    to get the 68% c.l., or:
    computeErrors(sigma=2)
    to get the ~95% c.l.
    Optional keyword 'profiles=True' also plots the profile likelihoods.
    :return: OrderedDict mapping parameter name -> [lower, upper] bound
    '''
    # Process optional keyword parameters
    equivalentSigma = None
    plotProfiles = False
    for k, v in kwargs.items():
        if k.lower() == "sigma":
            equivalentSigma = float(v)
        elif k.lower() == "profiles":
            plotProfiles = bool(v)
    if confidenceLevel > 1.0 or confidenceLevel <= 0.0:
        raise RuntimeError("Confidence level must be 0 < cl < 1. Ex. use 0.683 for 1-sigma interval")
    # Translate between confidence level and number of sigmas, then get
    # the chisq critical value corresponding to this confidence level
    # (was compared with '==None')
    if equivalentSigma is None:
        equivalentSigma = scipy.stats.norm.isf((1 - confidenceLevel) / 2.0)
    else:
        confidenceLevel = 1 - (scipy.stats.norm.sf(equivalentSigma) * 2.0)
    criticalValue = scipy.stats.chi2.isf([1 - confidenceLevel], 1)[0]
    print("Computing %.3f c.l. errors (chisq critical value: %.3f, equivalent sigmas: %.3f sigma)" % (confidenceLevel, criticalValue, equivalentSigma))
    # Now computing the errors
    if len(self.bestFitValues.keys()) == 0:
        raise RuntimeError("You have to perform a fit before calling computeErrors!")
    # Find confidence intervals for all parameters, except nuisance ones
    paramList = [par for par in self.bestFitValues.keys()
                 if not self.modelManager[par].isNuisance()]
    confInterval = collections.OrderedDict()
    for parname in paramList:
        sys.stdout.write("Computing error for parameter %s...\n" % (parname))
        # Get the list of free parameters
        self.freeParameters = self.modelManager.getFreeParameters()
        self._restoreBestFitValues()
        # Remove the current parameter from the list of free parameters,
        # so that it won't be varied
        self.freeParameters.pop(parname)

        def thisProfileLikeRenorm(newValue):
            # Profile -logLike as a function of the current parameter,
            # shifted so that its root lies at the confidence bound.
            # NOTE: 'parname' is read at call time (late binding); the
            # plotting loop below relies on rebinding it.
            self._restoreBestFitValues()
            self.modelManager[parname].setValue(newValue)
            # Fit all other parameters
            minimizer = self.Minimizer(self.minusLogLikeProfile, self.freeParameters)
            _, _, proflogL = minimizer.minimize(False, False)
            # Subtract the minimum and the critical value/2, so that when
            # this is 0 the true profileLogLike is logL+critical value/2.0
            # (the factor /2.0 comes from the LRT, which has 2*deltaLoglike
            # as statistic)
            return proflogL - self.logLmin - criticalValue / 2.0

        # Find the values of the parameter for which the profile logLike is
        # equal to the minimum - critical value/2.0, i.e., when
        # thisProfileLikeRenorm is 0. The approximate error (sqrt of the
        # diagonal of the covariance matrix), scaled by a growing factor,
        # provides the bracketing point for the root search.
        bounds = []
        for kind in ['lower', 'upper']:
            sign = -1.0 if kind == 'lower' else 1.0
            for scale in [1.0, 1.1, 1.5, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]:
                approxSolution = (self.bestFitValues[parname]
                                  + sign * scale * equivalentSigma * abs(self.approxErrors[parname]))
                if thisProfileLikeRenorm(approxSolution) > 0:
                    break
            # Keep the bracketing point inside the allowed parameter range
            if approxSolution < self.modelManager[parname].minValue:
                approxSolution = self.modelManager[parname].minValue * 1.1
            if approxSolution > self.modelManager[parname].maxValue:
                approxSolution = self.modelManager[parname].maxValue * 0.9
            tolerance = abs(self.bestFitValues[parname]) / 10000.0
            if self.verbose:
                print("Approx solution for %s bound is %s, tolerance is %s" % (kind, approxSolution, tolerance))
            try:
                # Find the root of thisProfileLikeRenorm, i.e., the value
                # of its argument for which it is zero
                results = scipy.optimize.brentq(thisProfileLikeRenorm, approxSolution, self.bestFitValues[parname], rtol=1e-3)
            except:
                print("Error search for %s bound for parameter %s failed. Parameter is probably unconstrained." % (kind, parname))
                raise
            else:
                bounds.append(results)
        confInterval[parname] = [min(bounds), max(bounds)]
    self.freeParameters = self.modelManager.getFreeParameters()
    print("\nBest fit values and their errors are:")
    for parname in confInterval.keys():
        value = self.bestFitValues[parname]
        error1, error2 = confInterval[parname]
        print("%-20s = %6.3g [%6.4g,%6.4g]" % (parname, value, error1 - value, error2 - value))
    if plotProfiles:
        # Plot the profile likelihoods for each parameter
        npar = len(confInterval.keys())
        nrows = npar // 2
        ncols = 2
        if nrows * ncols < npar:
            # FIX: this line incremented the undefined name 'nrow'
            # (NameError); 'nrows' was clearly intended
            nrows += 1
        fig, subs = plt.subplots(nrows, ncols)
        for sub, (parname, interval) in zip(subs.flatten(), confInterval.items()):
            # Rebind the free parameters and fix the current one, matching
            # the state thisProfileLikeRenorm expects (it late-binds parname)
            self.freeParameters = self.modelManager.getFreeParameters()
            self._restoreBestFitValues()
            self.freeParameters.pop(parname)
            val = self.bestFitValues[parname]
            errorM = interval[0] - val
            errorP = interval[1] - val
            grid = numpy.linspace(val + 1.1 * errorM, val + 1.1 * errorP, 10)
            grid = numpy.append(grid, val)
            grid.sort()
            logLonGrid = []
            for g in grid:
                self._restoreBestFitValues()
                logLonGrid.append(2 * (thisProfileLikeRenorm(g) + criticalValue / 2.0))
            sub.plot(grid, logLonGrid)
            sub.set_xlabel("%s" % (parname))
            sub.set_ylabel(r"2 ($L_{prof}$-L$_{0}$)")
            sub.axhline(criticalValue, linestyle='--')
            # Reduce the number of x ticks
            sub.locator_params(nbins=5, axis='x')
        plt.tight_layout()
    return confInterval
pass
| |
# Copyright 2012 Tsutomu Uchino
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unohelper
from com.sun.star.awt.tree import \
XTreeDataModel, XTreeNode, TreeDataModelEvent
from com.sun.star.beans import XMaterialHolder
class CustomTreeNode(unohelper.Base, XTreeNode, XMaterialHolder):
    """ Customized tree node which allows to access by Python directly. """
    def __init__(self, data_model, name, ondemand):
        """
        Create a node and register it with its data model.
        :param data_model: the owning tree data model (receives change events)
        :param name: display text of the node
        :param ondemand: True if children are created on demand (lazy)
        """
        self.data_model = data_model
        self.name = name
        self.ondemand = ondemand
        # parent node, None while the node is detached
        self.parent = None
        # list of child CustomTreeNode instances
        self.children = []
        # arbitrary payload attached to this node
        self.data = None
        # internal node ID exposed through getMaterial();
        # presumably reassigned by register_node() -- TODO confirm
        self.id = 0
        data_model.register_node(self)
def __repr__(self):
return "<%s.%s: %s>" % (self.__class__.__module__,
self.__class__.__name__, self.name)
def clear(self):
    """ Clear data and drop the reference to the data model. """
    # FIX: this assigned the undefined name 'data_model' (NameError);
    # releasing the reference was clearly the intent
    self.data_model = None
    self.data = None
# XMaterialHolder
def getMaterial(self):
    """ Returns internal node ID. """
    return self.id
# XTreeNode
def getChildAt(self, index):
    """ Return the child node at the given position. """
    return self.children[index]
def getChildCount(self):
    """ Return the number of direct children. """
    return len(self.children)
def getParent(self):
    """ Return the parent node (None for a detached/root node). """
    return self.parent
def getIndex(self, node):
    """ Return the position of node among the children, -1 if absent. """
    index = -1
    try:
        index = self.children.index(node)
    except:
        # node is not one of our children; keep the -1 sentinel
        pass
    return index
def hasChildrenOnDemand(self):
    """ Return True if children are created on demand. """
    return self.ondemand
def getDisplayValue(self):
    """ Return the text displayed for this node. """
    return self.name
def getNodeGraphicURL(self):
    """ No node graphic is used. """
    return ""
def getExpandedGraphicURL(self):
    """ No expanded-state graphic is used. """
    return ""
def getCollapsedGraphicURL(self):
    """ No collapsed-state graphic is used. """
    return ""
def set_name(self, text):
""" Set display text. """
self.name = text
self.data_model.changed((self, ), self.parent)
def get_parent(self):
""" Get parent node. """
return self.parent
def set_parent(self, parent):
""" Set parent node. """
self.parent = parent
def has_parent(self):
""" Check node has parent. """
return not self.parent is None
def get_child_count(self):
""" Returns number of children. """
return len(self.children)
def has_children(self):
""" Check is children. """
return len(self.children)
def get_children(self):
""" Retuns list of children. """
return self.children
def get_child_at(self, index):
""" Get child by its position. """
return self.children[index]
def append_child(self, node, broadcast=True):
""" Append child node. """
if not node.has_parent():
self.children.append(node)
node.set_parent(self)
if broadcast:
self.data_model.inserted((node, ), self)
def insert_child(self, index, node):
""" Insert node at index. """
self.children.insert(index, node)
node.set_parent(self)
self.data_model.inserted((node, ), self)
def remove_child_at(self, index):
""" Remove specific node at index. """
try:
self.children.pop(index)
self.data_model.removed((node, ), self)
except:
pass
def remove_child(self, node):
""" Remove child node. """
try:
self.children.remove(node)
self.data_model.removed((node, ), self)
except:
pass
def get_data(self):
""" Get data value. """
return self.data
def set_data(self, data):
""" Set data value. """
self.data = data
def find_node_by_data(self, data):
""" Find node having data as its data. """
if self.data == data:
return self
for child in self.children:
if child.get_data() == data:
return child
if child.has_children():
found = child.find_node_by_data(data)
if found:
return found
return None
def in_children(self, node):
""" Check node is sub node of this node. """
for child in self.children:
if child == node:
return True
if child.has_children():
found = child.in_children(node)
if found:
return True
return False
def in_parent(self, node):
""" Check node is one of parent in tree. """
parent = self.parent
while parent:
if parent == node:
return True
parent = parent.get_parent()
return False
def move_child(self, source_index, dest_index):
""" Move inner child container. """
if 0 <= source_index < len(self.children) and \
0 <= dest_index < len(self.children) and \
source_index != dest_index:
item = self.children[source_index]
if source_index < dest_index:
dest_index += 1
self.children.insert(dest_index, item)
if source_index > dest_index:
source_index += 1
self.children.pop(source_index)
def request_structure_update(self):
self.data_model.structure_changed(self)
class TreeRootNode(CustomTreeNode):
    """ Root.

    Marker subclass used where a node must be identifiable as the root of
    a tree; adds no state or behavior of its own.
    """
from bookmarks import EXT_DIR
from com.sun.star.lang import XComponent
class Component(XComponent):
    """ For life-time management.

    Minimal XComponent implementation: keeps a list of event listeners
    and tells each of them when this object is disposed.
    """
    def __init__(self):
        self.event_listeners = []
    def dispose(self):
        # A misbehaving listener must not stop the remaining ones from
        # being notified, so failures are swallowed per listener.
        for observer in self.event_listeners:
            try:
                observer.disposing(self)
            except:
                pass
    def addEventListener(self, listener):
        # Register each listener at most once.
        if listener not in self.event_listeners:
            self.event_listeners.append(listener)
    def removeEventListener(self, listener):
        # Unknown listeners are silently ignored.
        if listener in self.event_listeners:
            self.event_listeners.remove(listener)
class CustomTreeDataModel(unohelper.Base, Component, XTreeDataModel):
    """ Keeps CustomTreeNode as nodes.

    Assigns every registered node a unique integer ID so a UNO tree node
    (which only exposes getMaterial()) can be mapped back to its Python
    object, and broadcasts tree events to registered listeners.
    """
    def __init__(self):
        Component.__init__(self)
        self.listeners = []
        self.root = None
        self.node_counter = 0
        self.nodes = {}  # node id -> node, for every registered node
    def register_node(self, node):
        """ Give the node a fresh ID and index it. """
        node.id = self.create_node_id()
        self.nodes[node.id] = node
    def create_node_id(self):
        """ Return the next unused node ID (IDs start at 1). """
        self.node_counter += 1
        return self.node_counter
    def get_node(self, tree_node):
        """ Resolve a UNO tree node to our Python node, or None. """
        try:
            return self.nodes.get(tree_node.getMaterial())
        except:
            return None
    # XTreeDataModel
    def getRoot(self):
        return self.root
    def addTreeDataModelListener(self, listener):
        self.listeners.insert(0, listener)
    def removeTreeDataModelListener(self, listener):
        # Remove every occurrence of the listener.
        try:
            while True:
                self.listeners.remove(listener)
        except ValueError:
            pass
    def get_root(self):
        """ Get root node. """
        return self.root
    def set_root(self, node):
        """ Set root node and announce the new structure. """
        self.root = node
        self.structure_changed(node)
    def create_node(self, name, ondemand=False):
        """ Create new node. """
        return CustomTreeNode(self, name, ondemand)
    def create_root(self, name, ondemand=False):
        """ Create new root. """
        return TreeRootNode(self, name, ondemand)
    def changed(self, nodes, parent):
        self.broadcast("treeNodesChanged", nodes, parent)
    def inserted(self, nodes, parent):
        self.broadcast("treeNodesInserted", nodes, parent)
    def removed(self, nodes, parent):
        self.broadcast("treeNodesRemoved", nodes, parent)
    def structure_changed(self, node):
        self.broadcast("treeStructureChanged", (), node)
    def broadcast(self, type, nodes, parent):
        """ Deliver the event named ``type`` to every model listener. """
        event = TreeDataModelEvent(self, nodes, parent)
        for listener in self.listeners:
            try:
                getattr(listener, type)(event)
            except Exception as e:
                print(e)
from bookmarks import ICONS_DIR
is_high_contrast = False
def get_icon_name(name):
    """ Build the icon file path for ``name``, honoring high-contrast mode
    (which selects the ``_h`` variant of the image). """
    ext = "_h.png" if is_high_contrast else ".png"
    return ICONS_DIR + name + ext
class NodeIcon(object):
    # Mixin: resolves the concrete class's GRAPHIC_URL class attribute into
    # a themed icon path once, at construction time.
    def __init__(self):
        self._graphic_url = get_icon_name(self.GRAPHIC_URL)
    def getNodeGraphicURL(self):
        # Overrides CustomTreeNode.getNodeGraphicURL (which returns "").
        return self._graphic_url
class BookmarksNode(object):
    # Marker mixin: identifies nodes that belong to the bookmarks tree.
    pass
class BookmarksMenuTreeContainerNode(NodeIcon, CustomTreeNode, BookmarksNode):
    # Folder inside the bookmarks menu tree; shows the folder icon.
    GRAPHIC_URL = "folder_16"
    def __init__(self, datamodel, name, ondemand=True):
        CustomTreeNode.__init__(self, datamodel, name, ondemand)
        NodeIcon.__init__(self)
class BookmarksMenuTreeRootNode(NodeIcon, TreeRootNode, BookmarksNode):
    """ Root node of the bookmarks menu tree, with its own icon. """
    GRAPHIC_URL = "bookmarks_16"
    def __init__(self, datamodel, name, ondemand=True):
        # Initialize via TreeRootNode for consistency with TagsTreeRootNode
        # and UnsortedBookmarksRootNode.  TreeRootNode defines no __init__
        # of its own, so this resolves to the same CustomTreeNode.__init__
        # and behavior is unchanged.
        TreeRootNode.__init__(self, datamodel, name, ondemand)
        NodeIcon.__init__(self)
class TagNode(object):
    # Marker mixin: identifies nodes that belong to the tags tree.
    pass
class TagsTreeContainerNode(NodeIcon, CustomTreeNode, TagNode):
    # A single tag in the tags tree; shows the tag icon.
    GRAPHIC_URL = "tag_16"
    def __init__(self, datamodel, name, ondemand=True):
        CustomTreeNode.__init__(self, datamodel, name, ondemand)
        NodeIcon.__init__(self)
class TagsTreeRootNode(NodeIcon, TreeRootNode, TagNode):
    # Root of the tags tree; shows the tags icon.
    GRAPHIC_URL = "tags_16"
    def __init__(self, datamodel, name, ondemand=True):
        TreeRootNode.__init__(self, datamodel, name, ondemand)
        NodeIcon.__init__(self)
class HistoryRootNode(NodeIcon, CustomTreeNode):
    # Root of the history list; shows the history icon.
    GRAPHIC_URL = "history_16"
    def __init__(self, datamodel, name, ondemand=True):
        CustomTreeNode.__init__(self, datamodel, name, ondemand)
        NodeIcon.__init__(self)
class UnsortedBookmarksRootNode(NodeIcon, TreeRootNode):
    # Root of the "unsorted bookmarks" tree; shows the unsorted icon.
    GRAPHIC_URL = "unsorted_16"
    def __init__(self, datamodel, name, ondemand=True):
        TreeRootNode.__init__(self, datamodel, name, ondemand)
        NodeIcon.__init__(self)
class BookmarksMenuTreeDataModel(CustomTreeDataModel):
    """ Data model whose factory methods create bookmark-specific nodes.

    Fix: several factories previously ignored their ``ondemand`` argument
    and hard-coded the node class's default, silently overriding callers
    that passed an explicit value.  Defaults are unchanged, so existing
    call sites behave the same.
    """
    def create_node(self, name, ondemand=True):
        # Pass ondemand through (was dropped, forcing the class default).
        return BookmarksMenuTreeContainerNode(self, name, ondemand)
    def create_root(self, name, ondemand=True):
        # Intentionally a plain (icon-less) root; see create_bookmarks_root.
        return TreeRootNode(self, name, ondemand)
    def create_bookmarks_root(self, name, ondemand=True):
        return BookmarksMenuTreeRootNode(self, name, ondemand)
    def create_tag_node(self, name, ondemand=False):
        return TagsTreeContainerNode(self, name, ondemand)
    def create_tags_root(self, name, ondemand=False):
        return TagsTreeRootNode(self, name, ondemand)
    def create_history_root(self, name, ondemand=False):
        # Was hard-coded False; honor the caller's choice.
        return HistoryRootNode(self, name, ondemand)
    def create_unsorted_root(self, name, ondemand=True):
        # Was hard-coded True; honor the caller's choice.
        return UnsortedBookmarksRootNode(self, name, ondemand)
| |
import numpy as np
import tensorflow as tf
import pickle
import sys
import io
import os
# Read the nonlinearity and the learning rate from the command line,
# falling back to defaults when an argument is absent (or, for the
# learning rate, not parseable as a float).  The bare ``except:`` clauses
# were narrowed to the exceptions these lookups can actually raise.
try:
    nonlinearity_name = sys.argv[1]     # 'relu', 'elu', 'gelu', or 'silu'
except IndexError:
    print('Defaulted to gelu since no nonlinearity specified through command line')
    nonlinearity_name = 'gelu'
try:
    learning_rate = float(sys.argv[2])  # 0.001, 0.0001, 0.00001
except (IndexError, ValueError):
    print('Defaulted to a learning rate of 0.001')
    learning_rate = 1e-3
p = 0.8  # dropout keep probability used by the model below
#
# Begin Twitter Helper Functions
#
def embeddings_to_dict(filename):
    '''
    :param filename: the file name of the word embeddings | file is assumed
    to follow this format: "word[tab]dimension 1[space]dimension 2[space]...[space]dimension 50"
    :return: a dictionary with keys that are words and values that are the embedding of a word
    '''
    vecs = {}
    with io.open(filename, 'r', encoding='utf-8') as fh:
        for raw_line in fh:
            # first whitespace-separated token is the word, rest are floats
            tokens = raw_line.strip('\n').split()
            vecs[tokens[0]] = np.array([float(tok) for tok in tokens[1:]])
    return vecs
def data_to_mat(filename, vocab, tag_to_number, window_size=1, start_symbol=u'UUUNKKK',
                one_hot=False, return_labels=True):
    '''
    :param filename: the filename of a training, development, devtest, or test set
    :param vocab: a list of strings, one for each embedding (the keys of a dictionary)
    :param tag_to_number: a dictionary of tags to predict and a numerical encoding of those tags;
    with this, we will predict numbers instead of strings
    :param window_size: the context window size for the left and right; thus we have 2*window_size + 1
    words considered at a time
    :param start_symbol: since the <s> symbol has no embedding given, chose a symbol in the vocab
    to replace <s>. Common choices are u'UUUNKKK' or u'</s>'
    :return: a n x (window_size*2 + 1) matrix containing context windows and the center word
    represented as strings; n is the number of examples. ALSO return a n x |tag_to_number|
    matrix of labels for the n examples with a one-hot (1-of-k) encoding
    '''
    with io.open(filename, 'r', encoding='utf-8') as f:
        x, tweet_words, y = [], [], []
        start = True
        for line in f:
            line = line.strip('\n')
            if len(line) == 0: # if end of tweet
                # pad the right edge so the last word gets a full window
                tweet_words.extend([u'</s>'] * window_size)
                # ensure tweet words are in vocab; if not, map to "UUUNKKK"
                tweet_words = [w if w in vocab else u'UUUNKKK' for w in tweet_words]
                # from this tweet, add the training tasks to dataset
                # the tags were already added to y
                for i in range(window_size, len(tweet_words) - window_size):
                    x.append(tweet_words[i-window_size:i+window_size+1])
                tweet_words = []
                start = True
                continue
            # if before end: each line is "word<TAB>tag"
            word, label = line.split('\t')
            if start:
                # pad the left edge of a new tweet with the start symbol
                tweet_words.extend([start_symbol] * window_size)
                start = False
            tweet_words.append(word)
            if return_labels is True:
                if one_hot is True:
                    label_one_hot = len(tag_to_number) * [0]
                    label_one_hot[tag_to_number[label]] += 1
                    y.append(label_one_hot)
                else:
                    y.append(tag_to_number[label])
    # NOTE(review): windows are only flushed on a blank line, so a file that
    # does not end with one silently drops its final tweet — confirm the
    # data files always terminate tweets with an empty line.
    return np.array(x), np.array(y)
def word_list_to_embedding(words, embeddings, embedding_dimension=50):
    '''
    :param words: an n x (2*window_size + 1) matrix from data_to_mat
    :param embeddings: an embedding dictionary where keys are strings and values
    are embeddings; the output from embeddings_to_dict
    :param embedding_dimension: the dimension of the values in embeddings; in this
    assignment, embedding_dimension=50
    :return: an n x ((2*window_size + 1)*embedding_dimension) matrix where each entry of the
    words matrix is replaced with its embedding
    '''
    n_rows, n_cols = words.shape
    # flatten, look every token up, then fold each window back into one row
    flat_tokens = words.reshape(-1)
    stacked = np.array([embeddings[token] for token in flat_tokens], dtype=np.float32)
    return stacked.reshape(n_rows, n_cols * embedding_dimension)
#
# End Twitter Helper Functions
#
window_size = 1
# note that we encode the tags with numbers for later convenience
tag_to_number = {
    u'N': 0, u'O': 1, u'S': 2, u'^': 3, u'Z': 4, u'L': 5, u'M': 6,
    u'V': 7, u'A': 8, u'R': 9, u'!': 10, u'D': 11, u'P': 12, u'&': 13, u'T': 14,
    u'X': 15, u'Y': 16, u'#': 17, u'@': 18, u'~': 19, u'U': 20, u'E': 21, u'$': 22,
    u',': 23, u'G': 24
}
embeddings = embeddings_to_dict('./data/Tweets/embeddings-twitter.txt')
# dict keys view: O(1) membership tests inside data_to_mat
vocab = embeddings.keys()
# we replace <s> with </s> since it has no embedding, and </s> is a better embedding than UNK
xt, yt = data_to_mat('./data/Tweets/tweets-train.txt', vocab, tag_to_number, window_size=window_size,
                     start_symbol=u'</s>')
xdev, ydev = data_to_mat('./data/Tweets/tweets-dev.txt', vocab, tag_to_number, window_size=window_size,
                         start_symbol=u'</s>')
xdtest, ydtest = data_to_mat('./data/Tweets/tweets-devtest.txt', vocab, tag_to_number, window_size=window_size,
                             start_symbol=u'</s>')
# keep the three splits together for the training loop below
data = {
    'x_train': xt, 'y_train': yt,
    'x_dev': xdev, 'y_dev': ydev,
    'x_test': xdtest, 'y_test': ydtest
}
# hyperparameters
num_epochs = 30
num_tags = 25
hidden_size = 256
batch_size = 16
embedding_dimension = 50
example_size = (2*window_size + 1)*embedding_dimension
num_examples = data['y_train'].shape[0]
num_batches = num_examples//batch_size
graph = tf.Graph()
with graph.as_default():
    # placeholders: flattened context-window embeddings, integer tag ids,
    # and a flag that switches dropout on (training) or off (evaluation)
    x = tf.placeholder(tf.float32, [None, example_size])
    y = tf.placeholder(tf.int64, [None])
    is_training = tf.placeholder(tf.bool)
    # two hidden layers plus a linear output; weights are l2-normalized
    # along axis 0 at initialization
    w1 = tf.Variable(tf.nn.l2_normalize(tf.random_normal([example_size, hidden_size]), 0))
    b1 = tf.Variable(tf.zeros([hidden_size]))
    w2 = tf.Variable(tf.nn.l2_normalize(tf.random_normal([hidden_size, hidden_size]), 0))
    b2 = tf.Variable(tf.zeros([hidden_size]))
    w_out = tf.Variable(tf.nn.l2_normalize(tf.random_normal([hidden_size, num_tags]), 0))
    b_out = tf.Variable(tf.zeros([num_tags]))
    # select the activation function named on the command line
    if nonlinearity_name == 'relu':
        f = tf.nn.relu
    elif nonlinearity_name == 'elu':
        f = tf.nn.elu
    elif nonlinearity_name == 'gelu':
        # def gelu(x):
        #     return tf.mul(x, tf.erfc(-x / tf.sqrt(2.)) / 2.)
        # f = gelu
        def gelu_fast(_x):
            # tanh-based approximation of the Gaussian Error Linear Unit
            return 0.5 * _x * (1 + tf.tanh(tf.sqrt(2 / np.pi) * (_x + 0.044715 * tf.pow(_x, 3))))
        f = gelu_fast
    elif nonlinearity_name == 'silu':
        def silu(_x):
            return _x * tf.sigmoid(_x)
        f = silu
    # elif nonlinearity_name == 'soi':
    #     def soi_map(x):
    #         u = tf.random_uniform(tf.shape(x))
    #         mask = tf.to_float(tf.less(u, (1 + tf.erf(x / tf.sqrt(2.))) / 2.))
    #         return tf.cond(is_training, lambda: tf.mul(mask, x),
    #                        lambda: tf.mul(x, tf.erfc(-x / tf.sqrt(2.)) / 2.))
    #     f = soi_map
    else:
        raise NameError("Need 'relu', 'elu', 'gelu', or 'silu' for nonlinearity_name")
    def model(data_feed):
        # dropout (keep probability p) is applied only while training
        h1 = f(tf.matmul(data_feed, w1) + b1)
        h1 = tf.cond(is_training, lambda: tf.nn.dropout(h1, p), lambda: h1)
        h2 = f(tf.matmul(h1, w2) + b2)
        h2 = tf.cond(is_training, lambda: tf.nn.dropout(h2, p), lambda: h2)
        return tf.matmul(h2, w_out) + b_out
    logits = model(x)
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits, y))
    # pick optimizer
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)
    compute_error = tf.reduce_mean(tf.to_float(tf.not_equal(tf.argmax(logits, 1), y)))
# store future results with previous results
if not os.path.exists("./data/"):
    os.makedirs("./data/")
if os.path.exists("./data/twitter_pos_" + nonlinearity_name + ".p"):
    # resume: each run contributes 7 keys, so len(history)//7 + 1 numbers
    # the new run's keys
    history = pickle.load(open("./data/twitter_pos_" + nonlinearity_name + ".p", "rb"))
    key_str = str(len(history)//7 + 1)
    history["lr" + key_str] = learning_rate
    history["train_loss" + key_str] = []
    history["val_loss" + key_str] = []
    history["test_loss" + key_str] = []
    history["train_err" + key_str] = []
    history["val_err" + key_str] = []
    history["test_err" + key_str] = []
else:
    history = {
        "lr1": learning_rate,
        'train_loss1': [], 'val_loss1': [], 'test_loss1': [],
        'train_err1': [], 'val_err1': [], 'test_err1': []
    }
    key_str = '1'
with tf.Session(graph=graph) as sess:
    print('Beginning training')
    sess.run(tf.initialize_all_variables())
    save_every = num_batches//5 # save training information 5 times per epoch
    # train
    for epoch in range(num_epochs):
        # shuffle data every epoch
        indices = np.arange(num_examples)
        np.random.shuffle(indices)
        data['x_train'] = data['x_train'][indices]
        data['y_train'] = data['y_train'][indices]
        for i in range(num_batches):
            offset = i * batch_size
            # embeddings are looked up lazily, one mini-batch at a time
            bx = word_list_to_embedding(data['x_train'][offset:offset + batch_size, :],
                                        embeddings, embedding_dimension)
            by = data['y_train'][offset:offset + batch_size]
            if p < 1-1e-5: # we want to know how the full network is being optimized instead of the reduced version
                l, err = sess.run([loss, compute_error], feed_dict={x: bx, y: by, is_training: False})
            _, l_drop, err_drop = sess.run([optimizer, loss, compute_error], feed_dict={x: bx, y: by,
                                                                                       is_training: True})
            if p < 1-1e-5: # we want to know how the full network is being optimized instead of the reduced version
                history["train_loss" + key_str].append(l)
                history["train_err" + key_str].append(err)
            else:
                history["train_loss" + key_str].append(l_drop)
                history["train_err" + key_str].append(err_drop)
            if i % save_every == 0:
                # periodically record dev and devtest loss/error (dropout off)
                l, err = sess.run([loss, compute_error],
                                  feed_dict={x: word_list_to_embedding(data['x_dev'], embeddings, embedding_dimension),
                                             y: data['y_dev'], is_training: False})
                history["val_loss" + key_str].append(l)
                history["val_err" + key_str].append(err)
                l, err = sess.run([loss, compute_error],
                                  feed_dict={x: word_list_to_embedding(data['x_test'], embeddings, embedding_dimension),
                                             y: data['y_test'], is_training: False})
                history["test_loss" + key_str].append(l)
                history["test_err" + key_str].append(err)
        # print('Epoch', epoch + 1, 'Complete')
    # save history
    pickle.dump(history, open("./data/twitter_pos_" + nonlinearity_name + ".p", "wb"))
| |
import os
import typing as t
from gettext import gettext as _
from gettext import ngettext
from ._compat import get_text_stderr
from .utils import echo
if t.TYPE_CHECKING:
from .core import Context
from .core import Parameter
def _join_param_hints(
param_hint: t.Optional[t.Union[t.Sequence[str], str]]
) -> t.Optional[str]:
if param_hint is not None and not isinstance(param_hint, str):
return " / ".join(repr(x) for x in param_hint)
return param_hint
class ClickException(Exception):
    """An exception that Click can handle and show to the user."""
    #: The exit code for this exception.
    exit_code = 1
    def __init__(self, message: str) -> None:
        super().__init__(message)
        self.message = message
    def format_message(self) -> str:
        # Subclasses override this to decorate the raw message with context.
        return self.message
    def __str__(self) -> str:
        return self.message
    def show(self, file: t.Optional[t.IO] = None) -> None:
        # Print the formatted message, defaulting to the text stderr stream.
        if file is None:
            file = get_text_stderr()
        echo(_("Error: {message}").format(message=self.format_message()), file=file)
class UsageError(ClickException):
    """An internal exception that signals a usage error. This typically
    aborts any further handling.
    :param message: the error message to display.
    :param ctx: optionally the context that caused this error. Click will
    fill in the context automatically in some situations.
    """
    exit_code = 2
    def __init__(self, message: str, ctx: t.Optional["Context"] = None) -> None:
        super().__init__(message)
        self.ctx = ctx
        # Convenience handle on the failing command when a context is known.
        self.cmd = self.ctx.command if self.ctx else None
    def show(self, file: t.Optional[t.IO] = None) -> None:
        if file is None:
            file = get_text_stderr()
        color = None
        hint = ""
        # Suggest "--help" only when the command actually exposes a help
        # option to point the user at.
        if (
            self.ctx is not None
            and self.ctx.command.get_help_option(self.ctx) is not None
        ):
            hint = _("Try '{command} {option}' for help.").format(
                command=self.ctx.command_path, option=self.ctx.help_option_names[0]
            )
            hint = f"{hint}\n"
        if self.ctx is not None:
            color = self.ctx.color
            # Usage line first, then the hint, then the error itself.
            echo(f"{self.ctx.get_usage()}\n{hint}", file=file, color=color)
        echo(
            _("Error: {message}").format(message=self.format_message()),
            file=file,
            color=color,
        )
class BadParameter(UsageError):
    """An exception that formats out a standardized error message for a
    bad parameter. This is useful when thrown from a callback or type as
    Click will attach contextual information to it (for instance, which
    parameter it is).
    .. versionadded:: 2.0
    :param param: the parameter object that caused this error. This can
    be left out, and Click will attach this info itself
    if possible.
    :param param_hint: a string that shows up as parameter name. This
    can be used as alternative to `param` in cases
    where custom validation should happen. If it is
    a string it's used as such, if it's a list then
    each item is quoted and separated.
    """
    def __init__(
        self,
        message: str,
        ctx: t.Optional["Context"] = None,
        param: t.Optional["Parameter"] = None,
        param_hint: t.Optional[str] = None,
    ) -> None:
        super().__init__(message, ctx)
        self.param = param
        self.param_hint = param_hint
    def format_message(self) -> str:
        # Prefer an explicit hint, then derive one from the parameter,
        # otherwise fall back to a hint-less message.
        if self.param_hint is not None:
            param_hint = self.param_hint
        elif self.param is not None:
            param_hint = self.param.get_error_hint(self.ctx)  # type: ignore
        else:
            return _("Invalid value: {message}").format(message=self.message)
        return _("Invalid value for {param_hint}: {message}").format(
            param_hint=_join_param_hints(param_hint), message=self.message
        )
class MissingParameter(BadParameter):
    """Raised if click required an option or argument but it was not
    provided when invoking the script.
    .. versionadded:: 4.0
    :param param_type: a string that indicates the type of the parameter.
    The default is to inherit the parameter type from
    the given `param`. Valid values are ``'parameter'``,
    ``'option'`` or ``'argument'``.
    """
    def __init__(
        self,
        message: t.Optional[str] = None,
        ctx: t.Optional["Context"] = None,
        param: t.Optional["Parameter"] = None,
        param_hint: t.Optional[str] = None,
        param_type: t.Optional[str] = None,
    ) -> None:
        super().__init__(message or "", ctx, param, param_hint)
        self.param_type = param_type
    def format_message(self) -> str:
        # Resolve the best available hint naming the parameter.
        if self.param_hint is not None:
            param_hint: t.Optional[str] = self.param_hint
        elif self.param is not None:
            param_hint = self.param.get_error_hint(self.ctx)  # type: ignore
        else:
            param_hint = None
        param_hint = _join_param_hints(param_hint)
        param_hint = f" {param_hint}" if param_hint else ""
        param_type = self.param_type
        if param_type is None and self.param is not None:
            param_type = self.param.param_type_name
        msg = self.message
        if self.param is not None:
            # Let the parameter's type append extra guidance.
            msg_extra = self.param.type.get_missing_message(self.param)
            if msg_extra:
                if msg:
                    msg += f". {msg_extra}"
                else:
                    msg = msg_extra
        msg = f" {msg}" if msg else ""
        # Translate param_type for known types.
        if param_type == "argument":
            missing = _("Missing argument")
        elif param_type == "option":
            missing = _("Missing option")
        elif param_type == "parameter":
            missing = _("Missing parameter")
        else:
            missing = _("Missing {param_type}").format(param_type=param_type)
        return f"{missing}{param_hint}.{msg}"
    def __str__(self) -> str:
        # The exception may be constructed without a message; fall back to
        # a minimal one built from the parameter name.
        if not self.message:
            param_name = self.param.name if self.param else None
            return _("Missing parameter: {param_name}").format(param_name=param_name)
        else:
            return self.message
class NoSuchOption(UsageError):
    """Raised if click attempted to handle an option that does not
    exist.
    .. versionadded:: 4.0
    """
    def __init__(
        self,
        option_name: str,
        message: t.Optional[str] = None,
        possibilities: t.Optional[t.Sequence[str]] = None,
        ctx: t.Optional["Context"] = None,
    ) -> None:
        if message is None:
            message = _("No such option: {name}").format(name=option_name)
        super().__init__(message, ctx)
        self.option_name = option_name
        self.possibilities = possibilities
    def format_message(self) -> str:
        if not self.possibilities:
            return self.message
        possibility_str = ", ".join(sorted(self.possibilities))
        # ngettext selects the singular "Did you mean ...?" wording for a
        # single candidate and the parenthesised list wording for several.
        suggest = ngettext(
            "Did you mean {possibility}?",
            "(Possible options: {possibilities})",
            len(self.possibilities),
        ).format(possibility=possibility_str, possibilities=possibility_str)
        return f"{self.message} {suggest}"
class BadOptionUsage(UsageError):
    """Raised if an option is generally supplied but the use of the option
    was incorrect. This is for instance raised if the number of arguments
    for an option is not correct.
    .. versionadded:: 4.0
    :param option_name: the name of the option being used incorrectly.
    """
    def __init__(
        self, option_name: str, message: str, ctx: t.Optional["Context"] = None
    ) -> None:
        super().__init__(message, ctx)
        # Kept so callers can inspect which option was misused.
        self.option_name = option_name
class BadArgumentUsage(UsageError):
    """Raised if an argument is generally supplied but the use of the argument
    was incorrect. This is for instance raised if the number of values
    for an argument is not correct.
    .. versionadded:: 6.0
    """
class FileError(ClickException):
    """Raised if a file cannot be opened."""
    def __init__(self, filename: str, hint: t.Optional[str] = None) -> None:
        if hint is None:
            hint = _("unknown error")
        super().__init__(hint)
        # ui_filename is fs-decoded for display; filename keeps the value as given.
        self.ui_filename = os.fsdecode(filename)
        self.filename = filename
    def format_message(self) -> str:
        return _("Could not open file {filename!r}: {message}").format(
            filename=self.ui_filename, message=self.message
        )
class Abort(RuntimeError):
    """Internal signalling exception that tells Click to abort execution."""
class Exit(RuntimeError):
    """Signal that the application should terminate with a status code.

    :param code: the status code to exit with.
    """
    __slots__ = ("exit_code",)
    def __init__(self, code: int = 0) -> None:
        # Status code the surrounding runner hands to the OS on exit.
        self.exit_code = code
| |
import random
import logging
import os
import claripy
from ...sim_type import SimTypeFunction, SimTypeInt
from ... import sim_options as so
from ... import SIM_LIBRARIES
from ... import BP_BEFORE, BP_AFTER
from ...storage.file import SimFile, SimFileDescriptor
from ...state_plugins import SimSystemPosix
from ...errors import AngrCallableMultistateError, AngrCallableError, AngrError, SimError
from .custom_callable import IdentifierCallable
l = logging.getLogger(name=__name__)
# Path to the CGC "flag page" fixture shipped next to this module.
flag_loc = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../example_flag_page'))
try:
    with open(flag_loc, "rb") as f:
        FLAG_DATA = f.read()
except IOError:
    # Fall back to a dummy 4 KiB page so the module stays importable.
    FLAG_DATA = b"A"*0x1000
# The flag page must be exactly one 4 KiB page.
assert len(FLAG_DATA) == 0x1000
class Runner(object):
    """Concretely execute identified functions in a CGC binary and compare
    the results (output buffers, return value, stdout) against a test
    datum's expectations.
    """
    def __init__(self, project, cfg):
        # Patch the tracer flavors of the CGC syscalls into the project's
        # syscall library in place (shared mutable state — kept as-is).
        project.simos.syscall_library.update(SIM_LIBRARIES['cgcabi_tracer'])
        self.project = project
        self.cfg = cfg
        # Cached state paused at the first receive syscall; built lazily.
        self.base_state = None
    def _get_recv_state(self):
        """Execute from the program entry up to the first receive syscall
        (syscall number 2) and return that state.

        Falls back to a plain entry state if execution errors out or the
        syscall is not reached within the 50-step budget.
        """
        try:
            options = set()
            options.add(so.CGC_ZERO_FILL_UNCONSTRAINED_MEMORY)
            options.add(so.CGC_NO_SYMBOLIC_RECEIVE_LENGTH)
            options.add(so.TRACK_MEMORY_MAPPING)
            options.add(so.AVOID_MULTIVALUED_READS)
            options.add(so.AVOID_MULTIVALUED_WRITES)
            # try to enable unicorn, continue if it doesn't exist
            options.add(so.UNICORN)
            l.info("unicorn tracing enabled")
            remove_options = so.simplification | {so.LAZY_SOLVES} | so.resilience | {so.SUPPORT_FLOATING_POINT}
            add_options = options
            entry_state = self.project.factory.entry_state(
                add_options=add_options,
                remove_options=remove_options)
            # map the CGC flag page
            fake_flag_data = entry_state.solver.BVV(FLAG_DATA)
            entry_state.memory.store(0x4347c000, fake_flag_data)
            # map the place where I put arguments
            entry_state.memory.map_region(0x2000, 0x10000, 7)
            # warm unicorn's heuristic counters so it engages immediately
            entry_state.unicorn._register_check_count = 100
            entry_state.unicorn._runs_since_symbolic_data = 100
            entry_state.unicorn._runs_since_unicorn = 100
            # cooldowns
            entry_state.unicorn.cooldown_symbolic_stop = 2
            entry_state.unicorn.cooldown_unsupported_stop = 2
            entry_state.unicorn.cooldown_nonunicorn_blocks = 1
            entry_state.unicorn.max_steps = 10000
            pg = self.project.factory.simulation_manager(entry_state)
            stop_addr = self.project.simos.syscall_from_number(2).addr
            num_steps = 0
            while len(pg.active) > 0:
                if pg.one_active.addr == stop_addr:
                    # execute until receive
                    break
                if len(pg.active) > 1:
                    # on a state split, keep only one branch
                    # NOTE(review): pg.one_active with >1 active states may
                    # assert in some angr versions — confirm.
                    pp = pg.one_active
                    pg = self.project.factory.simulation_manager(pp)
                pg.step()
                num_steps += 1
                if num_steps > 50:
                    break
            if len(pg.active) > 0:
                out_state = pg.one_active
            elif len(pg.deadended) > 0:
                out_state = pg.deadended[0]
            else:
                return self.project.factory.entry_state()
            out_state.scratch.clear()
            out_state.history.jumpkind = "Ijk_Boring"
            return out_state
        except SimError as e:
            l.warning("SimError in get recv state %s", e)
            return self.project.factory.entry_state()
        except AngrError as e:
            l.warning("AngrError in get recv state %s", e)
            return self.project.factory.entry_state()
    def setup_state(self, function, test_data, initial_state=None, concrete_rand=False):
        """Build the execution state for one test run: fresh POSIX plugin
        with the preloaded stdin, concretized registers, unicorn tuned to
        engage, syscall hooks installed, and a short solver timeout."""
        # FIXME fdwait should do something concrete...
        if initial_state is None:
            if self.base_state is None:
                self.base_state = self._get_recv_state()
            entry_state = self.base_state.copy()
        else:
            entry_state = initial_state.copy()
        stdin = SimFile('stdin', content=test_data.preloaded_stdin)
        stdout = SimFile('stdout')
        stderr = SimFile('stderr')
        fd = {0: SimFileDescriptor(stdin, 0), 1: SimFileDescriptor(stdout, 0), 2: SimFileDescriptor(stderr, 0)}
        entry_state.register_plugin('posix', SimSystemPosix(stdin=stdin, stdout=stdout, stderr=stderr, fd=fd))
        entry_state.options.add(so.STRICT_PAGE_ACCESS)
        # make sure unicorn will run: zero out any symbolic register
        for k in dir(entry_state.regs):
            r = getattr(entry_state.regs, k)
            if r.symbolic:
                setattr(entry_state.regs, k, 0)
        entry_state.unicorn._register_check_count = 100
        entry_state.unicorn._runs_since_symbolic_data = 100
        entry_state.unicorn._runs_since_unicorn = 100
        # cooldowns
        entry_state.unicorn.cooldown_symbolic_stop = 2
        entry_state.unicorn.cooldown_unsupported_stop = 2
        entry_state.unicorn.cooldown_nonunicorn_blocks = 1
        entry_state.unicorn.max_steps = 10000
        # syscall hook
        entry_state.inspect.b(
            'syscall',
            BP_BEFORE,
            action=self.syscall_hook
        )
        if concrete_rand:
            entry_state.inspect.b(
                'syscall',
                BP_AFTER,
                action=self.syscall_hook_concrete_rand
            )
        # solver timeout
        entry_state.solver._solver.timeout = 500
        return entry_state
    @staticmethod
    def syscall_hook(state):
        # FIXME maybe we need to fix transmit/receive to handle huge vals properly
        # Kill paths that try to read/write/randomize huge amounts: the
        # False constraint makes the state unsatisfiable so it is pruned.
        syscall_name = state.inspect.syscall_name
        if syscall_name == "transmit":
            count = state.solver.eval(state.regs.edx)
            if count > 0x10000:
                state.regs.edx = 0
                state.add_constraints(claripy.BoolV(False))
        if syscall_name == "receive":
            count = state.solver.eval(state.regs.edx)
            if count > 0x10000:
                state.regs.edx = 0
                state.add_constraints(claripy.BoolV(False))
        if syscall_name == "random":
            count = state.solver.eval(state.regs.ecx)
            if count > 0x1000:
                state.regs.ecx = 0
                state.add_constraints(claripy.BoolV(False))
    @staticmethod
    def syscall_hook_concrete_rand(state):
        # After a random syscall, overwrite the destination buffer with
        # concrete pseudo-random bytes so later execution stays concrete.
        syscall_name = state.inspect.syscall_name
        if syscall_name == "random":
            count = state.solver.eval(state.regs.ecx)
            if count > 100:
                return
            buf = state.solver.eval(state.regs.ebx)
            for i in range(count):
                a = random.randint(0, 255)
                state.memory.store(buf+i, state.solver.BVV(a, 8))
    def get_base_call_state(self, function, test_data, initial_state=None, concrete_rand=False):
        """Return the state just before calling *function*, with the test's
        input arguments mapped into memory starting at 0x2000."""
        curr_buf_loc = 0x2000
        mapped_input = []
        s = self.setup_state(function, test_data, initial_state, concrete_rand=concrete_rand)
        for i in test_data.input_args:
            if isinstance(i, (bytes, claripy.ast.BV)):
                s.memory.store(curr_buf_loc, i)
                mapped_input.append(curr_buf_loc)
                curr_buf_loc += max(len(i), 0x1000)
            else:
                if not isinstance(i, int):
                    raise Exception("Expected int/bytes got %s" % type(i))
                mapped_input.append(i)
        cc = self.project.factory.cc()
        call = IdentifierCallable(self.project, function.startpoint.addr, concrete_only=True,
                                  cc=cc, base_state=s, max_steps=test_data.max_steps)
        return call.get_base_state(*mapped_input)
    def test(self, function, test_data, concrete_rand=False, custom_offs=None):
        """Run *function* on the test inputs; return True iff memory
        outputs, return value and stdout all match the expectations."""
        curr_buf_loc = 0x2000
        mapped_input = []
        s = self.setup_state(function, test_data, concrete_rand=concrete_rand)
        if custom_offs is None:
            for i in test_data.input_args:
                if isinstance(i, bytes):
                    s.memory.store(curr_buf_loc, i + b"\x00")
                    mapped_input.append(curr_buf_loc)
                    curr_buf_loc += max(len(i), 0x1000)
                else:
                    if not isinstance(i, int):
                        raise Exception("Expected int/str got %s" % type(i))
                    mapped_input.append(i)
        else:
            # custom_offs lets a test point into the middle of a stored buffer
            for i, off in zip(test_data.input_args, custom_offs):
                if isinstance(i, bytes):
                    s.memory.store(curr_buf_loc, i + b"\x00")
                    mapped_input.append(curr_buf_loc+off)
                    curr_buf_loc += max(len(i), 0x1000)
                else:
                    if not isinstance(i, int):
                        raise Exception("Expected int/str got %s" % type(i))
                    mapped_input.append(i)
        cc = self.project.factory.cc()
        try:
            call = IdentifierCallable(self.project, function.startpoint.addr, concrete_only=True,
                                      cc=cc, base_state=s, max_steps=test_data.max_steps)
            result = call(*mapped_input)
            result_state = call.result_state
        except AngrCallableMultistateError as e:
            l.info("multistate error: %s", e)
            return False
        except AngrCallableError as e:
            l.info("other callable error: %s", e)
            return False
        # check matches: load each expected output buffer from memory
        outputs = []
        for i, out in enumerate(test_data.expected_output_args):
            if isinstance(out, bytes):
                if len(out) == 0:
                    raise Exception("len 0 out")
                outputs.append(result_state.memory.load(mapped_input[i], len(out)))
            else:
                outputs.append(None)
        tmp_outputs = outputs
        outputs = []
        for out in tmp_outputs:
            if out is None:
                outputs.append(None)
            elif result_state.solver.symbolic(out):
                l.info("symbolic memory output")
                return False
            else:
                outputs.append(result_state.solver.eval(out, cast_to=bytes))
        if outputs != test_data.expected_output_args:
            l.info("mismatch output")
            return False
        if result_state.solver.symbolic(result):
            l.info("result value symbolic")
            return False
        # negative expectations are compared against the wrapped unsigned value
        if test_data.expected_return_val is not None and test_data.expected_return_val < 0:
            test_data.expected_return_val &= (2**self.project.arch.bits - 1)
        if test_data.expected_return_val is not None and \
                result_state.solver.eval(result) != test_data.expected_return_val:
            l.info("return val mismatch got %#x, expected %#x", result_state.solver.eval(result), test_data.expected_return_val)
            return False
        if result_state.solver.symbolic(result_state.posix.stdout.size):
            l.info("symbolic stdout pos")
            return False
        if result_state.solver.eval(result_state.posix.stdout.size) == 0:
            # Bug fix: this used to be the str "" — which lacks the
            # .symbolic attribute checked on the non-empty path and can
            # never compare equal to a bytes expected_stdout below.
            stdout = b""
        else:
            stdout_bv = result_state.posix.stdout.load(0, result_state.posix.stdout.size)
            if stdout_bv.symbolic:
                l.info("symbolic stdout")
                return False
            stdout = result_state.solver.eval(stdout_bv, cast_to=bytes)
        if stdout != test_data.expected_stdout:
            l.info("mismatch stdout")
            return False
        return True
    def get_out_state(self, function, test_data, initial_state=None, concrete_rand=False, custom_offs=None):
        """Run *function* like test(), but return the resulting state
        (or None on callable errors) instead of a pass/fail verdict."""
        curr_buf_loc = 0x2000
        mapped_input = []
        s = self.setup_state(function, test_data, initial_state, concrete_rand=concrete_rand)
        if custom_offs is None:
            for i in test_data.input_args:
                if isinstance(i, bytes):
                    s.memory.store(curr_buf_loc, i + b"\x00")
                    mapped_input.append(curr_buf_loc)
                    curr_buf_loc += max(len(i), 0x1000)
                else:
                    if not isinstance(i, int):
                        raise Exception("Expected int/bytes got %s" % type(i))
                    mapped_input.append(i)
        else:
            for i, off in zip(test_data.input_args, custom_offs):
                if isinstance(i, bytes):
                    s.memory.store(curr_buf_loc, i + b"\x00")
                    mapped_input.append(curr_buf_loc+off)
                    curr_buf_loc += max(len(i), 0x1000)
                else:
                    if not isinstance(i, int):
                        raise Exception("Expected int/bytes got %s" % type(i))
                    mapped_input.append(i)
        cc = self.project.factory.cc()
        try:
            call = IdentifierCallable(self.project, function.startpoint.addr, concrete_only=True,
                                      cc=cc, base_state=s, max_steps=test_data.max_steps)
            _ = call(*mapped_input)
            result_state = call.result_state
        except AngrCallableMultistateError as e:
            l.info("multistate error: %s", e)
            return None
        except AngrCallableError as e:
            l.info("other callable error: %s", e)
            return None
        return result_state
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.