Dataset schema (one record per source file; ⌀ = column contains nulls):
| hexsha: stringlengths 40..40 | size: int64 4..1.02M | ext: stringclasses 8 values | lang: stringclasses 1 value |
| max_stars_repo_path: stringlengths 4..209 | max_stars_repo_name: stringlengths 5..121 | max_stars_repo_head_hexsha: stringlengths 40..40 | max_stars_repo_licenses: listlengths 1..10 | max_stars_count: int64 1..191k ⌀ | max_stars_repo_stars_event_min_datetime: stringlengths 24..24 ⌀ | max_stars_repo_stars_event_max_datetime: stringlengths 24..24 ⌀ |
| max_issues_repo_path: stringlengths 4..209 | max_issues_repo_name: stringlengths 5..121 | max_issues_repo_head_hexsha: stringlengths 40..40 | max_issues_repo_licenses: listlengths 1..10 | max_issues_count: int64 1..67k ⌀ | max_issues_repo_issues_event_min_datetime: stringlengths 24..24 ⌀ | max_issues_repo_issues_event_max_datetime: stringlengths 24..24 ⌀ |
| max_forks_repo_path: stringlengths 4..209 | max_forks_repo_name: stringlengths 5..121 | max_forks_repo_head_hexsha: stringlengths 40..40 | max_forks_repo_licenses: listlengths 1..10 | max_forks_count: int64 1..105k ⌀ | max_forks_repo_forks_event_min_datetime: stringlengths 24..24 ⌀ | max_forks_repo_forks_event_max_datetime: stringlengths 24..24 ⌀ |
| content: stringlengths 4..1.02M | avg_line_length: float64 1.07..66.1k | max_line_length: int64 4..266k | alphanum_fraction: float64 0.01..1 |
| hexsha: 3f9bb64c3b13688df63ef2c1c6621c6981f22e05 | size: 16,251 | ext: py | lang: Python |
| max_stars: repo_path=tqsdk/objs.py, repo_name=shinny-hongyan/tqsdk-python, repo_head_hexsha=cd130944148fba15ae10a49e77e62a9a6c551cfc, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null |
| max_issues: repo_path=tqsdk/objs.py, repo_name=shinny-hongyan/tqsdk-python, repo_head_hexsha=cd130944148fba15ae10a49e77e62a9a6c551cfc, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null |
| max_forks: repo_path=tqsdk/objs.py, repo_name=shinny-hongyan/tqsdk-python, repo_head_hexsha=cd130944148fba15ae10a49e77e62a9a6c551cfc, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null |
| content: |
#!/usr/bin/env python
#  -*- coding: utf-8 -*-
__author__ = 'chengzhi'
import copy
import weakref
from collections.abc import MutableMapping
class Entity(MutableMapping):
def _instance_entity(self, path):
self._path = path
self._listener = weakref.WeakSet()
def __setitem__(self, key, value):
return self.__dict__.__setitem__(key, value)
def __delitem__(self, key):
return self.__dict__.__delitem__(key)
def __getitem__(self, key):
return self.__dict__.__getitem__(key)
def __iter__(self):
return iter({k: v for k, v in self.__dict__.items() if not k.startswith("_")})
def __len__(self):
return len({k: v for k, v in self.__dict__.items() if not k.startswith("_")})
def __str__(self):
return str({k: v for k, v in self.__dict__.items() if not k.startswith("_")})
def __repr__(self):
return '{}, D({})'.format(super(Entity, self).__repr__(),
{k: v for k, v in self.__dict__.items() if not k.startswith("_")})
def copy(self):
return copy.copy(self)
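# --- Illustrative example (editor's addition, not part of the original file) ---
# A minimal sketch of how Entity behaves as a mapping: attributes whose names
# start with "_" are stored normally but hidden from iteration, len() and str().
def _example_entity_usage():
    e = Entity()
    e["last_price"] = 100.0   # __setitem__ writes into the instance __dict__
    e._internal = "hidden"    # underscore attributes are skipped by __iter__/__len__
    assert dict(e) == {"last_price": 100.0}
    assert len(e) == 1
    return e.copy()           # shallow copy via copy.copy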
class Quote(Entity):
""" Quote 是一个行情对象 """
def __init__(self, api):
self._api = api
#: 行情从交易所发出的时间(北京时间), 格式为 "2017-07-26 23:04:21.000001"
self.datetime = ""
#: 卖一价
self.ask_price1 = float("nan")
#: 卖一量
self.ask_volume1 = 0
#: 买一价
self.bid_price1 = float("nan")
#: 买一量
self.bid_volume1 = 0
#: 卖二价
self.ask_price2 = float("nan")
#: 卖二量
self.ask_volume2 = 0
#: 买二价
self.bid_price2 = float("nan")
#: 买二量
self.bid_volume2 = 0
#: 卖三价
self.ask_price3 = float("nan")
#: 卖三量
self.ask_volume3 = 0
#: 买三价
self.bid_price3 = float("nan")
#: 买三量
self.bid_volume3 = 0
#: 卖四价
self.ask_price4 = float("nan")
#: 卖四量
self.ask_volume4 = 0
#: 买四价
self.bid_price4 = float("nan")
#: 买四量
self.bid_volume4 = 0
#: 卖五价
self.ask_price5 = float("nan")
#: 卖五量
self.ask_volume5 = 0
#: 买五价
self.bid_price5 = float("nan")
#: 买五量
self.bid_volume5 = 0
#: 最新价
self.last_price = float("nan")
#: 当日最高价
self.highest = float("nan")
#: 当日最低价
self.lowest = float("nan")
#: 开盘价
self.open = float("nan")
#: 收盘价
self.close = float("nan")
#: 当日均价
self.average = float("nan")
#: 成交量
self.volume = 0
#: 成交额
self.amount = float("nan")
#: 持仓量
self.open_interest = 0
#: 结算价
self.settlement = float("nan")
#: 涨停价
self.upper_limit = float("nan")
#: 跌停价
self.lower_limit = float("nan")
#: 昨持仓量
self.pre_open_interest = 0
#: 昨结算价
self.pre_settlement = float("nan")
#: 昨收盘价
self.pre_close = float("nan")
#: 合约价格变动单位
self.price_tick = float("nan")
#: 合约价格小数位数
self.price_decs = 0
#: 合约乘数
self.volume_multiple = 0
#: 最大限价单手数
self.max_limit_order_volume = 0
#: 最大市价单手数
self.max_market_order_volume = 0
#: 最小限价单手数
self.min_limit_order_volume = 0
#: 最小市价单手数
self.min_market_order_volume = 0
#: 标的合约
self.underlying_symbol = ""
#: 行权价
self.strike_price = float("nan")
#: 合约类型
self.ins_class = ""
#: 交易所内的合约代码
self.instrument_id = ""
#: 合约是否已下市
self.expired = False
#: 交易时间段
self.trading_time = TradingTime(self._api)
#: 到期具体日
self.expire_datetime = float("nan")
#: 到期月
self.delivery_month = 0
#: 到期年
self.delivery_year = 0
#: 期权方向
self.option_class = ""
#: 品种代码
self.product_id = ""
def _instance_entity(self, path):
super(Quote, self)._instance_entity(path)
self.trading_time = copy.copy(self.trading_time)
self.trading_time._instance_entity(path + ["trading_time"])
class TradingTime(Entity):
    """ TradingTime is a trading-time object.
    It is not meant to be used on its own; it defines the type of the trading_time field of Quote
    (each continuous trading session is a list of two strings giving the start and end of that session)."""
    def __init__(self, api):
        self._api = api
        #: day session
        self.day = []
        #: night session (note: times past 24:00 keep counting upward, e.g. 1 a.m. is '25:00:00')
        self.night = []
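# --- Illustrative example (editor's addition) ---
# The night session uses an extended clock: times past midnight keep counting
# beyond 24, so 01:00 of the next day is written "25:00:00". A hypothetical
# helper that normalises such a string into a day offset plus a regular time:
def _example_normalize_extended_time(timestring):
    hour, minute, second = (int(part) for part in timestring.split(":"))
    return hour // 24, "%02d:%02d:%02d" % (hour % 24, minute, second)

# _example_normalize_extended_time("25:00:00") -> (1, "01:00:00")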
class Kline(Entity):
""" Kline 是一个K线对象 """
def __init__(self, api):
self._api = api
#: K线起点时间(按北京时间),自unix epoch(1970-01-01 00:00:00 GMT)以来的纳秒数
self.datetime = 0
#: K线起始时刻的最新价
self.open = float("nan")
#: K线时间范围内的最高价
self.high = float("nan")
#: K线时间范围内的最低价
self.low = float("nan")
#: K线结束时刻的最新价
self.close = float("nan")
#: K线时间范围内的成交量
self.volume = 0
#: K线起始时刻的持仓量
self.open_oi = 0
#: K线结束时刻的持仓量
self.close_oi = 0
class Tick(Entity):
""" Tick 是一个tick对象 """
def __init__(self, api):
self._api = api
#: tick从交易所发出的时间(按北京时间),自unix epoch(1970-01-01 00:00:00 GMT)以来的纳秒数
self.datetime = 0
#: 最新价
self.last_price = float("nan")
#: 当日均价
self.average = float("nan")
#: 当日最高价
self.highest = float("nan")
#: 当日最低价
self.lowest = float("nan")
#: 卖1价
self.ask_price1 = float("nan")
#: 卖1量
self.ask_volume1 = 0
#: 买1价
self.bid_price1 = float("nan")
#: 买1量
self.bid_volume1 = 0
#: 卖2价
self.ask_price2 = float("nan")
#: 卖2量
self.ask_volume2 = 0
#: 买2价
self.bid_price2 = float("nan")
#: 买2量
self.bid_volume2 = 0
#: 卖3价
self.ask_price3 = float("nan")
#: 卖3量
self.ask_volume3 = 0
#: 买3价
self.bid_price3 = float("nan")
#: 买3量
self.bid_volume3 = 0
#: 卖4价
self.ask_price4 = float("nan")
#: 卖4量
self.ask_volume4 = 0
#: 买4价
self.bid_price4 = float("nan")
#: 买4量
self.bid_volume4 = 0
#: 卖5价
self.ask_price5 = float("nan")
#: 卖5量
self.ask_volume5 = 0
#: 买5价
self.bid_price5 = float("nan")
#: 买5量
self.bid_volume5 = 0
#: 当日成交量
self.volume = 0
#: 成交额
self.amount = float("nan")
#: 持仓量
self.open_interest = 0
class Account(Entity):
""" Account 是一个账户对象 """
def __init__(self, api):
self._api = api
#: 币种
self.currency = ""
#: 昨日账户权益(不包含期权)
self.pre_balance = float("nan")
#: 静态权益 (静态权益 = 昨日结算的权益 + 今日入金 - 今日出金, 以服务器查询ctp后返回的金额为准)(不包含期权)
self.static_balance = float("nan")
#: 账户权益 (账户权益 = 动态权益 = 静态权益 + 平仓盈亏 + 持仓盈亏 - 手续费 + 权利金 + 期权市值)
self.balance = float("nan")
#: 可用资金(可用资金 = 账户权益 - 冻结保证金 - 保证金 - 冻结权利金 - 冻结手续费 - 期权市值)
self.available = float("nan")
#: 期货公司返回的balance(ctp_balance = 静态权益 + 平仓盈亏 + 持仓盈亏 - 手续费 + 权利金)
self.ctp_balance = float("nan")
#: 期货公司返回的available(ctp_available = ctp_balance - 保证金 - 冻结保证金 - 冻结手续费 - 冻结权利金)
self.ctp_available = float("nan")
#: 浮动盈亏
self.float_profit = float("nan")
#: 持仓盈亏
self.position_profit = float("nan")
#: 本交易日内平仓盈亏
self.close_profit = float("nan")
#: 冻结保证金
self.frozen_margin = float("nan")
#: 保证金占用
self.margin = float("nan")
#: 冻结手续费
self.frozen_commission = float("nan")
#: 本交易日内交纳的手续费
self.commission = float("nan")
#: 冻结权利金
self.frozen_premium = float("nan")
#: 本交易日内收入-交纳的权利金
self.premium = float("nan")
#: 本交易日内的入金金额
self.deposit = float("nan")
#: 本交易日内的出金金额
self.withdraw = float("nan")
#: 风险度(风险度 = 保证金 / 账户权益)
self.risk_ratio = float("nan")
#: 期权市值
self.market_value = float("nan")
class Position(Entity):
""" Position 是一个持仓对象 """
def __init__(self, api):
self._api = api
#: 交易所
self.exchange_id = ""
#: 交易所内的合约代码
self.instrument_id = ""
#: 多头老仓手数
self.pos_long_his = 0
#: 多头今仓手数
self.pos_long_today = 0
#: 空头老仓手数
self.pos_short_his = 0
#: 空头今仓手数
self.pos_short_today = 0
#: 期货公司查询的多头今仓手数 (不推荐, 推荐使用pos_long_today)
self.volume_long_today = 0
#: 期货公司查询的多头老仓手数 (不推荐, 推荐使用pos_long_his)
self.volume_long_his = 0
#: 期货公司查询的多头手数 (不推荐, 推荐使用pos_long)
self.volume_long = 0
#: 期货公司查询的多头今仓冻结 (不推荐)
self.volume_long_frozen_today = 0
#: 期货公司查询的多头老仓冻结 (不推荐)
self.volume_long_frozen_his = 0
#: 期货公司查询的多头持仓冻结 (不推荐)
self.volume_long_frozen = 0
#: 期货公司查询的空头今仓手数 (不推荐, 推荐使用pos_short_today)
self.volume_short_today = 0
#: 期货公司查询的空头老仓手数 (不推荐, 推荐使用pos_short_his)
self.volume_short_his = 0
#: 期货公司查询的空头手数 (不推荐, 推荐使用pos_short)
self.volume_short = 0
#: 期货公司查询的空头今仓冻结 (不推荐)
self.volume_short_frozen_today = 0
#: 期货公司查询的空头老仓冻结 (不推荐)
self.volume_short_frozen_his = 0
#: 期货公司查询的空头持仓冻结 (不推荐)
self.volume_short_frozen = 0
#: 多头开仓均价
self.open_price_long = float("nan")
#: 空头开仓均价
self.open_price_short = float("nan")
#: 多头开仓成本
self.open_cost_long = float("nan")
#: 空头开仓成本
self.open_cost_short = float("nan")
#: 多头持仓均价
self.position_price_long = float("nan")
#: 空头持仓均价
self.position_price_short = float("nan")
#: 多头持仓成本
self.position_cost_long = float("nan")
#: 空头持仓成本
self.position_cost_short = float("nan")
#: 多头浮动盈亏
self.float_profit_long = float("nan")
#: 空头浮动盈亏
self.float_profit_short = float("nan")
#: 浮动盈亏 (浮动盈亏: 相对于开仓价的盈亏)
self.float_profit = float("nan")
#: 多头持仓盈亏
self.position_profit_long = float("nan")
#: 空头持仓盈亏
self.position_profit_short = float("nan")
#: 持仓盈亏 (持仓盈亏: 相对于上一交易日结算价的盈亏)
self.position_profit = float("nan")
#: 多头占用保证金
self.margin_long = float("nan")
#: 空头占用保证金
self.margin_short = float("nan")
#: 占用保证金
self.margin = float("nan")
#: 期权权利方市值(始终 >= 0)
self.market_value_long = float("nan")
#: 期权义务方市值(始终 <= 0)
self.market_value_short = float("nan")
#: 期权市值
self.market_value = float("nan")
@property
def pos(self):
"""
净持仓手数
:return: int, ==0表示无持仓或多空持仓手数相等. <0表示空头持仓大于多头持仓, >0表示多头持仓大于空头持仓
注: 本字段是由 pos_long 等字段计算出来的,而非服务器发回的原始数据中的字段,则:
1. is_changing() 是判断服务器发回的数据字段,因此不能用于 is_changing() 判断。
2. 在直接 print(position) 时不会显示出此字段。
3. 只能用 position.pos 方式取值,不能用 position["pos"] 方式。
4. pos_long, pos_short, orders这三个字段同理。
"""
return self.pos_long - self.pos_short
@property
def pos_long(self):
"""
        Long position (in lots).
        :return: int, ==0 means no long position; >0 is the number of long lots held.
"""
return (self.pos_long_his + self.pos_long_today)
@property
def pos_short(self):
"""
        Short position (in lots).
        :return: int, ==0 means no short position; >0 is the number of short lots held.
"""
return (self.pos_short_his + self.pos_short_today)
@property
def orders(self):
"""
        Open/close orders that are still alive and related to this position.
        :return: dict whose keys are order IDs and whose values are :py:class:`~tqsdk.objs.Order` objects.
"""
tdict = self._api._get_obj(self._api._data, ["trade", self._api._account._account_id, "orders"])
fts = {order_id: order for order_id, order in tdict.items() if (not order_id.startswith(
"_")) and order.instrument_id == self.instrument_id and order.exchange_id == self.exchange_id and order.status == "ALIVE"}
return fts
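# --- Illustrative example (editor's addition) ---
# A minimal sketch showing that pos, pos_long and pos_short are computed
# properties derived from the today/history fields, and that they are only
# reachable as attributes (position.pos), never as mapping keys.
# The api argument is not needed for this arithmetic, so None is passed here.
def _example_position_pos():
    position = Position(api=None)
    position.pos_long_his, position.pos_long_today = 2, 1
    position.pos_short_his, position.pos_short_today = 0, 1
    assert position.pos_long == 3 and position.pos_short == 1
    assert position.pos == 2        # net position = long - short
    assert "pos" not in position    # computed, not part of the mapping data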
class Order(Entity):
""" Order 是一个委托单对象 """
def __init__(self, api):
self._api = api
#: 委托单ID, 对于一个用户的所有委托单,这个ID都是不重复的
self.order_id = ""
#: 交易所单号
self.exchange_order_id = ""
#: 交易所
self.exchange_id = ""
#: 交易所内的合约代码
self.instrument_id = ""
#: 下单方向, BUY=买, SELL=卖
self.direction = ""
#: 开平标志, OPEN=开仓, CLOSE=平仓, CLOSETODAY=平今
self.offset = ""
#: 总报单手数
self.volume_orign = 0
#: 未成交手数
self.volume_left = 0
#: 委托价格, 仅当 price_type = LIMIT 时有效
self.limit_price = float("nan")
#: 价格类型, ANY=市价, LIMIT=限价
self.price_type = ""
#: 手数条件, ANY=任何数量, MIN=最小数量, ALL=全部数量
self.volume_condition = ""
#: 时间条件, IOC=立即完成,否则撤销, GFS=本节有效, GFD=当日有效, GTC=撤销前有效, GFA=集合竞价有效
self.time_condition = ""
#: 下单时间,自unix epoch(1970-01-01 00:00:00 GMT)以来的纳秒数.
self.insert_date_time = 0
#: 委托单状态信息
self.last_msg = ""
#: 委托单状态, ALIVE=有效, FINISHED=已完
self.status = ""
self._this_session = False
@property
def is_dead(self):
"""
        Whether this order is definitely dead (it will never produce another fill).
        :return: True when the order is known to be dead, otherwise False. Note that False does not mean the order is still alive; the exchange's confirmation may still be in transit or may have been lost.
        Note: this field is computed from status and related fields rather than coming from the raw server data, therefore:
        1. is_changing() only inspects fields sent by the server, so it cannot be used to watch this field.
        2. It is not shown when you print(order) directly.
        3. It can only be read as order.is_dead, not as order["is_dead"].
        4. The same applies to is_online, is_error, trade_price and trade_records.
"""
return self.status == "FINISHED"
@property
def is_online(self):
"""
        Whether this order is definitely accepted by the exchange and waiting to be filled.
        :return: True when the order is known to be accepted by the exchange, otherwise False. Note that False does not mean it has not been accepted; the exchange's confirmation may still be in transit or may have been lost.
"""
return self.exchange_order_id != "" and self.status == "ALIVE"
@property
def is_error(self):
"""
        Whether this order is definitely rejected (order insertion failed, so it can never be filled).
        :return: True when the order is known to be rejected, otherwise False. Note that False does not mean it is not rejected; the exchange's confirmation may still be in transit or may have been lost.
"""
return self.exchange_order_id == "" and self.status == "FINISHED"
@property
def trade_price(self):
"""
        Average fill price.
        :return: the volume-weighted average price of the filled part when the order is partially or fully filled; nan when nothing has been filled.
"""
tdict = self._api._get_obj(self._api._data, ["trade", self._api._account._account_id, "trades"])
sum_volume = sum([trade.volume for trade_id, trade in tdict.items() if
(not trade_id.startswith("_")) and trade.order_id == self.order_id])
if sum_volume == 0:
return float('nan')
sum_amount = sum([trade.volume * trade.price for trade_id, trade in tdict.items() if
(not trade_id.startswith("_")) and trade.order_id == self.order_id])
return sum_amount / sum_volume
@property
def trade_records(self):
"""
        Fill records of this order.
        :return: dict whose keys are trade IDs and whose values are :py:class:`~tqsdk.objs.Trade` objects.
"""
tdict = self._api._get_obj(self._api._data, ["trade", self._api._account._account_id, "trades"])
fts = {trade_id: trade for trade_id, trade in tdict.items() if
(not trade_id.startswith("_")) and trade.order_id == self.order_id}
return fts
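# --- Illustrative example (editor's addition) ---
# trade_price above is a volume-weighted average over the fills of one order:
# 2 lots at 100.0 plus 3 lots at 101.0 give (2*100.0 + 3*101.0) / 5 = 100.6.
# The same computation over plain (volume, price) pairs, without the api:
def _example_volume_weighted_price(fills):
    total_volume = sum(volume for volume, _ in fills)
    if total_volume == 0:
        return float("nan")
    return sum(volume * price for volume, price in fills) / total_volume

# _example_volume_weighted_price([(2, 100.0), (3, 101.0)]) -> 100.6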
class Trade(Entity):
""" Trade 是一个成交对象 """
def __init__(self, api):
self._api = api
#: 委托单ID, 对于一个用户的所有委托单,这个ID都是不重复的
self.order_id = ""
#: 成交ID, 对于一个用户的所有成交,这个ID都是不重复的
self.trade_id = ""
#: 交易所成交号
self.exchange_trade_id = ""
#: 交易所
self.exchange_id = ""
#: 交易所内的合约代码
self.instrument_id = ""
#: 下单方向, BUY=买, SELL=卖
self.direction = ""
#: 开平标志, OPEN=开仓, CLOSE=平仓, CLOSETODAY=平今
self.offset = ""
#: 成交价格
self.price = float("nan")
#: 成交手数
self.volume = 0
#: 成交时间,自unix epoch(1970-01-01 00:00:00 GMT)以来的纳秒数
self.trade_date_time = 0
| avg_line_length: 28.967914 | max_line_length: 134 | alphanum_fraction: 0.542797 |
| hexsha: ab67738c1734d3a2f4ccc7448f52dd8bbf439bc4 | size: 12,722 | ext: py | lang: Python |
| max_stars: repo_path=pyrad/tests/testDictionary.py, repo_name=lemquoc/pyrad, repo_head_hexsha=d3db68cb00d47dd0b4e8b59024beb4a2d5e115ca, licenses=["BSD-3-Clause"], count=null, event_min=null, event_max=null |
| max_issues: repo_path=pyrad/tests/testDictionary.py, repo_name=lemquoc/pyrad, repo_head_hexsha=d3db68cb00d47dd0b4e8b59024beb4a2d5e115ca, licenses=["BSD-3-Clause"], count=null, event_min=null, event_max=null |
| max_forks: repo_path=pyrad/tests/testDictionary.py, repo_name=lemquoc/pyrad, repo_head_hexsha=d3db68cb00d47dd0b4e8b59024beb4a2d5e115ca, licenses=["BSD-3-Clause"], count=1, event_min=2019-07-16T23:41:48.000Z, event_max=2019-07-16T23:41:48.000Z |
| content: |
import unittest
import operator
import os
from six import StringIO
from pyrad.tests import home
from pyrad.dictionary import Attribute
from pyrad.dictionary import Dictionary
from pyrad.dictionary import ParseError
from pyrad.tools import DecodeAttr
from pyrad.dictfile import DictFile
class AttributeTests(unittest.TestCase):
def testInvalidDataType(self):
self.assertRaises(ValueError, Attribute, 'name', 'code', 'datatype')
def testConstructionParameters(self):
attr = Attribute('name', 'code', 'integer', False, 'vendor')
self.assertEqual(attr.name, 'name')
self.assertEqual(attr.code, 'code')
self.assertEqual(attr.type, 'integer')
self.assertEqual(attr.is_sub_attribute, False)
self.assertEqual(attr.vendor, 'vendor')
self.assertEqual(len(attr.values), 0)
self.assertEqual(len(attr.sub_attributes), 0)
def testNamedConstructionParameters(self):
attr = Attribute(name='name', code='code', datatype='integer',
vendor='vendor')
self.assertEqual(attr.name, 'name')
self.assertEqual(attr.code, 'code')
self.assertEqual(attr.type, 'integer')
self.assertEqual(attr.vendor, 'vendor')
self.assertEqual(len(attr.values), 0)
def testValues(self):
attr = Attribute('name', 'code', 'integer', False, 'vendor',
dict(pie='custard', shake='vanilla'))
self.assertEqual(len(attr.values), 2)
self.assertEqual(attr.values['shake'], 'vanilla')
class DictionaryInterfaceTests(unittest.TestCase):
def testEmptyDictionary(self):
dict = Dictionary()
self.assertEqual(len(dict), 0)
def testContainment(self):
dict = Dictionary()
self.assertEqual('test' in dict, False)
self.assertEqual(dict.has_key('test'), False)
dict.attributes['test'] = 'dummy'
self.assertEqual('test' in dict, True)
self.assertEqual(dict.has_key('test'), True)
def testReadonlyContainer(self):
import six
dict = Dictionary()
self.assertRaises(TypeError,
operator.setitem, dict, 'test', 'dummy')
self.assertRaises(AttributeError,
operator.attrgetter('clear'), dict)
self.assertRaises(AttributeError,
operator.attrgetter('update'), dict)
class DictionaryParsingTests(unittest.TestCase):
simple_dict_values = [
('Test-String', 1, 'string'),
('Test-Octets', 2, 'octets'),
('Test-Integer', 3, 'integer'),
('Test-Ip-Address', 4, 'ipaddr'),
('Test-Ipv6-Address', 5, 'ipv6addr'),
('Test-If-Id', 6, 'ifid'),
('Test-Date', 7, 'date'),
('Test-Abinary', 8, 'abinary'),
('Test-Tlv', 9, 'tlv'),
('Test-Tlv-Str', 1, 'string'),
('Test-Tlv-Int', 2, 'integer'),
('Test-Integer64', 10, 'integer64')
]
def setUp(self):
self.path = os.path.join(home, 'tests', 'data')
self.dict = Dictionary(os.path.join(self.path, 'simple'))
def testParseEmptyDictionary(self):
dict = Dictionary(StringIO(''))
self.assertEqual(len(dict), 0)
def testParseMultipleDictionaries(self):
dict = Dictionary(StringIO(''))
self.assertEqual(len(dict), 0)
one = StringIO('ATTRIBUTE Test-First 1 string')
two = StringIO('ATTRIBUTE Test-Second 2 string')
dict = Dictionary(StringIO(''), one, two)
self.assertEqual(len(dict), 2)
def testParseSimpleDictionary(self):
self.assertEqual(len(self.dict),len(self.simple_dict_values))
for (attr, code, type) in self.simple_dict_values:
attr = self.dict[attr]
self.assertEqual(attr.code, code)
self.assertEqual(attr.type, type)
def testAttributeTooFewColumnsError(self):
try:
self.dict.ReadDictionary(
StringIO('ATTRIBUTE Oops-Too-Few-Columns'))
except ParseError as e:
self.assertEqual('attribute' in str(e), True)
else:
self.fail()
def testAttributeUnknownTypeError(self):
try:
self.dict.ReadDictionary(StringIO('ATTRIBUTE Test-Type 1 dummy'))
except ParseError as e:
self.assertEqual('dummy' in str(e), True)
else:
self.fail()
def testAttributeUnknownVendorError(self):
try:
self.dict.ReadDictionary(StringIO('ATTRIBUTE Test-Type 1 Simplon'))
except ParseError as e:
self.assertEqual('Simplon' in str(e), True)
else:
self.fail()
def testAttributeOptions(self):
self.dict.ReadDictionary(StringIO(
'ATTRIBUTE Option-Type 1 string has_tag,encrypt=1'))
self.assertEqual(self.dict['Option-Type'].has_tag, True)
self.assertEqual(self.dict['Option-Type'].encrypt, 1)
def testAttributeEncryptionError(self):
try:
self.dict.ReadDictionary(StringIO(
'ATTRIBUTE Test-Type 1 string encrypt=4'))
except ParseError as e:
self.assertEqual('encrypt' in str(e), True)
else:
self.fail()
def testValueTooFewColumnsError(self):
try:
self.dict.ReadDictionary(StringIO('VALUE Oops-Too-Few-Columns'))
except ParseError as e:
self.assertEqual('value' in str(e), True)
else:
self.fail()
def testValueForUnknownAttributeError(self):
try:
self.dict.ReadDictionary(StringIO(
'VALUE Test-Attribute Test-Text 1'))
except ParseError as e:
self.assertEqual('unknown attribute' in str(e), True)
else:
self.fail()
def testIntegerValueParsing(self):
self.assertEqual(len(self.dict['Test-Integer'].values), 0)
self.dict.ReadDictionary(StringIO('VALUE Test-Integer Value-Six 5'))
self.assertEqual(len(self.dict['Test-Integer'].values), 1)
self.assertEqual(
DecodeAttr('integer',
self.dict['Test-Integer'].values['Value-Six']),
5)
def testInteger64ValueParsing(self):
self.assertEqual(len(self.dict['Test-Integer64'].values), 0)
self.dict.ReadDictionary(StringIO('VALUE Test-Integer64 Value-Six 5'))
self.assertEqual(len(self.dict['Test-Integer64'].values), 1)
self.assertEqual(
DecodeAttr('integer64',
self.dict['Test-Integer64'].values['Value-Six']),
5)
def testStringValueParsing(self):
self.assertEqual(len(self.dict['Test-String'].values), 0)
self.dict.ReadDictionary(StringIO(
'VALUE Test-String Value-Custard custardpie'))
self.assertEqual(len(self.dict['Test-String'].values), 1)
self.assertEqual(
DecodeAttr('string',
self.dict['Test-String'].values['Value-Custard']),
'custardpie')
def testTlvParsing(self):
self.assertEqual(len(self.dict['Test-Tlv'].sub_attributes), 2)
self.assertEqual(self.dict['Test-Tlv'].sub_attributes, {1:'Test-Tlv-Str', 2: 'Test-Tlv-Int'})
def testSubTlvParsing(self):
for (attr, _, _) in self.simple_dict_values:
if attr.startswith('Test-Tlv-'):
self.assertEqual(self.dict[attr].is_sub_attribute, True)
self.assertEqual(self.dict[attr].parent, self.dict['Test-Tlv'])
else:
self.assertEqual(self.dict[attr].is_sub_attribute, False)
self.assertEqual(self.dict[attr].parent, None)
# tlv with vendor
full_dict = Dictionary(os.path.join(self.path, 'full'))
self.assertEqual(full_dict['Simplon-Tlv-Str'].is_sub_attribute, True)
self.assertEqual(full_dict['Simplon-Tlv-Str'].parent, full_dict['Simplon-Tlv'])
self.assertEqual(full_dict['Simplon-Tlv-Int'].is_sub_attribute, True)
self.assertEqual(full_dict['Simplon-Tlv-Int'].parent, full_dict['Simplon-Tlv'])
def testVenderTooFewColumnsError(self):
try:
self.dict.ReadDictionary(StringIO('VENDOR Simplon'))
except ParseError as e:
self.assertEqual('vendor' in str(e), True)
else:
self.fail()
def testVendorParsing(self):
self.assertRaises(ParseError, self.dict.ReadDictionary,
StringIO('ATTRIBUTE Test-Type 1 integer Simplon'))
self.dict.ReadDictionary(StringIO('VENDOR Simplon 42'))
self.assertEqual(self.dict.vendors['Simplon'], 42)
self.dict.ReadDictionary(StringIO(
'ATTRIBUTE Test-Type 1 integer Simplon'))
self.assertEqual(self.dict.attrindex['Test-Type'], (42, 1))
def testVendorOptionError(self):
self.assertRaises(ParseError, self.dict.ReadDictionary,
StringIO('ATTRIBUTE Test-Type 1 integer Simplon'))
try:
self.dict.ReadDictionary(StringIO('VENDOR Simplon 42 badoption'))
except ParseError as e:
self.assertEqual('option' in str(e), True)
else:
self.fail()
def testVendorFormatError(self):
self.assertRaises(ParseError, self.dict.ReadDictionary,
StringIO('ATTRIBUTE Test-Type 1 integer Simplon'))
try:
self.dict.ReadDictionary(StringIO(
'VENDOR Simplon 42 format=5,4'))
except ParseError as e:
self.assertEqual('format' in str(e), True)
else:
self.fail()
def testVendorFormatSyntaxError(self):
self.assertRaises(ParseError, self.dict.ReadDictionary,
StringIO('ATTRIBUTE Test-Type 1 integer Simplon'))
try:
self.dict.ReadDictionary(StringIO(
'VENDOR Simplon 42 format=a,1'))
except ParseError as e:
self.assertEqual('Syntax' in str(e), True)
else:
self.fail()
def testBeginVendorTooFewColumns(self):
try:
self.dict.ReadDictionary(StringIO('BEGIN-VENDOR'))
except ParseError as e:
self.assertEqual('begin-vendor' in str(e), True)
else:
self.fail()
def testBeginVendorUnknownVendor(self):
try:
self.dict.ReadDictionary(StringIO('BEGIN-VENDOR Simplon'))
except ParseError as e:
self.assertEqual('Simplon' in str(e), True)
else:
self.fail()
def testBeginVendorParsing(self):
self.dict.ReadDictionary(StringIO(
'VENDOR Simplon 42\n'
'BEGIN-VENDOR Simplon\n'
'ATTRIBUTE Test-Type 1 integer'))
self.assertEqual(self.dict.attrindex['Test-Type'], (42, 1))
def testEndVendorUnknownVendor(self):
try:
self.dict.ReadDictionary(StringIO('END-VENDOR'))
except ParseError as e:
self.assertEqual('end-vendor' in str(e), True)
else:
self.fail()
def testEndVendorUnbalanced(self):
try:
self.dict.ReadDictionary(StringIO(
'VENDOR Simplon 42\n'
'BEGIN-VENDOR Simplon\n'
'END-VENDOR Oops\n'))
except ParseError as e:
self.assertEqual('Oops' in str(e), True)
else:
self.fail()
def testEndVendorParsing(self):
self.dict.ReadDictionary(StringIO(
'VENDOR Simplon 42\n'
'BEGIN-VENDOR Simplon\n'
'END-VENDOR Simplon\n'
'ATTRIBUTE Test-Type 1 integer'))
self.assertEqual(self.dict.attrindex['Test-Type'], 1)
def testInclude(self):
try:
self.dict.ReadDictionary(StringIO(
'$INCLUDE this_file_does_not_exist\n'
'VENDOR Simplon 42\n'
'BEGIN-VENDOR Simplon\n'
'END-VENDOR Simplon\n'
'ATTRIBUTE Test-Type 1 integer'))
except IOError as e:
self.assertEqual('this_file_does_not_exist' in str(e), True)
else:
self.fail()
def testDictFilePostParse(self):
f = DictFile(StringIO(
'VENDOR Simplon 42\n'))
for _ in f:
pass
self.assertEqual(f.File(), '')
self.assertEqual(f.Line(), -1)
def testDictFileParseError(self):
tmpdict = Dictionary()
try:
tmpdict.ReadDictionary(os.path.join(self.path, 'dictfiletest'))
except ParseError as e:
self.assertEqual('dictfiletest' in str(e), True)
else:
self.fail()
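# --- Illustrative example (editor's addition, not part of the original tests) ---
# A minimal sketch of the dictionary format exercised above: ATTRIBUTE, VALUE
# and VENDOR lines are parsed into Attribute objects that are looked up by
# name. The attribute name, code and value below are made up.
def _example_parse_inline_dictionary():
    d = Dictionary(StringIO(
        'ATTRIBUTE Example-Port 5 integer\n'
        'VALUE Example-Port Telnet 23\n'))
    # Stored values are kept in encoded form, hence DecodeAttr as in the tests.
    return DecodeAttr('integer', d['Example-Port'].values['Telnet'])  # -> 23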
| avg_line_length: 37.307918 | max_line_length: 101 | alphanum_fraction: 0.59574 |
| hexsha: b549b5e8e981d0cb09df28f3d4d5136feaf105e6 | size: 1,681 | ext: py | lang: Python |
| max_stars: repo_path=dynamic_yaml/__init__.py, repo_name=childsish/dynamic-yaml, repo_head_hexsha=a7186d7f9b18f3dc0859c18fcb2b9ed5655e9fba, licenses=["MIT"], count=27, event_min=2017-04-04T18:46:02.000Z, event_max=2022-01-14T16:09:52.000Z |
| max_issues: repo_path=dynamic_yaml/__init__.py, repo_name=childsish/dynamic-pyyaml, repo_head_hexsha=ea50d792120e87d1f2ad496364512d9591daacc0, licenses=["MIT"], count=10, event_min=2019-05-03T14:52:11.000Z, event_max=2021-11-11T10:49:37.000Z |
| max_forks: repo_path=dynamic_yaml/__init__.py, repo_name=childsish/dynamic-pyyaml, repo_head_hexsha=ea50d792120e87d1f2ad496364512d9591daacc0, licenses=["MIT"], count=8, event_min=2018-08-01T09:27:08.000Z, event_max=2022-01-06T06:10:50.000Z |
| content: |
import yaml
from typing import Type
class DynamicYamlLoader(yaml.FullLoader):
def __init__(self, stream):
super().__init__(stream)
self.root = None
def add_wrappers(loader: Type[DynamicYamlLoader]):
from .yaml_wrappers import DynamicYamlObject, YamlDict, YamlList
def _add_dict_wrapper(loader_: DynamicYamlLoader, node: yaml.MappingNode):
return YamlDict(((loader_.construct_object(key), loader_.construct_object(value)) for key, value in node.value))
def _add_list_wrapper(loader_: DynamicYamlLoader, node):
return YamlList((loader_.construct_object(child) for child in node.value))
def _represent_dynamic_yaml_dict(dumper: yaml.BaseDumper, data: YamlDict):
return dumper.represent_mapping(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, {key: data[key] for key in data._collection})
def _represent_dynamic_yaml_list(dumper: yaml.BaseDumper, data: YamlList):
return dumper.represent_sequence(yaml.resolver.BaseResolver.DEFAULT_SEQUENCE_TAG, [data[key] for key in range(len(data._collection))])
loader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, _add_dict_wrapper)
loader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_SEQUENCE_TAG, _add_list_wrapper)
yaml.add_representer(YamlDict, _represent_dynamic_yaml_dict)
yaml.add_representer(YamlList, _represent_dynamic_yaml_list)
def load(stream, loader=DynamicYamlLoader, recursive=False):
result = yaml.load(stream, Loader=loader)
result._set_as_root(recursive=recursive)
return result
def dump(data, *args, **kwargs):
return yaml.dump(data, *args, **kwargs)
add_wrappers(DynamicYamlLoader)
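# --- Illustrative usage (editor's addition) ---
# A minimal sketch of the public API defined above: load() parses YAML with
# DynamicYamlLoader so mappings and sequences come back as the YamlDict/YamlList
# wrappers, and dump() serialises them through the registered representers.
# The YAML text is made up.
def _example_round_trip():
    config = load("project:\n  name: demo\n  paths:\n    - src\n    - tests\n")
    return dump(config)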
| avg_line_length: 38.204545 | max_line_length: 142 | alphanum_fraction: 0.774539 |
| hexsha: 09fb7095773d5df5c3c3448d1042e7bf803d6286 | size: 1,646 | ext: py | lang: Python |
| max_stars: repo_path=osism/commands/bifrost.py, repo_name=osism/python-osism, repo_head_hexsha=cb4f74501f92fceab1b803d4990ef20335bb7ca1, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null |
| max_issues: repo_path=osism/commands/bifrost.py, repo_name=osism/python-osism, repo_head_hexsha=cb4f74501f92fceab1b803d4990ef20335bb7ca1, licenses=["Apache-2.0"], count=28, event_min=2022-02-03T16:45:11.000Z, event_max=2022-03-29T13:47:33.000Z |
| max_forks: repo_path=osism/commands/bifrost.py, repo_name=osism/python-osism, repo_head_hexsha=cb4f74501f92fceab1b803d4990ef20335bb7ca1, licenses=["Apache-2.0"], count=null, event_min=null, event_max=null |
| content: |
import argparse
import logging
from cliff.command import Command
from redis import Redis
from osism.tasks import ansible
redis = Redis(host="redis", port="6379")
class Run(Command):
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(Run, self).get_parser(prog_name)
parser.add_argument('arguments', nargs=argparse.REMAINDER, help='Arguments for Bifrost')
return parser
def take_action(self, parsed_args):
task = ansible.run.delay("manager", "bifrost-command", parsed_args.arguments)
task.wait(timeout=None, interval=0.5)
result = task.get()
print(result)
class Deploy(Command):
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(Deploy, self).get_parser(prog_name)
parser.add_argument('--no-wait', default=False, help='Do not wait until the role has been applied', action='store_true')
return parser
def take_action(self, parsed_args):
wait = not parsed_args.no_wait
ansible.run.delay("manager", "bifrost-deploy", [])
if wait:
p = redis.pubsub()
# NOTE: use task_id or request_id in future
p.subscribe("manager-bifrost-deploy")
while True:
for m in p.listen():
if type(m["data"]) == bytes:
if m["data"].decode("utf-8") == "QUIT":
redis.close()
# NOTE: Use better solution
return
print(m["data"].decode("utf-8"), end="")
| avg_line_length: 27.898305 | max_line_length: 128 | alphanum_fraction: 0.58627 |
| hexsha: 04cfd54cb4463d6e7b298dd9bdc67ccccfa7ec28 | size: 6,271 | ext: py | lang: Python |
| max_stars: repo_path=dockerfiles/tasks.py, repo_name=rtfd/autolint, repo_head_hexsha=baa26aa89229b0a59a9507eb5992e559d37775b1, licenses=["MIT"], count=null, event_min=null, event_max=null |
| max_issues: repo_path=dockerfiles/tasks.py, repo_name=rtfd/autolint, repo_head_hexsha=baa26aa89229b0a59a9507eb5992e559d37775b1, licenses=["MIT"], count=1, event_min=2018-03-15T23:05:13.000Z, event_max=2018-03-22T19:07:37.000Z |
| max_forks: repo_path=dockerfiles/tasks.py, repo_name=rtfd/readthedocs-common, repo_head_hexsha=baa26aa89229b0a59a9507eb5992e559d37775b1, licenses=["MIT"], count=null, event_min=null, event_max=null |
| content: |
from invoke import task
DOCKER_COMPOSE = 'common/dockerfiles/docker-compose.yml'
DOCKER_COMPOSE_SEARCH = 'common/dockerfiles/docker-compose-search.yml'
DOCKER_COMPOSE_WEBPACK = 'common/dockerfiles/docker-compose-webpack.yml'
DOCKER_COMPOSE_ASSETS = 'dockerfiles/docker-compose-assets.yml'
DOCKER_COMPOSE_OVERRIDE = 'docker-compose.override.yml'
DOCKER_COMPOSE_COMMAND = f'docker-compose -f {DOCKER_COMPOSE} -f {DOCKER_COMPOSE_OVERRIDE} -f {DOCKER_COMPOSE_SEARCH} -f {DOCKER_COMPOSE_WEBPACK}'
@task(help={
'cache': 'Build Docker image using cache (default: False)',
})
def build(c, cache=False):
"""Build docker image for servers."""
cache_opt = '' if cache else '--no-cache'
c.run(f'{DOCKER_COMPOSE_COMMAND} build {cache_opt}', pty=True)
@task(help={
'command': 'Command to pass directly to "docker-compose"',
})
def compose(c, command):
"""Pass the command to docker-compose directly."""
c.run(f'{DOCKER_COMPOSE_COMMAND} {command}', pty=True)
@task(help={
    'volumes': 'Delete all the data stored in volumes as well (default: False)',
})
def down(c, volumes=False):
"""Stop and remove all the docker containers."""
if volumes:
c.run(f'{DOCKER_COMPOSE_COMMAND} down -v', pty=True)
else:
c.run(f'{DOCKER_COMPOSE_COMMAND} down', pty=True)
@task(help={
'search': 'Start search container (default: True)',
'init': 'Perform initialization steps (default: False)',
'reload': 'Enable automatic process reloading (default: True)',
'webpack': 'Start webpack development server (default: False)',
'ext-theme': 'Enable new theme from ext-theme (default: False)',
'scale-build': 'Add additional build instances (default: 1)',
})
def up(c, search=True, init=False, reload=True, webpack=False, ext_theme=False, scale_build=1):
"""Start all the docker containers for a Read the Docs instance"""
cmd = []
cmd.append('INIT=t' if init else 'INIT=')
cmd.append('DOCKER_NO_RELOAD=t' if not reload else 'DOCKER_NO_RELOAD=')
cmd.append('docker-compose')
cmd.append(f'-f {DOCKER_COMPOSE}')
cmd.append(f'-f {DOCKER_COMPOSE_OVERRIDE}')
if search:
cmd.append(f'-f {DOCKER_COMPOSE_SEARCH}')
if webpack:
# This option implies the theme is enabled automatically
ext_theme = True
cmd.append(f'-f {DOCKER_COMPOSE_WEBPACK}')
cmd.insert(0, 'RTD_EXT_THEME_DEV_SERVER_ENABLED=t')
if ext_theme:
cmd.insert(0, 'RTD_EXT_THEME_ENABLED=t')
cmd.append('up')
cmd.append(f'--scale build={scale_build}')
c.run(' '.join(cmd), pty=True)
@task(help={
'running': 'Open the shell in a running container',
'container': 'Container to open the shell (default: web)'
})
def shell(c, running=True, container='web'):
"""Run a shell inside a container."""
if running:
c.run(f'{DOCKER_COMPOSE_COMMAND} exec {container} /bin/bash', pty=True)
else:
c.run(f'{DOCKER_COMPOSE_COMMAND} run --rm {container} /bin/bash', pty=True)
@task(help={
'command': 'Command to pass directly to "django-admin" inside the container',
'running': 'Execute "django-admin" in a running container',
'backupdb': 'Backup postgres database before running Django "manage" command',
})
def manage(c, command, running=True, backupdb=False):
"""Run manage.py with a specific command."""
subcmd = 'run --rm'
if running:
subcmd = 'exec'
if backupdb:
c.run(f'{DOCKER_COMPOSE_COMMAND} {subcmd} database pg_dumpall -c -U docs_user > dump_`date +%d-%m-%Y"_"%H_%M_%S`__`git rev-parse HEAD`.sql', pty=True)
c.run(f'{DOCKER_COMPOSE_COMMAND} {subcmd} web python3 manage.py {command}', pty=True)
@task(help={
'container': 'Container to attach',
})
def attach(c, container):
"""Attach a tty to a running container (useful for pdb)."""
prefix = c['container_prefix'] # readthedocsorg or readthedocs-corporate
c.run(f'docker attach --sig-proxy=false --detach-keys="ctrl-p,ctrl-p" {prefix}_{container}_1', pty=True)
@task(help={
'containers': 'Container(s) to restart (it may restart "nginx" container if required)',
})
def restart(c, containers):
"""Restart one or more containers."""
c.run(f'{DOCKER_COMPOSE_COMMAND} restart {containers}', pty=True)
# When restarting a container that nginx is connected to, we need to restart
# nginx as well because it has the IP cached
need_nginx_restart = [
'web',
'proxito',
'storage',
]
for extra in need_nginx_restart:
if extra in containers:
c.run(f'{DOCKER_COMPOSE_COMMAND} restart nginx', pty=True)
break
@task(help={
    'only_required': 'Only pull the required image (used by default for cloning). Use it if you don\'t need all images (default: False)',
})
def pull(c, only_required=False):
"""Pull all docker images required for build servers."""
images = [
('ubuntu-20.04-2022.02.16', 'ubuntu-20.04'),
]
if not only_required:
images.extend([
('6.0', 'stable'),
('7.0', 'latest'),
('8.0', 'testing'),
('ubuntu-22.04-2022.03.15', 'ubuntu-22.04'),
])
for image, tag in images:
c.run(f'docker pull readthedocs/build:{image}', pty=True)
c.run(f'docker tag readthedocs/build:{image} readthedocs/build:{tag}', pty=True)
@task(help={
'arguments': 'Arguments to pass directly to "tox" command',
'running': 'Run all tests in a running container',
})
def test(c, arguments='', running=True):
"""Run all test suite using ``tox``."""
if running:
c.run(f'{DOCKER_COMPOSE_COMMAND} exec -e GITHUB_USER=$GITHUB_USER -e GITHUB_TOKEN=$GITHUB_TOKEN web tox {arguments}', pty=True)
else:
c.run(f'{DOCKER_COMPOSE_COMMAND} run -e GITHUB_USER=$GITHUB_USER -e GITHUB_TOKEN=$GITHUB_TOKEN --rm --no-deps web tox {arguments}', pty=True)
@task
def buildassets(c):
"""Build all assets for the application and push them to backend storage"""
c.run(f'docker-compose -f {DOCKER_COMPOSE_ASSETS} run --rm assets bash -c "npm ci && node_modules/bower/bin/bower --allow-root update && npm run build"', pty=True)
c.run(f'{DOCKER_COMPOSE_COMMAND} run --rm web python3 manage.py collectstatic --noinput', pty=True)
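# --- Illustrative usage (editor's addition) ---
# These @task functions are normally driven through Invoke's CLI, e.g.
# "inv up --init --scale-build=2" or "inv manage migrate". A rough programmatic
# equivalent, assuming a plain invoke Context is enough for the c.run calls:
def _example_programmatic_up():
    from invoke import Context
    up(Context(), search=True, init=True, reload=True, webpack=False, scale_build=2)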
| avg_line_length: 39.19375 | max_line_length: 167 | alphanum_fraction: 0.668952 |
| hexsha: 881684398027de3fabce1c5edebaedaf0c541159 | size: 2,950 | ext: py | lang: Python |
| max_stars: repo_path=kingdom_sdk/adapters/unit_of_work.py, repo_name=t10d/kingdom-python-core, repo_head_hexsha=87aa614084e5692db23e58e0c82150e858985509, licenses=["MIT"], count=1, event_min=2021-11-16T14:59:08.000Z, event_max=2021-11-16T14:59:08.000Z |
| max_issues: repo_path=kingdom_sdk/adapters/unit_of_work.py, repo_name=t10d/kingdom-python-core, repo_head_hexsha=87aa614084e5692db23e58e0c82150e858985509, licenses=["MIT"], count=2, event_min=2021-11-05T17:38:05.000Z, event_max=2021-11-18T23:06:14.000Z |
| max_forks: repo_path=kingdom_sdk/adapters/unit_of_work.py, repo_name=t10d/kingdom-python-core, repo_head_hexsha=87aa614084e5692db23e58e0c82150e858985509, licenses=["MIT"], count=null, event_min=null, event_max=null |
| content: |
from abc import ABC
from typing import Any, Generator, Iterator, List, Set, Tuple
from sqlalchemy import create_engine
from sqlalchemy.orm import Session, sessionmaker
from kingdom_sdk import config
from kingdom_sdk.domain.aggregate import Aggregate
from kingdom_sdk.domain.exception import KingdomError
from kingdom_sdk.ports.unit_of_work import AbstractUnitOfWork
DEFAULT_SESSION_FACTORY = sessionmaker(
# ISOLATION LEVEL ENSURES aggregate's version IS RESPECTED
# That is, if version differs it will raise an exception
bind=create_engine(
config.get_database_url(),
isolation_level="REPEATABLE_READ",
),
autoflush=False,
)
class SQLAlchemyUnitOfWork(AbstractUnitOfWork, ABC):
"""Generic SQLAlchemy Unit of Work.
You only need to extend it and annotate the repositories types.
>>> class MyUnitOfWork(SQLAlchemyUnitOfWork):
... repository: ...
"""
_errors: List[Any]
_session_factory: sessionmaker
_session: Session
def __init__(
self, session_factory: sessionmaker = DEFAULT_SESSION_FACTORY
) -> None:
self._errors = []
self._session_factory = session_factory
def __enter__(self) -> AbstractUnitOfWork:
self._session = self._session_factory()
self._initialize_repositories(self._session)
return super().__enter__()
def __exit__(self, *args: Any) -> None:
super().__exit__(*args)
self._session.close()
def _commit(self) -> None:
self._session.commit()
def _rollback(self) -> None:
self._session.rollback()
def execute_native_statement(self, statement: str, **params: Any) -> Any:
return self._session.execute(statement, params)
def collect_new_events(self) -> Generator:
dirty: Set[Aggregate] = set()
for field_name, _ in self._repositories:
try:
repository = self.__dict__[field_name]
except KeyError as error:
raise RepositoryNotIntializedError(str(error))
if hasattr(repository, "_seen"):
dirty = dirty.union(repository._seen) # noqa
for aggregate in dirty:
while aggregate.has_events:
yield aggregate.next_event
def _initialize_repositories(self, session: Session) -> None:
for field_name, repository in self._repositories:
self.__dict__[field_name] = repository(session)
@property
def _repositories(self) -> Iterator[Tuple[str, Any]]:
return (
(field, module)
for field, module in self.__annotations__.items()
if not field.startswith("_")
)
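# --- Illustrative example (editor's addition) ---
# A sketch of the extension pattern described in the SQLAlchemyUnitOfWork
# docstring above: subclass it and annotate each repository field. The
# repository class here is hypothetical; the only requirement visible in this
# file is a constructor that accepts the SQLAlchemy Session (see
# _initialize_repositories), and usage would follow the AbstractUnitOfWork
# context-manager protocol, e.g. `with ExampleBookUnitOfWork() as uow: ...`.
class ExampleBookRepository:
    def __init__(self, session: Session) -> None:
        self._session = session

class ExampleBookUnitOfWork(SQLAlchemyUnitOfWork):
    books: ExampleBookRepository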
class RepositoryNotIntializedError(KingdomError):
def __init__(self, repository_name: str) -> None:
super().__init__(
f"The repository '{repository_name}' haven't been initialized yet",
"REPOSITORY_NOT_INITIALIZED_ERROR",
)
| avg_line_length: 31.052632 | max_line_length: 79 | alphanum_fraction: 0.668136 |
| hexsha: 249540d924e577f9c56dae5949821c31c4315da1 | size: 254 | ext: py | lang: Python |
| max_stars: repo_path=testpy/testqtgui.py, repo_name=quchunguang/test, repo_head_hexsha=dd1dde14a69d9e8b2c9ed3efbf536df7840f0487, licenses=["MIT"], count=1, event_min=2021-05-06T02:02:59.000Z, event_max=2021-05-06T02:02:59.000Z |
| max_issues: repo_path=testpy/testqtgui.py, repo_name=SrikanthParsha14/test, repo_head_hexsha=8cee69e09c8557d53d8d30382cec8ea5c1f82f6e, licenses=["MIT"], count=null, event_min=null, event_max=null |
| max_forks: repo_path=testpy/testqtgui.py, repo_name=SrikanthParsha14/test, repo_head_hexsha=8cee69e09c8557d53d8d30382cec8ea5c1f82f6e, licenses=["MIT"], count=1, event_min=2019-06-17T13:20:39.000Z, event_max=2019-06-17T13:20:39.000Z |
| content: |
#!/usr/bin/env python
import sys
from PyQt4 import QtGui
class Form(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
app = QtGui.QApplication(sys.argv)
form = Form()
form.show()
sys.exit(app.exec_())
| avg_line_length: 15.875 | max_line_length: 44 | alphanum_fraction: 0.700787 |
| hexsha: 15df941238eb710a49e7ec41d96f95909ac883ea | size: 1,508 | ext: py | lang: Python |
| max_stars: repo_path=thenewboston_node/business_logic/blockchain/base/__init__.py, repo_name=andbortnik/thenewboston-node, repo_head_hexsha=bd63c7def5f224286dba70f9560252a7da8ea712, licenses=["MIT"], count=null, event_min=null, event_max=null |
| max_issues: repo_path=thenewboston_node/business_logic/blockchain/base/__init__.py, repo_name=andbortnik/thenewboston-node, repo_head_hexsha=bd63c7def5f224286dba70f9560252a7da8ea712, licenses=["MIT"], count=null, event_min=null, event_max=null |
| max_forks: repo_path=thenewboston_node/business_logic/blockchain/base/__init__.py, repo_name=andbortnik/thenewboston-node, repo_head_hexsha=bd63c7def5f224286dba70f9560252a7da8ea712, licenses=["MIT"], count=null, event_min=null, event_max=null |
| content: |
from datetime import datetime
from typing import Type, TypeVar
from django.conf import settings
from thenewboston_node.core.utils.importing import import_from_string
from .account_state import AccountStateMixin
from .blockchain_state import BlockchainStateMixin
from .blocks import BlocksMixin
from .network import NetworkMixin
from .validation import ValidationMixin
T = TypeVar('T', bound='BlockchainBase')
# BlockchainBase is broken into several mixin classes to keep the size of any single source file down
# and to simplify navigation over the class code
class BlockchainBase(ValidationMixin, BlockchainStateMixin, BlocksMixin, AccountStateMixin, NetworkMixin):
_instance = None
def __init__(self, snapshot_period_in_blocks=None):
self.snapshot_period_in_blocks = snapshot_period_in_blocks
@classmethod
def get_instance(cls: Type[T]) -> T:
instance = cls._instance
if not instance:
blockchain_settings = settings.BLOCKCHAIN
class_ = import_from_string(blockchain_settings['class'])
instance = class_(**(blockchain_settings.get('kwargs') or {}))
cls._instance = instance
return instance
@classmethod
def clear_instance_cache(cls):
cls._instance = None
def clear(self):
raise NotImplementedError('Must be implemented in a child class')
def utcnow(self):
return datetime.utcnow()
def is_empty(self):
return not (self.has_blockchain_states() or self.has_blocks())
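# --- Illustrative sketch (editor's addition) ---
# get_instance() above resolves the blockchain class from Django settings. A
# hypothetical settings.BLOCKCHAIN value of the shape it expects (only the
# 'class'/'kwargs' keys come from the code; the dotted path is made up):
def _example_blockchain_setting():
    return {
        'class': 'myproject.blockchain.MyBlockchain',    # resolved via import_from_string
        'kwargs': {'snapshot_period_in_blocks': 100},    # forwarded to the constructor
    }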
| avg_line_length: 31.416667 | max_line_length: 110 | alphanum_fraction: 0.736074 |
| hexsha: c960436e8cadd5221cc3ed1ab18c015117a43ec1 | size: 9,005 | ext: py | lang: Python |
| max_stars: repo_path=scripts_ddpg/evaluate2-clde.py, repo_name=lbaiao/sys-simulator-2, repo_head_hexsha=94f00d43309fe7b56dac5099bd4024695ba317b6, licenses=["MIT"], count=1, event_min=2020-06-14T13:50:28.000Z, event_max=2020-06-14T13:50:28.000Z |
| max_issues: repo_path=scripts_ddpg/evaluate2-clde.py, repo_name=lbaiao/sys-simulator-2, repo_head_hexsha=94f00d43309fe7b56dac5099bd4024695ba317b6, licenses=["MIT"], count=null, event_min=null, event_max=null |
| max_forks: repo_path=scripts_ddpg/evaluate2-clde.py, repo_name=lbaiao/sys-simulator-2, repo_head_hexsha=94f00d43309fe7b56dac5099bd4024695ba317b6, licenses=["MIT"], count=null, event_min=null, event_max=null |
| content: |
from shutil import copyfile
import matplotlib.pyplot as plt
import os
from math import pi
import numpy as np
from copy import deepcopy
from time import time
from sys_simulator.plots import plot_positions, plot_trajectories
from sys_simulator.ddpg.agent import SurrogateAgent, SysSimAgent
from sys_simulator.parameters.parameters import EnvironmentParameters
from sys_simulator.q_learning.environments.completeEnvironment12 import CompleteEnvironment12
from sys_simulator.channels import BANChannel, UrbanMacroNLOSWinnerChannel
import sys_simulator.general as gen
from sys_simulator.general import load_with_pickle, print_evaluating, random_seed, save_with_pickle
from sys_simulator.ddpg.framework import Framework
import torch
from torch.utils.tensorboard.writer import SummaryWriter
# parameters
ALGO_NAME = 'ddpg'
# FRAMEWORK_PATH = '/home/lucas/dev/sys-simulator-2/data/ddpg/script8/20210522-200418/last_model.pt' # noqa
# FRAMEWORK_PATH = '/home/lucas/dev/sys-simulator-2/data/ddpg/script8-noparamnoise/20210610-121812/last_model.pt'
DATA_PATH = '/home/lucas/dev/sys-simulator-2/data/ddpg/script10/20210627-201024' # noqa
# DATA_PATH = '/home/lucas/dev/sys-simulator-2/data/ddpg/script10/20210627-194449' # noqa
FRAMEWORK_PATH = f'{DATA_PATH}/last_model.pt'
ENV_PATH = f'{DATA_PATH}/env.pickle'
n_mues = 1 # number of mues
n_rb = n_mues # number of RBs
carrier_frequency = 2.4 # carrier frequency in GHz
bs_radius = 1000 # bs radius in m
rb_bandwidth = 180*1e3 # rb bandwidth in Hz
d2d_pair_distance = 5 # d2d pair distance in m
device_height = 1.5 # mobile devices height in m
bs_height = 25 # BS antenna height in m
p_max = 40 # max tx power in dBm
noise_power = -116 # noise power per RB in dBm
bs_gain = 17 # macro bs antenna gain in dBi
user_gain = 4 # user antenna gain in dBi
sinr_threshold_train = 6 # mue sinr threshold in dB for training
mue_margin = 6
MIN_D2D_PAIR_DISTANCE = 1.5
MAX_D2D_PAIR_DISTANCE = 15
# conversions from dBm to dB
p_max = p_max - 30
noise_power = noise_power - 30
# env parameters
RND_SEED = True
SEED = 42
CHANNEL_RND = True
C = 8 # C constant for the improved reward function
ENVIRONMENT_MEMORY = 2
MAX_NUMBER_OF_AGENTS = 2
REWARD_PENALTY = 1.5
DELTA_T = .5
# q-learning parameters
# training
REWARD_FUNCTION = 'jain'
STATES_OPTIONS = ['sinrs', 'positions', 'channels']
MOTION_MODEL = 'forward'
EVAL_STEPS = 700
# writer
filename = gen.path_leaf(__file__)
filename = filename.split('.')[0]
dir_path = f'data/{ALGO_NAME}/{filename}'
data_path, _ = gen.make_dir_timestamp(dir_path)
writer = SummaryWriter(f'{data_path}/tensorboard')
if RND_SEED:
random_seed(SEED)
torch_device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
env_params = EnvironmentParameters(
rb_bandwidth, None, p_max, noise_power,
bs_gain, user_gain, sinr_threshold_train,
n_mues, MAX_NUMBER_OF_AGENTS, n_rb, bs_radius,
c_param=C, mue_margin=mue_margin,
min_d2d_pair_distance=MIN_D2D_PAIR_DISTANCE,
max_d2d_pair_distance=MAX_D2D_PAIR_DISTANCE
)
channel_to_devices = BANChannel(rnd=CHANNEL_RND)
channel_to_bs = UrbanMacroNLOSWinnerChannel(
rnd=CHANNEL_RND, f_c=carrier_frequency, h_bs=bs_height, h_ms=device_height,
small_sigma=4.0, sigma=8.0
)
ref_env = CompleteEnvironment12(
env_params,
channel_to_bs,
channel_to_devices,
reward_penalty=REWARD_PENALTY,
memory=ENVIRONMENT_MEMORY,
bs_height=bs_height,
reward_function=REWARD_FUNCTION,
states_options=STATES_OPTIONS,
memories_capacity=int(1e3),
dt=DELTA_T
)
a_min = -90
a_max = 60
a_offset = -10
# a_min = 0 + 1e-9
# a_max = db_to_power(p_max - 10)
action_size = MAX_NUMBER_OF_AGENTS
framework: Framework = torch.load(FRAMEWORK_PATH, map_location=torch_device)
framework.actor.eval()
central_agent_test = SysSimAgent(a_min, a_max, 'perturberd',
torch_device, a_offset=a_offset)
surr_agents = [SurrogateAgent() for _ in range(MAX_NUMBER_OF_AGENTS)]
pairs_positions = [
((-1, 0, device_height), (-1, 5, device_height)),
((-1, 0, device_height), (-1, -5, device_height)),
]
pairs_directions = [
(2*pi/3, 2*pi/3),
(4*pi/3, 4*pi/3),
]
mue_position = (900, 0, device_height)
mue_direction = pi
n_agents = len(pairs_positions)
env: CompleteEnvironment12 = load_with_pickle(ENV_PATH)
env.dt = DELTA_T
# env.channel_to_bs = channel_to_bs
def evaluate(start: float, writer: SummaryWriter, env: CompleteEnvironment12):
step = 0
env.reset()
env.set_scenario(pairs_positions, mue_position, surr_agents, motion_model=MOTION_MODEL) # noqa
# set directions
for (tx, rx), (d_tx, d_rx) in zip(env.d2d_pairs, pairs_directions):
tx.motion_model.direction = d_tx
rx.motion_model.direction = d_rx
env.d2d_pairs[0][0].motion_model.speed = 0
env.d2d_pairs[0][1].motion_model.speed = 0
env.mue.motion_model.direction = mue_direction
# positions fig
positions_fig = plot_positions(
env.bs, [env.mue],
[p[0] for p in env.d2d_pairs],
[p[1] for p in env.d2d_pairs],
False
)
devices = env.get_devices()
trajectories = {d.id: [d.position] for d in devices}
fig_name = 'original_positions'
svg_path = f'{data_path}/{fig_name}.svg'
eps_path = f'{data_path}/{fig_name}.eps'
plt.savefig(svg_path)
os.system(f'magick convert {svg_path} {eps_path}')
writer.add_figure('Devices positions', positions_fig)
d2d_sinrs = []
mue_sinrs = []
d2d_tx_powers = []
mue_availability = []
mue_tx_powers = []
while step < EVAL_STEPS:
obs, _, _, _ = env.step(surr_agents)
obs = np.array(obs)
now = (time() - start) / 60
print_evaluating(step, now, EVAL_STEPS)
done = False
i = 0
actions = central_agent_test.act(obs, framework, False)
# db_actions = power_to_db(actions)
db_actions = actions
d2d_tx_powers.append(db_actions.numpy())
for j, agent in enumerate(surr_agents):
agent.set_action(db_actions[j].item())
next_obs, reward, done, _ = env.step(surr_agents)
next_obs = np.array(next_obs)
# framework.replay_memory.push(obs, actions, reward, next_obs,
# done)
obs = next_obs
i += 1
step += 1
writer.add_scalar('3. Eval - Rewards', np.mean(reward), step)
sinrs = [a.d2d_tx.sinr for a in surr_agents]
d2d_sinrs.append(sinrs)
sinrs = {f'device {i}': s for i, s in enumerate(sinrs)}
writer.add_scalars('3. Eval - SINRs [dB]', sinrs, step)
writer.add_scalars(
'3. Eval - Transmission powers [dBW]',
{f'device {i}': a for i, a in enumerate(db_actions)},
step
)
ctbs = {
'device 0': env.total_losses['DUE.TX:0']['BS:0'],
'device 1': env.total_losses['DUE.TX:1']['BS:0'],
}
writer.add_scalars(
'3. Eval - Channel to BS [dB]',
ctbs,
step
)
writer.add_scalar('3. Eval - MUE Tx Power [dB]',
env.mue.tx_power, step)
mue_tx_powers.append(env.mue.tx_power)
writer.add_scalar(
'3. Eval - MUE SINR [dB]', env.mue.sinr, step)
mue_sinrs.append(env.mue.sinr)
mue_success = int(env.mue.sinr > env.params.sinr_threshold)
mue_availability.append(mue_success)
writer.add_scalar('3. Eval - MUE success', mue_success, step)
for d in env.get_devices():
trajectories[d.id].append(d.position)
avg_mue_availability = np.mean(mue_availability)
avg_d2d_sinrs = np.mean(d2d_sinrs, axis=0)
writer.add_text('3. Eval - Average MUE availability',
str(avg_mue_availability), step)
for i, s in enumerate(avg_d2d_sinrs):
writer.add_text(f'3. Eval - Average D2D {i} SINR', str(s), step)
# trajectories fig
traj_figs = plot_trajectories(env, trajectories)
fig_name = 'trajectories'
svg_path = f'{data_path}/{fig_name}.svg'
eps_path = f'{data_path}/{fig_name}.eps'
plt.savefig(svg_path)
writer.add_figure('3. Eval - Trajectories', traj_figs, step)
os.system(f'magick convert {svg_path} {eps_path}')
return mue_availability, mue_sinrs, d2d_sinrs, d2d_tx_powers,\
trajectories, mue_tx_powers
if __name__ == '__main__':
start = time()
mue_availability, mue_sinrs, d2d_sinrs, d2d_tx_powers,\
trajectories, mue_tx_powers = evaluate(start, writer, env)
# save stuff
data = {
'mue_availability': mue_availability,
'mue_sinrs': mue_sinrs,
'd2d_sinrs': d2d_sinrs,
'd2d_tx_powers': d2d_tx_powers,
'trajectories': trajectories,
'mue_tx_powers': mue_tx_powers,
}
now = (time() - start) / 60
data_file_path = f'{data_path}/log.pickle'
save_with_pickle(data, data_file_path)
copyfile(__file__, f'{data_path}/{filename}.py')
print(f'done. Elapsed time: {now} minutes.')
| avg_line_length: 37.210744 | max_line_length: 113 | alphanum_fraction: 0.687174 |
| hexsha: 505ad7e815e372fe541155bb18eb93a5c4f722f7 | size: 9,780 | ext: py | lang: Python |
| max_stars: repo_path=airflow/contrib/operators/postgres_to_gcs_operator.py, repo_name=rlkelly/incubator-airflow, repo_head_hexsha=7105e40a2a2f0801c85402c49fadfb3fd264267f, licenses=["Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause"], count=null, event_min=null, event_max=null |
| max_issues: repo_path=airflow/contrib/operators/postgres_to_gcs_operator.py, repo_name=rlkelly/incubator-airflow, repo_head_hexsha=7105e40a2a2f0801c85402c49fadfb3fd264267f, licenses=["Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause"], count=null, event_min=null, event_max=null |
| max_forks: repo_path=airflow/contrib/operators/postgres_to_gcs_operator.py, repo_name=rlkelly/incubator-airflow, repo_head_hexsha=7105e40a2a2f0801c85402c49fadfb3fd264267f, licenses=["Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause"], count=null, event_min=null, event_max=null |
| content: |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import json
import time
import datetime
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from decimal import Decimal
from tempfile import NamedTemporaryFile
PY3 = sys.version_info[0] == 3
class PostgresToGoogleCloudStorageOperator(BaseOperator):
"""
Copy data from Postgres to Google Cloud Storage in JSON format.
"""
template_fields = ('sql', 'bucket', 'filename', 'schema_filename',
'parameters')
template_ext = ('.sql', )
ui_color = '#a0e08c'
@apply_defaults
def __init__(self,
sql,
bucket,
filename,
schema_filename=None,
approx_max_file_size_bytes=1900000000,
postgres_conn_id='postgres_default',
google_cloud_storage_conn_id='google_cloud_default',
delegate_to=None,
parameters=None,
*args,
**kwargs):
"""
:param sql: The SQL to execute on the Postgres table.
:type sql: string
:param bucket: The bucket to upload to.
:type bucket: string
:param filename: The filename to use as the object name when uploading
to Google Cloud Storage. A {} should be specified in the filename
to allow the operator to inject file numbers in cases where the
file is split due to size.
:type filename: string
:param schema_filename: If set, the filename to use as the object name
when uploading a .json file containing the BigQuery schema fields
for the table that was dumped from Postgres.
:type schema_filename: string
:param approx_max_file_size_bytes: This operator supports the ability
to split large table dumps into multiple files (see notes in the
            filename param docs above). Google Cloud Storage allows for files
to be a maximum of 4GB. This param allows developers to specify the
file size of the splits.
:type approx_max_file_size_bytes: long
:param postgres_conn_id: Reference to a specific Postgres hook.
:type postgres_conn_id: string
:param google_cloud_storage_conn_id: Reference to a specific Google
cloud storage hook.
:type google_cloud_storage_conn_id: string
:param delegate_to: The account to impersonate, if any. For this to
work, the service account making the request must have domain-wide
delegation enabled.
:param parameters: a parameters dict that is substituted at query runtime.
:type parameters: dict
"""
super(PostgresToGoogleCloudStorageOperator, self).__init__(*args, **kwargs)
self.sql = sql
self.bucket = bucket
self.filename = filename
self.schema_filename = schema_filename
self.approx_max_file_size_bytes = approx_max_file_size_bytes
self.postgres_conn_id = postgres_conn_id
self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
self.delegate_to = delegate_to
self.parameters = parameters
def execute(self, context):
cursor = self._query_postgres()
files_to_upload = self._write_local_data_files(cursor)
# If a schema is set, create a BQ schema JSON file.
if self.schema_filename:
files_to_upload.update(self._write_local_schema_file(cursor))
# Flush all files before uploading
for file_handle in files_to_upload.values():
file_handle.flush()
self._upload_to_gcs(files_to_upload)
# Close all temp file handles.
for file_handle in files_to_upload.values():
file_handle.close()
def _query_postgres(self):
"""
Queries Postgres and returns a cursor to the results.
"""
postgres = PostgresHook(postgres_conn_id=self.postgres_conn_id)
conn = postgres.get_conn()
cursor = conn.cursor()
cursor.execute(self.sql, self.parameters)
return cursor
def _write_local_data_files(self, cursor):
"""
Takes a cursor, and writes results to a local file.
:return: A dictionary where keys are filenames to be used as object
names in GCS, and values are file handles to local files that
contain the data for the GCS objects.
"""
schema = list(map(lambda schema_tuple: schema_tuple[0], cursor.description))
file_no = 0
tmp_file_handle = NamedTemporaryFile(delete=True)
tmp_file_handles = {self.filename.format(file_no): tmp_file_handle}
for row in cursor:
# Convert datetime objects to utc seconds, and decimals to floats
row = map(self.convert_types, row)
row_dict = dict(zip(schema, row))
s = json.dumps(row_dict, sort_keys=True)
if PY3:
s = s.encode('utf-8')
tmp_file_handle.write(s)
# Append newline to make dumps BigQuery compatible.
tmp_file_handle.write(b'\n')
# Stop if the file exceeds the file size limit.
if tmp_file_handle.tell() >= self.approx_max_file_size_bytes:
file_no += 1
tmp_file_handle = NamedTemporaryFile(delete=True)
tmp_file_handles[self.filename.format(file_no)] = tmp_file_handle
return tmp_file_handles
def _write_local_schema_file(self, cursor):
"""
Takes a cursor, and writes the BigQuery schema for the results to a
local file system.
        :return: A dictionary where the key is a filename to be used as an
            object name in GCS, and the value is a file handle to a local file
            that contains the BigQuery schema fields in .json format.
"""
schema = []
for field in cursor.description:
# See PEP 249 for details about the description tuple.
field_name = field[0]
field_type = self.type_map(field[1])
field_mode = 'REPEATED' if field[1] in (1009, 1005, 1007,
1016) else 'NULLABLE'
schema.append({
'name': field_name,
'type': field_type,
'mode': field_mode,
})
self.log.info('Using schema for %s: %s', self.schema_filename, schema)
tmp_schema_file_handle = NamedTemporaryFile(delete=True)
s = json.dumps(schema, sort_keys=True)
if PY3:
s = s.encode('utf-8')
tmp_schema_file_handle.write(s)
return {self.schema_filename: tmp_schema_file_handle}
def _upload_to_gcs(self, files_to_upload):
"""
Upload all of the file splits (and optionally the schema .json file) to
Google Cloud Storage.
"""
hook = GoogleCloudStorageHook(
google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
delegate_to=self.delegate_to)
for object, tmp_file_handle in files_to_upload.items():
hook.upload(self.bucket, object, tmp_file_handle.name,
'application/json')
@classmethod
def convert_types(cls, value):
"""
Takes a value from Postgres, and converts it to a value that's safe for
JSON/Google Cloud Storage/BigQuery. Dates are converted to UTC seconds.
Decimals are converted to floats. Times are converted to seconds.
"""
if type(value) == datetime.datetime:
return time.mktime(value.timetuple())
elif type(value) == datetime.date:
return value.strftime('%Y-%m-%d')
        elif type(value) == datetime.time:
            formatted_time = time.strptime(str(value), "%H:%M:%S")
            return datetime.timedelta(
                hours=formatted_time.tm_hour,
                minutes=formatted_time.tm_min,
                seconds=formatted_time.tm_sec).seconds
elif isinstance(value, Decimal):
return float(value)
else:
return value
@classmethod
def type_map(cls, postgres_type):
"""
Helper function that maps from Postgres fields to BigQuery fields. Used
when a schema_filename is set.
"""
d = {
1114: 'TIMESTAMP',
1184: 'TIMESTAMP',
1083: 'TIMESTAMP',
1082: 'DATE',
1005: 'INTEGER',
1007: 'INTEGER',
1016: 'INTEGER',
20: 'INTEGER',
21: 'INTEGER',
23: 'INTEGER',
16: 'BOOLEAN',
700: 'FLOAT',
701: 'FLOAT',
1700: 'FLOAT'
}
        return d.get(postgres_type, 'STRING')
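# --- Hedged usage sketch (not part of the original file) ----------------------
# The attributes used above (sql, bucket, filename, schema_filename and the two
# connection ids) appear to match Airflow 1.x's contrib
# PostgresToGoogleCloudStorageOperator; assuming that is the class defined
# earlier in this module, a DAG might wire it up as below. Every task id, SQL
# statement, bucket and object name here is an illustrative assumption, not
# taken from the source.
if __name__ == '__main__':
    from datetime import datetime as _dt
    from airflow import DAG

    with DAG('postgres_to_gcs_example', start_date=_dt(2021, 1, 1),
             schedule_interval=None) as example_dag:
        export_task = PostgresToGoogleCloudStorageOperator(
            task_id='export_my_table',
            postgres_conn_id='postgres_default',
            google_cloud_storage_conn_id='google_cloud_default',
            sql='SELECT * FROM my_table',
            bucket='my-example-bucket',
            filename='exports/my_table_{}.json',    # '{}' is replaced by the file-split number
            schema_filename='schemas/my_table.json',
        )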
| 39.277108
| 84
| 0.628425
|
a948a523a83aae6e5da3f9b54c785f6b6f5ce40e
| 1,276
|
py
|
Python
|
main.py
|
jeanmira/Trabalho-sobre-metodos-numericos
|
f43042ffcc11e4c750e247af8e9b2793d5cacdf9
|
[
"MIT"
] | null | null | null |
main.py
|
jeanmira/Trabalho-sobre-metodos-numericos
|
f43042ffcc11e4c750e247af8e9b2793d5cacdf9
|
[
"MIT"
] | null | null | null |
main.py
|
jeanmira/Trabalho-sobre-metodos-numericos
|
f43042ffcc11e4c750e247af8e9b2793d5cacdf9
|
[
"MIT"
] | null | null | null |
# ------------------------------- /usr/bin/g++-7 ------------------------------#
# ------------------------------- coding: utf-8 -------------------------------#
# Created by: Jean Marcelo Mira Junior
#             Lucas Daniel dos Santos
# Version: 1.0
# Created on: 13/04/2021
# Operating system: Linux - Ubuntu 20.04.1 LTS
# Python 3
# ------------------------------ Packages -------------------------------------#
import matplotlib.pyplot as plt
import biblioteca as bib
import numpy as np
# -----------------------------------------------------------------------------#
def f(s, a):
    # Right-hand side of the ODE system in the arc length s, with state
    # a = (fi, r, z) and parameter bo (likely the Bond number of the drop);
    # at s == 0 the sin(fi)/r term is singular, so it is dropped there.
    fi, r, z = a
    bo = 0.4
    if s != 0:
        return np.array([2 - bo*z - np.sin(fi)/r, np.cos(fi), np.sin(fi)])
    else:
        return np.array([2 - bo*z, np.cos(fi), np.sin(fi)])
# Euler numerical method
se, re = bib.edoEuler(f, (0, 0, 0), 0, 400, 0.01)
# Heun numerical method
sh, rh = bib.edoHeun(f, (0, 0, 0), 0, 400, 0.01)
# Runge-Kutta numerical method
sr, rr = bib.edoRungeKutta(f, (0, 0, 0), 0, 400, 0.01)
# Runge-Kutta-Fehlberg numerical method
sf, rf = bib.edoRungeKuttaFehlberg(
f, (0, 0, 0), 0, 52, 0.01, 10 ** -3, 4, 0.1)
# bib.planilha(400, se, re, sh, rh, sr, rr, sf, rf)
bib.grafico(se, re, sh, rh, sr, rr, sf, rf)
# bib.gota(re, rh, rr, rf)
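# --- Hedged sketch (not part of the original 'biblioteca' module) -------------
# bib.edoEuler above comes from a module that is not shown here. A minimal
# fixed-step explicit Euler integrator with a compatible call shape could look
# like the function below; treating the fourth argument as the number of steps
# (rather than the final value of s) is an assumption made for illustration.
def euler_sketch(func, y0, s0, n_steps, h):
    s_values = [s0]
    y_values = [np.array(y0, dtype=float)]
    for _ in range(n_steps):
        s, y = s_values[-1], y_values[-1]
        y_values.append(y + h * func(s, y))  # y_{k+1} = y_k + h * f(s_k, y_k)
        s_values.append(s + h)
    return np.array(s_values), np.array(y_values)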
| 31.121951
| 80
| 0.467868
|
4915cd58e1672d903087e1f46f0c36273e58ddbf
| 2,174
|
py
|
Python
|
venv/lib/python3.6/site-packages/django_otp/plugins/otp_totp/migrations/0001_initial.py
|
ostar0816/mc-crypto
|
80ad9896aed1dc952f819a404a458ccfad207d8e
|
[
"MIT"
] | 4
|
2018-10-19T04:36:20.000Z
|
2020-02-13T16:14:09.000Z
|
venv/lib/python3.6/site-packages/django_otp/plugins/otp_totp/migrations/0001_initial.py
|
ostar0816/mc-crypto
|
80ad9896aed1dc952f819a404a458ccfad207d8e
|
[
"MIT"
] | null | null | null |
venv/lib/python3.6/site-packages/django_otp/plugins/otp_totp/migrations/0001_initial.py
|
ostar0816/mc-crypto
|
80ad9896aed1dc952f819a404a458ccfad207d8e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django_otp.plugins.otp_totp.models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='TOTPDevice',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(help_text='The human-readable name of this device.', max_length=64)),
('confirmed', models.BooleanField(default=True, help_text='Is this device ready for use?')),
('key', models.CharField(default=django_otp.plugins.otp_totp.models.default_key, help_text='A hex-encoded secret key of up to 40 bytes.', max_length=80, validators=[django_otp.plugins.otp_totp.models.key_validator])),
('step', models.PositiveSmallIntegerField(default=30, help_text='The time step in seconds.')),
('t0', models.BigIntegerField(default=0, help_text='The Unix time at which to begin counting steps.')),
('digits', models.PositiveSmallIntegerField(default=6, help_text='The number of digits to expect in a token.', choices=[(6, 6), (8, 8)])),
('tolerance', models.PositiveSmallIntegerField(default=1, help_text='The number of time steps in the past or future to allow.')),
('drift', models.SmallIntegerField(default=0, help_text='The number of time steps the prover is known to deviate from our clock.')),
('last_t', models.BigIntegerField(default=-1, help_text='The t value of the latest verified token. The next token must be at a higher time step.')),
('user', models.ForeignKey(help_text='The user that this device belongs to.', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
options={
'abstract': False,
'verbose_name': 'TOTP device',
},
bases=(models.Model,),
),
]
| 57.210526
| 233
| 0.650874
|
9bc694208a5ef8553057a2796a045bd1941b1d20
| 875
|
py
|
Python
|
Sum Consecutives.py
|
Darkhunter9/python
|
8caa3d0aa4b06e5e48871ff085fc740b0ea2d7aa
|
[
"MIT"
] | null | null | null |
Sum Consecutives.py
|
Darkhunter9/python
|
8caa3d0aa4b06e5e48871ff085fc740b0ea2d7aa
|
[
"MIT"
] | null | null | null |
Sum Consecutives.py
|
Darkhunter9/python
|
8caa3d0aa4b06e5e48871ff085fc740b0ea2d7aa
|
[
"MIT"
] | 1
|
2022-01-08T00:20:15.000Z
|
2022-01-08T00:20:15.000Z
|
def sum_consecutives(a):
if not a:
return []
result = []
cur = a[0]
temp = 0
for i in a:
if i == cur:
temp += i
else:
result.append(temp)
temp = i
cur = i
result.append(temp)
return result
if __name__ == '__main__':
print("Example:")
print(list(sum_consecutives([1, 1, 1, 1])))
    # These "asserts" are used for self-checking and not for auto-testing
assert list(sum_consecutives([1, 1, 1, 1])) == [4]
assert list(sum_consecutives([1, 1, 2, 2])) == [2, 4]
assert list(sum_consecutives([1, 1, 2, 1])) == [2, 2, 1]
assert list(sum_consecutives([3, 3, 3, 4, 4, 5, 6, 6])) == [9, 8, 5, 12]
assert list(sum_consecutives([1])) == [1]
assert list(sum_consecutives([])) == []
print("Coding complete? Click 'Check' to earn cool rewards!")
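# --- Alternative sketch (not part of the original solution) -------------------
# The same grouping of consecutive equal values can be expressed with
# itertools.groupby, which yields one group per run of equal elements:
from itertools import groupby


def sum_consecutives_groupby(a):
    return [sum(group) for _, group in groupby(a)]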
| 27.34375
| 76
| 0.540571
|
08b778d81a185c7a2c50354fc0ba0a23ca563aa8
| 1,664
|
py
|
Python
|
national_id/validators.py
|
AhmedElmougy/national-id-validator
|
27d81cd6e3ef556074c0fd5097db0537fd2114c2
|
[
"BSD-3-Clause"
] | 1
|
2021-06-24T08:31:44.000Z
|
2021-06-24T08:31:44.000Z
|
national_id/validators.py
|
AhmedElmougy/national-id-validator
|
27d81cd6e3ef556074c0fd5097db0537fd2114c2
|
[
"BSD-3-Clause"
] | null | null | null |
national_id/validators.py
|
AhmedElmougy/national-id-validator
|
27d81cd6e3ef556074c0fd5097db0537fd2114c2
|
[
"BSD-3-Clause"
] | null | null | null |
import re
from datetime import date, datetime
from rest_framework import serializers
from national_id.governorate_list import GOVERNORATES
class NationalIdValidator:
"""
validates national id number
"""
def __call__(self, value):
# validate general id format
        regex = r"^(?P<century>[23]{1})"\
                r"(?P<year>\d{2})"\
                r"(?P<month>\d{2})"\
                r"(?P<day>\d{2})"\
                r"(?P<govcode>\d{2})"\
                r"(?P<bseq>\d{3})"\
                r"(?P<gender>\d{1})"\
                r"(?P<validno>\d{1})"
# evaluate regexp
matches = re.search(regex, str(value))
        if matches is None:
message = 'invalid national id format'
raise serializers.ValidationError(message)
        # Validate the governorate code digits
        try:
            self.place_of_birth = GOVERNORATES[matches.group('govcode')]
        except KeyError:
            message = 'invalid governorate code format'
            raise serializers.ValidationError(message)
# validate date of birth format
try:
self.date_of_birth = date(
int(matches.group('year')),
int(matches.group('month')),
int(matches.group('day'))
)
        except ValueError:
message = 'invalid birth date format'
raise serializers.ValidationError(message)
# validate century digit against birth date
if matches.group('century') == '3' and \
(int(matches.group('year'))+2000) > datetime.now().year:
            message = "century and year don't match"
raise serializers.ValidationError(message)
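# --- Hedged usage sketch (illustrative, not part of the original module) ------
# A DRF serializer would typically attach the validator through the field's
# `validators` argument; the serializer and field names below are assumptions.
class ExampleNationalIdSerializer(serializers.Serializer):
    national_id = serializers.CharField(
        max_length=14,  # the regex above expects exactly 14 digits
        validators=[NationalIdValidator()],
    )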
| 29.714286
| 72
| 0.551683
|
2234614c3958f4537d24b2801eff0fb449340c36
| 6,792
|
py
|
Python
|
openflix/providers/ygg/ygg.py
|
ChevalFugace/OpenFlix
|
cf2cadca1a919091c327afffed26c3280449def5
|
[
"MIT"
] | null | null | null |
openflix/providers/ygg/ygg.py
|
ChevalFugace/OpenFlix
|
cf2cadca1a919091c327afffed26c3280449def5
|
[
"MIT"
] | null | null | null |
openflix/providers/ygg/ygg.py
|
ChevalFugace/OpenFlix
|
cf2cadca1a919091c327afffed26c3280449def5
|
[
"MIT"
] | null | null | null |
from openflix.interface import Interface, expose
from openflix import app
from mastodon import Mastodon
from yggtorrentscraper import (YggTorrentScraper, set_yggtorrent_tld,
get_yggtorrent_tld, categories)
import time
import datetime
from http import HTTPStatus as status
from flask import Response, abort, request
import requests
from threading import Timer
import re
import logging
import torrent_parser
from openflix.openflix_utils import route
from openflix import openflix_config
from random import randint
from .cloudflare_bypass import create_cfhandler
def make_ygg_url():
return f"https://www2.yggtorrent.{get_yggtorrent_tld()}"
def get_first_occ(fields, name):
for field in fields:
if field.name == name:
return field
def get_category(name):
for category in categories:
if category["name"] == name:
return category
def t_to_sec(value):
t = time.strptime(value, "%H:%M:%S")
return datetime.timedelta(hours=t.tm_hour,
minutes=t.tm_min,
seconds=t.tm_sec).total_seconds()
class YGGProvider(Interface):
def __init__(self, **options):
self.load_config(options)
with app.app_context():
super().__init__("ygg",subdomain=self.config["subdomain"],
url_prefix="/ygg")
self.init_scraper()
self.init_masto()
self.tracker_url = None
timing = options["update_tld_period"]
if timing > 0:
Timer(timing, self.update_domain).start()
# app.add_url_rule("/provider/ygg/torrent/<int:id>", view_func=self.download_torrent)
def init_scraper(self):
self.scraper = YggTorrentScraper(create_cfhandler())
key = app.get_key(self.config["keyname"])
if key:
login, _, password = key.partition(":")
if not self.scraper.login(login, password):
                raise RuntimeWarning("could not log in to yggtorrent")
else:
            raise RuntimeWarning("ygg key file does not exist")
def init_masto(self):
token = app.get_key(self.config["masto_keyname"])
self.masto = Mastodon(api_base_url=self.config["masto_url"],
access_token=token)
self.update_domain()
def convert_options(self, options):
media_type = options.get("type")
table = {"movie": ("films_&_videos", ("film", "animation")),
"show": ("films_&_videos", ("emission_tv", "serie_tv")),
"game": ("jeux_video", options.get("platform", ()))
}
entry = table.get(media_type, None)
if entry is not None:
options["category"], options["subcategory"] = entry
return options
def gen_torrent_url(self, torrent):
return "/provider/ygg/torrent" + torrent.url.partition("id=")[2]
def convert_to_content(self, torrent):
return {"provider": "ygg",
"type": "torrent",
"name": torrent.name,
"size": torrent.size,
"uri": self.gen_torrent_url(torrent),
"data": {
"uploaded": torrent.uploaded_datetime.strftime("%Y-%m-%d"),
"completed": torrent.completed,
"seeders": torrent.seeders,
"leechers": torrent.leechers
}
}
def get_max_torrent(self, options):
n = options.get("max_content", None)
if n is not None and n.isdigit():
return int(n)
return self.config["max_torrent"]
def query_content(self, details, options):
options = self.convert_options(options)
release_year = " " + details["release_date"].split("-")[0]
options.update(name=details["title"] + release_year)
torrents = self.scraper.search(options)
if self.config["search_with_original_title"]:
options.update(name=details["original_title"] + release_year)
torrents.extend(self.scraper.search(options))
for _, torrent in zip(range(self.get_max_torrent(options)), torrents):
torrent_details = self.scraper.extract_details(torrent)
yield self.convert_to_content(torrent_details)
@route("/torrent/<int:id>")
def get_torrent(self, id):
url = f"{make_ygg_url()}/engine/download_torrent?id={id}"
response = self.scraper.session.get(url)
return torrent_parser.decode(response.content)
def exchange_url(self, url):
self.tracker_url = url
try:
data = self.get_torrent(randint(1000,5000))
except Exception as e:
logging.error(e)
else:
return data["announce"]
def spoof_torrent(self, id):
data = self.get_torrent(id)
if self.tracker_url is None:
from . import tracker
self.tracker_url = tracker.exchange_url(data["announce"])
data["announce"] = self.tracker_url
return torrent_parser.encode(data)
def download_torrent(self, id):
try:
data = self.spoof_torrent(id)
except Exception as e:
logging.error(e)
return abort(status.INTERNAL_SERVER_ERROR)
return Response(data, mimetype="application/x-bittorrent")
def load_config(self, options):
options.setdefault("masto_url", "https://mamot.fr/")
options.setdefault("account_id", "YggTorrent@mamot.fr")
options.setdefault("masto_keyname", "masto")
options.setdefault("tld_pattern", r"[^\.]+\.yggtorrent\.([^/\"]+)")
options.setdefault("update_tld_period", "10:00:00") # every 10 hours
options.setdefault("keyname", "ygg")
options.setdefault("search_with_original_title", True)
options.setdefault("max_torrent", 4)
options.setdefault("subdomain", openflix_config.API_SUBDOMAIN)
        if isinstance(options["update_tld_period"], str):
period = options["update_tld_period"]
options["update_tld_period"] = t_to_sec(period)
self.config = options
@expose
def update_domain(self):
accounts = self.masto.account_search(self.config["account_id"])
for account in accounts:
website = get_first_occ(account.fields, "Website")
if website:
match = re.search(self.config["tld_pattern"], website.value)
if match:
set_yggtorrent_tld(match.group(1))
@expose
def set_ygg_tld(self, tld):
set_yggtorrent_tld(tld)
@expose
def get_ygg_tld(self, type, id):
return get_yggtorrent_tld()
| 30.321429
| 93
| 0.605713
|
b99df80a241df4c693bb37bb0d8b5e25d9211097
| 20,212
|
py
|
Python
|
test/augmentation/test_functional.py
|
ceroytres/kornia
|
b9ffe7efcba7399daeeb8028f10c22941b55d32d
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2021-04-15T01:20:01.000Z
|
2022-01-12T14:12:54.000Z
|
test/augmentation/test_functional.py
|
wyli/kornia
|
53e417eae7c296a0d0b57ad2b1ba8cd11f24c40d
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
test/augmentation/test_functional.py
|
wyli/kornia
|
53e417eae7c296a0d0b57ad2b1ba8cd11f24c40d
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-10-20T06:57:07.000Z
|
2020-10-20T06:57:07.000Z
|
import pytest
import torch
import torch.nn as nn
from torch.testing import assert_allclose
from torch.autograd import gradcheck
import kornia
import kornia.testing as utils # test utils
import kornia.augmentation.functional as F
from kornia.constants import pi
from kornia.augmentation import ColorJitter
class TestHorizontalFlipFn:
def test_random_hflip(self, device, dtype):
input = torch.tensor([[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 1., 2.]], device=device, dtype=dtype) # 3 x 4
expected = torch.tensor([[0., 0., 0., 0.],
[0., 0., 0., 0.],
[2., 1., 0., 0.]], device=device, dtype=dtype) # 3 x 4
assert (F.apply_hflip(input) == expected).all()
def test_batch_random_hflip(self, device, dtype):
batch_size = 5
input = torch.tensor([[[[0., 0., 0.],
[0., 0., 0.],
[0., 1., 1.]]]], device=device, dtype=dtype) # 1 x 1 x 3 x 3
expected = torch.tensor([[[[0., 0., 0.],
[0., 0., 0.],
[1., 1., 0.]]]], device=device, dtype=dtype) # 1 x 1 x 3 x 3
input = input.repeat(batch_size, 3, 1, 1) # 5 x 3 x 3 x 3
expected = expected.repeat(batch_size, 3, 1, 1) # 5 x 3 x 3 x 3
assert (F.apply_hflip(input) == expected).all()
class TestVerticalFlipFn:
def test_random_vflip(self, device, dtype):
input = torch.tensor([[0., 0., 0.],
[0., 0., 0.],
[0., 1., 1.]], device=device, dtype=dtype) # 3 x 3
expected = torch.tensor([[0., 1., 1.],
[0., 0., 0.],
[0., 0., 0.]], device=device, dtype=dtype) # 3 x 3
assert (F.apply_vflip(input) == expected).all()
def test_batch_random_vflip(self, device, dtype):
batch_size = 5
input = torch.tensor([[[[0., 0., 0.],
[0., 0., 0.],
[0., 1., 1.]]]], device=device, dtype=dtype) # 1 x 1 x 3 x 3
expected = torch.tensor([[[[0., 1., 1.],
[0., 0., 0.],
[0., 0., 0.]]]], device=device, dtype=dtype) # 1 x 1 x 3 x 3
input = input.repeat(batch_size, 3, 1, 1) # 5 x 3 x 3 x 3
expected = expected.repeat(batch_size, 3, 1, 1) # 5 x 3 x 3 x 3
assert (F.apply_vflip(input) == expected).all()
class TestColorJitter:
def test_color_jitter(self):
jitter_param = {
'brightness_factor': torch.tensor(1.),
'contrast_factor': torch.tensor(1.),
'saturation_factor': torch.tensor(1.),
'hue_factor': torch.tensor(0.),
'order': torch.tensor([2, 3, 0, 1])
}
input = torch.rand(3, 5, 5) # 3 x 5 x 5
expected = input
assert_allclose(F.apply_color_jitter(input, jitter_param), expected, atol=1e-4, rtol=1e-5)
def test_color_jitter_batch(self):
batch_size = 2
jitter_param = {
'brightness_factor': torch.tensor([1.] * batch_size),
'contrast_factor': torch.tensor([1.] * batch_size),
'saturation_factor': torch.tensor([1.] * batch_size),
'hue_factor': torch.tensor([0.] * batch_size),
'order': torch.tensor([2, 3, 0, 1])
}
input = torch.rand(batch_size, 3, 5, 5) # 2 x 3 x 5 x 5
expected = input
assert_allclose(F.apply_color_jitter(input, jitter_param), expected, atol=1e-4, rtol=1e-5)
def test_random_brightness(self):
torch.manual_seed(42)
jitter_param = {
'brightness_factor': torch.tensor([1.1529, 1.1660]),
'contrast_factor': torch.tensor([1., 1.]),
'hue_factor': torch.tensor([0., 0.]),
'saturation_factor': torch.tensor([1., 1.]),
'order': torch.tensor([2, 3, 0, 1])
}
input = torch.tensor([[[[0.1, 0.2, 0.3],
[0.6, 0.5, 0.4],
[0.7, 0.8, 1.]]]]) # 1 x 1 x 3 x 3
input = input.repeat(2, 3, 1, 1) # 2 x 3 x 3
expected = torch.tensor([[[[0.2529, 0.3529, 0.4529],
[0.7529, 0.6529, 0.5529],
[0.8529, 0.9529, 1.0000]],
[[0.2529, 0.3529, 0.4529],
[0.7529, 0.6529, 0.5529],
[0.8529, 0.9529, 1.0000]],
[[0.2529, 0.3529, 0.4529],
[0.7529, 0.6529, 0.5529],
[0.8529, 0.9529, 1.0000]]],
[[[0.2660, 0.3660, 0.4660],
[0.7660, 0.6660, 0.5660],
[0.8660, 0.9660, 1.0000]],
[[0.2660, 0.3660, 0.4660],
[0.7660, 0.6660, 0.5660],
[0.8660, 0.9660, 1.0000]],
[[0.2660, 0.3660, 0.4660],
[0.7660, 0.6660, 0.5660],
[0.8660, 0.9660, 1.0000]]]]) # 1 x 1 x 3 x 3
assert_allclose(F.apply_color_jitter(input, jitter_param), expected)
def test_random_contrast(self):
torch.manual_seed(42)
jitter_param = {
'brightness_factor': torch.tensor([1., 1.]),
'contrast_factor': torch.tensor([0.9531, 1.1837]),
'hue_factor': torch.tensor([0., 0.]),
'saturation_factor': torch.tensor([1., 1.]),
'order': torch.tensor([2, 3, 0, 1])
}
input = torch.tensor([[[[0.1, 0.2, 0.3],
[0.6, 0.5, 0.4],
[0.7, 0.8, 1.]]]]) # 1 x 1 x 3 x 3
input = input.repeat(2, 3, 1, 1) # 2 x 3 x 3
expected = torch.tensor([[[[0.0953, 0.1906, 0.2859],
[0.5719, 0.4766, 0.3813],
[0.6672, 0.7625, 0.9531]],
[[0.0953, 0.1906, 0.2859],
[0.5719, 0.4766, 0.3813],
[0.6672, 0.7625, 0.9531]],
[[0.0953, 0.1906, 0.2859],
[0.5719, 0.4766, 0.3813],
[0.6672, 0.7625, 0.9531]]],
[[[0.1184, 0.2367, 0.3551],
[0.7102, 0.5919, 0.4735],
[0.8286, 0.9470, 1.0000]],
[[0.1184, 0.2367, 0.3551],
[0.7102, 0.5919, 0.4735],
[0.8286, 0.9470, 1.0000]],
[[0.1184, 0.2367, 0.3551],
[0.7102, 0.5919, 0.4735],
[0.8286, 0.9470, 1.0000]]]])
assert_allclose(F.apply_color_jitter(input, jitter_param), expected, atol=1e-4, rtol=1e-5)
def test_random_saturation(self):
torch.manual_seed(42)
jitter_param = {
'brightness_factor': torch.tensor([1., 1.]),
'contrast_factor': torch.tensor([1., 1.]),
'hue_factor': torch.tensor([0., 0.]),
'saturation_factor': torch.tensor([0.9026, 1.1175]),
'order': torch.tensor([2, 3, 0, 1])
}
input = torch.tensor([[[[0.1, 0.2, 0.3],
[0.6, 0.5, 0.4],
[0.7, 0.8, 1.]],
[[1.0, 0.5, 0.6],
[0.6, 0.3, 0.2],
[0.8, 0.1, 0.2]],
[[0.6, 0.8, 0.7],
[0.9, 0.3, 0.2],
[0.8, 0.4, .5]]]]) # 1 x 1 x 3 x 3
input = input.repeat(2, 1, 1, 1) # 2 x 3 x 3
expected = torch.tensor([[[[1.8763e-01, 2.5842e-01, 3.3895e-01],
[6.2921e-01, 5.0000e-01, 4.0000e-01],
[7.0974e-01, 8.0000e-01, 1.0000e+00]],
[[1.0000e+00, 5.2921e-01, 6.0974e-01],
[6.2921e-01, 3.1947e-01, 2.1947e-01],
[8.0000e-01, 1.6816e-01, 2.7790e-01]],
[[6.3895e-01, 8.0000e-01, 7.0000e-01],
[9.0000e-01, 3.1947e-01, 2.1947e-01],
[8.0000e-01, 4.3895e-01, 5.4869e-01]]],
[[[1.1921e-07, 1.2953e-01, 2.5302e-01],
[5.6476e-01, 5.0000e-01, 4.0000e-01],
[6.8825e-01, 8.0000e-01, 1.0000e+00]],
[[1.0000e+00, 4.6476e-01, 5.8825e-01],
[5.6476e-01, 2.7651e-01, 1.7651e-01],
[8.0000e-01, 1.7781e-02, 1.0603e-01]],
[[5.5556e-01, 8.0000e-01, 7.0000e-01],
[9.0000e-01, 2.7651e-01, 1.7651e-01],
[8.0000e-01, 3.5302e-01, 4.4127e-01]]]])
assert_allclose(F.apply_color_jitter(input, jitter_param), expected, atol=1e-4, rtol=1e-5)
def test_random_hue(self):
torch.manual_seed(42)
jitter_param = {
'brightness_factor': torch.tensor([1., 1.]),
'contrast_factor': torch.tensor([1., 1.]),
'hue_factor': torch.tensor([-0.0438 / 2 / pi, 0.0404 / 2 / pi]),
'saturation_factor': torch.tensor([1., 1.]),
'order': torch.tensor([2, 3, 0, 1])
}
input = torch.tensor([[[[0.1, 0.2, 0.3],
[0.6, 0.5, 0.4],
[0.7, 0.8, 1.]],
[[1.0, 0.5, 0.6],
[0.6, 0.3, 0.2],
[0.8, 0.1, 0.2]],
[[0.6, 0.8, 0.7],
[0.9, 0.3, 0.2],
[0.8, 0.4, .5]]]]) # 1 x 1 x 3 x 3
input = input.repeat(2, 1, 1, 1) # 2 x 3 x 3
expected = torch.tensor([[[[0.1000, 0.2000, 0.3000],
[0.6000, 0.5000, 0.4000],
[0.7000, 0.8000, 1.0000]],
[[1.0000, 0.5251, 0.6167],
[0.6126, 0.3000, 0.2000],
[0.8000, 0.1000, 0.2000]],
[[0.5623, 0.8000, 0.7000],
[0.9000, 0.3084, 0.2084],
[0.7958, 0.4293, 0.5335]]],
[[[0.1000, 0.2000, 0.3000],
[0.6116, 0.5000, 0.4000],
[0.7000, 0.8000, 1.0000]],
[[1.0000, 0.4769, 0.5846],
[0.6000, 0.3077, 0.2077],
[0.7961, 0.1000, 0.2000]],
[[0.6347, 0.8000, 0.7000],
[0.9000, 0.3000, 0.2000],
[0.8000, 0.3730, 0.4692]]]])
assert_allclose(F.apply_color_jitter(input, jitter_param), expected, atol=1e-4, rtol=1e-5)
class TestRandomGrayscale:
def test_opencv_true(self, device):
data = torch.tensor([[[0.3944633, 0.8597369, 0.1670904, 0.2825457, 0.0953912],
[0.1251704, 0.8020709, 0.8933256, 0.9170977, 0.1497008],
[0.2711633, 0.1111478, 0.0783281, 0.2771807, 0.5487481],
[0.0086008, 0.8288748, 0.9647092, 0.8922020, 0.7614344],
[0.2898048, 0.1282895, 0.7621747, 0.5657831, 0.9918593]],
[[0.5414237, 0.9962701, 0.8947155, 0.5900949, 0.9483274],
[0.0468036, 0.3933847, 0.8046577, 0.3640994, 0.0632100],
[0.6171775, 0.8624780, 0.4126036, 0.7600935, 0.7279997],
[0.4237089, 0.5365476, 0.5591233, 0.1523191, 0.1382165],
[0.8932794, 0.8517839, 0.7152701, 0.8983801, 0.5905426]],
[[0.2869580, 0.4700376, 0.2743714, 0.8135023, 0.2229074],
[0.9306560, 0.3734594, 0.4566821, 0.7599275, 0.7557513],
[0.7415742, 0.6115875, 0.3317572, 0.0379378, 0.1315770],
[0.8692724, 0.0809556, 0.7767404, 0.8742208, 0.1522012],
[0.7708948, 0.4509611, 0.0481175, 0.2358997, 0.6900532]]])
data = data.to(device)
expected = torch.tensor([[[0.4684734, 0.8954562, 0.6064363, 0.5236061, 0.6106016],
[0.1709944, 0.5133104, 0.7915002, 0.5745703, 0.1680204],
[0.5279005, 0.6092287, 0.3034387, 0.5333768, 0.6064113],
[0.3503858, 0.5720159, 0.7052018, 0.4558409, 0.3261529],
[0.6988886, 0.5897652, 0.6532392, 0.7234108, 0.7218805]],
[[0.4684734, 0.8954562, 0.6064363, 0.5236061, 0.6106016],
[0.1709944, 0.5133104, 0.7915002, 0.5745703, 0.1680204],
[0.5279005, 0.6092287, 0.3034387, 0.5333768, 0.6064113],
[0.3503858, 0.5720159, 0.7052018, 0.4558409, 0.3261529],
[0.6988886, 0.5897652, 0.6532392, 0.7234108, 0.7218805]],
[[0.4684734, 0.8954562, 0.6064363, 0.5236061, 0.6106016],
[0.1709944, 0.5133104, 0.7915002, 0.5745703, 0.1680204],
[0.5279005, 0.6092287, 0.3034387, 0.5333768, 0.6064113],
[0.3503858, 0.5720159, 0.7052018, 0.4558409, 0.3261529],
[0.6988886, 0.5897652, 0.6532392, 0.7234108, 0.7218805]]])
expected = expected.to(device)
assert_allclose(F.apply_grayscale(data), expected)
def test_opencv_true_batch(self, device):
batch_size = 4
data = torch.tensor([[[0.3944633, 0.8597369, 0.1670904, 0.2825457, 0.0953912],
[0.1251704, 0.8020709, 0.8933256, 0.9170977, 0.1497008],
[0.2711633, 0.1111478, 0.0783281, 0.2771807, 0.5487481],
[0.0086008, 0.8288748, 0.9647092, 0.8922020, 0.7614344],
[0.2898048, 0.1282895, 0.7621747, 0.5657831, 0.9918593]],
[[0.5414237, 0.9962701, 0.8947155, 0.5900949, 0.9483274],
[0.0468036, 0.3933847, 0.8046577, 0.3640994, 0.0632100],
[0.6171775, 0.8624780, 0.4126036, 0.7600935, 0.7279997],
[0.4237089, 0.5365476, 0.5591233, 0.1523191, 0.1382165],
[0.8932794, 0.8517839, 0.7152701, 0.8983801, 0.5905426]],
[[0.2869580, 0.4700376, 0.2743714, 0.8135023, 0.2229074],
[0.9306560, 0.3734594, 0.4566821, 0.7599275, 0.7557513],
[0.7415742, 0.6115875, 0.3317572, 0.0379378, 0.1315770],
[0.8692724, 0.0809556, 0.7767404, 0.8742208, 0.1522012],
[0.7708948, 0.4509611, 0.0481175, 0.2358997, 0.6900532]]])
data = data.to(device)
data = data.unsqueeze(0).repeat(batch_size, 1, 1, 1)
# Output data generated with OpenCV 4.1.1: cv2.cvtColor(img_np, cv2.COLOR_RGB2GRAY)
expected = torch.tensor([[[0.4684734, 0.8954562, 0.6064363, 0.5236061, 0.6106016],
[0.1709944, 0.5133104, 0.7915002, 0.5745703, 0.1680204],
[0.5279005, 0.6092287, 0.3034387, 0.5333768, 0.6064113],
[0.3503858, 0.5720159, 0.7052018, 0.4558409, 0.3261529],
[0.6988886, 0.5897652, 0.6532392, 0.7234108, 0.7218805]],
[[0.4684734, 0.8954562, 0.6064363, 0.5236061, 0.6106016],
[0.1709944, 0.5133104, 0.7915002, 0.5745703, 0.1680204],
[0.5279005, 0.6092287, 0.3034387, 0.5333768, 0.6064113],
[0.3503858, 0.5720159, 0.7052018, 0.4558409, 0.3261529],
[0.6988886, 0.5897652, 0.6532392, 0.7234108, 0.7218805]],
[[0.4684734, 0.8954562, 0.6064363, 0.5236061, 0.6106016],
[0.1709944, 0.5133104, 0.7915002, 0.5745703, 0.1680204],
[0.5279005, 0.6092287, 0.3034387, 0.5333768, 0.6064113],
[0.3503858, 0.5720159, 0.7052018, 0.4558409, 0.3261529],
[0.6988886, 0.5897652, 0.6532392, 0.7234108, 0.7218805]]])
expected = expected.to(device)
expected = expected.unsqueeze(0).repeat(batch_size, 1, 1, 1)
assert_allclose(F.apply_grayscale(data), expected)
class TestRandomRectangleErasing:
def test_rectangle_erasing1(self, device):
inputs = torch.ones(1, 1, 10, 10).to(device)
rect_params = {
"widths": torch.tensor([5]),
"heights": torch.tensor([5]),
"xs": torch.tensor([5]),
"ys": torch.tensor([5]),
"values": torch.tensor([0.])
}
expected = torch.tensor([[[
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 0., 0., 0., 0., 0.]
]]]).to(device)
assert_allclose(F.apply_erase_rectangles(inputs, rect_params), expected)
def test_rectangle_erasing2(self, device):
inputs = torch.ones(3, 3, 3, 3).to(device)
rect_params = {
"widths": torch.tensor([3, 2, 1]),
"heights": torch.tensor([3, 2, 1]),
"xs": torch.tensor([0, 1, 2]),
"ys": torch.tensor([0, 1, 2]),
"values": torch.tensor([0., 0., 0.])
}
expected = torch.tensor(
[[[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]]],
[[[1., 1., 1.],
[1., 0., 0.],
[1., 0., 0.]],
[[1., 1., 1.],
[1., 0., 0.],
[1., 0., 0.]],
[[1., 1., 1.],
[1., 0., 0.],
[1., 0., 0.]]],
[[[1., 1., 1.],
[1., 1., 1.],
[1., 1., 0.]],
[[1., 1., 1.],
[1., 1., 1.],
[1., 1., 0.]],
[[1., 1., 1.],
[1., 1., 1.],
[1., 1., 0.]]]]
).to(device)
assert_allclose(F.apply_erase_rectangles(inputs, rect_params), expected)
| 44.815965
| 98
| 0.403721
|
6f46890a5f08975016f37f1b18f3bc8b3c453b01
| 1,197
|
py
|
Python
|
stats/tactic_asts.py
|
reichel3/TacTok
|
c344e76263de04311af8a0030c07aec95d87f71c
|
[
"MIT"
] | 7
|
2020-11-23T02:45:36.000Z
|
2022-03-18T03:03:33.000Z
|
stats/tactic_asts.py
|
reichel3/TacTok
|
c344e76263de04311af8a0030c07aec95d87f71c
|
[
"MIT"
] | 4
|
2021-02-23T03:03:51.000Z
|
2021-11-13T00:07:38.000Z
|
stats/tactic_asts.py
|
reichel3/TacTok
|
c344e76263de04311af8a0030c07aec95d87f71c
|
[
"MIT"
] | 2
|
2021-01-19T17:56:28.000Z
|
2022-03-28T04:39:41.000Z
|
import common
import numpy as np
from utils import iter_proofs
from lark.exceptions import UnexpectedCharacters, ParseError
from tac_grammar import CFG, TreeBuilder, NonterminalNode, TerminalNode
import pdb
grammar = CFG(common.tac_grammar, 'tactic_expr')
tree_builder = TreeBuilder(grammar)
ast_height = []
num_tokens = []
num_chars = []
has_argument = []
def process_proof(filename, proof_data):
global ast_height
global num_tokens
global num_chars
for step in proof_data['steps']:
if step['command'][1] != 'VernacExtend':
continue
if not step['command'][0].endswith('.'):
continue
tac_str = step['command'][0][:-1]
try:
tree = tree_builder.transform(grammar.parser.parse(tac_str))
except (UnexpectedCharacters, ParseError) as ex:
continue
ast_height.append(tree.height())
num_tokens.append(tree.num_tokens())
num_chars.append(len(tac_str))
has_argument.append(int(tree.has_argument()))
iter_proofs(common.data_root, process_proof, show_progress=True)
print(np.mean(ast_height), np.mean(num_tokens), np.mean(num_chars), np.mean(has_argument))
| 29.195122
| 90
| 0.688388
|
e1d08b1c8d96281c1a45c1638cec602dd4b39a18
| 9,174
|
py
|
Python
|
nilearn/plotting/html_connectome.py
|
SIMEXP/nilearn
|
4f51aea58f38689ca32c2edd748528d521e6cfb0
|
[
"BSD-2-Clause"
] | 2
|
2015-04-30T23:29:24.000Z
|
2018-04-13T08:38:05.000Z
|
nilearn/plotting/html_connectome.py
|
SIMEXP/nilearn
|
4f51aea58f38689ca32c2edd748528d521e6cfb0
|
[
"BSD-2-Clause"
] | 1
|
2015-02-23T08:53:09.000Z
|
2015-02-23T08:53:09.000Z
|
nilearn/plotting/html_connectome.py
|
SIMEXP/nilearn
|
4f51aea58f38689ca32c2edd748528d521e6cfb0
|
[
"BSD-2-Clause"
] | 1
|
2017-08-23T22:04:21.000Z
|
2017-08-23T22:04:21.000Z
|
import json
import numpy as np
from scipy import sparse
from nilearn._utils import replace_parameters
from .. import datasets
from . import cm
from .js_plotting_utils import (add_js_lib, HTMLDocument, mesh_to_plotly,
encode, colorscale, get_html_template,
to_color_strings)
class ConnectomeView(HTMLDocument):
pass
def _prepare_line(edges, nodes):
path_edges = np.zeros(len(edges) * 3, dtype=int)
path_edges[::3] = edges
path_edges[1::3] = edges
path_nodes = np.zeros(len(nodes) * 3, dtype=int)
path_nodes[::3] = nodes[:, 0]
path_nodes[1::3] = nodes[:, 1]
return path_edges, path_nodes
def _get_connectome(adjacency_matrix, coords, threshold=None,
marker_size=None, cmap=cm.cold_hot, symmetric_cmap=True):
connectome = {}
coords = np.asarray(coords, dtype='<f4')
adjacency_matrix = adjacency_matrix.copy()
colors = colorscale(
cmap, adjacency_matrix.ravel(), threshold=threshold,
symmetric_cmap=symmetric_cmap)
connectome['colorscale'] = colors['colors']
connectome['cmin'] = float(colors['vmin'])
connectome['cmax'] = float(colors['vmax'])
if threshold is not None:
adjacency_matrix[
np.abs(adjacency_matrix) <= colors['abs_threshold']] = 0
s = sparse.coo_matrix(adjacency_matrix)
nodes = np.asarray([s.row, s.col], dtype=int).T
edges = np.arange(len(nodes))
path_edges, path_nodes = _prepare_line(edges, nodes)
connectome["_con_w"] = encode(np.asarray(s.data, dtype='<f4')[path_edges])
c = coords[path_nodes]
if np.ndim(marker_size) > 0:
marker_size = np.asarray(marker_size)
marker_size = marker_size[path_nodes]
x, y, z = c.T
for coord, cname in [(x, "x"), (y, "y"), (z, "z")]:
connectome["_con_{}".format(cname)] = encode(
np.asarray(coord, dtype='<f4'))
connectome["markers_only"] = False
if hasattr(marker_size, 'tolist'):
marker_size = marker_size.tolist()
connectome['marker_size'] = marker_size
return connectome
def _get_markers(coords, colors):
connectome = {}
coords = np.asarray(coords, dtype='<f4')
x, y, z = coords.T
for coord, cname in [(x, "x"), (y, "y"), (z, "z")]:
connectome["_con_{}".format(cname)] = encode(
np.asarray(coord, dtype='<f4'))
connectome["marker_color"] = to_color_strings(colors)
connectome["markers_only"] = True
return connectome
def _make_connectome_html(connectome_info, embed_js=True):
plot_info = {"connectome": connectome_info}
mesh = datasets.fetch_surf_fsaverage()
for hemi in ['pial_left', 'pial_right']:
plot_info[hemi] = mesh_to_plotly(mesh[hemi])
as_json = json.dumps(plot_info)
as_html = get_html_template(
'connectome_plot_template.html').safe_substitute(
{'INSERT_CONNECTOME_JSON_HERE': as_json})
as_html = add_js_lib(as_html, embed_js=embed_js)
return ConnectomeView(as_html)
def _replacement_params_view_connectome():
""" Returns a dict containing deprecated & replacement parameters
as key-value pair for view_connectome().
Avoids cluttering the global namespace.
"""
return {
'coords': 'node_coords',
'threshold': 'edge_threshold',
'cmap': 'edge_cmap',
'marker_size': 'node_size',
}
@replace_parameters(replacement_params=_replacement_params_view_connectome(),
end_version='0.6.0',
lib_name='Nilearn'
)
def view_connectome(adjacency_matrix, node_coords, edge_threshold=None,
edge_cmap=cm.bwr, symmetric_cmap=True,
linewidth=6., node_size=3., colorbar=True,
colorbar_height=.5, colorbar_fontsize=25,
title=None, title_fontsize=25):
"""
Insert a 3d plot of a connectome into an HTML page.
Parameters
----------
adjacency_matrix : ndarray, shape=(n_nodes, n_nodes)
the weights of the edges.
node_coords : ndarray, shape=(n_nodes, 3)
the coordinates of the nodes in MNI space.
edge_threshold : str, number or None, optional (default=None)
If None, no thresholding.
If it is a number only connections of amplitude greater
than threshold will be shown.
If it is a string it must finish with a percent sign,
e.g. "25.3%", and only connections of amplitude above the
given percentile will be shown.
edge_cmap : str or matplotlib colormap, optional
symmetric_cmap : bool, optional (default=True)
Make colormap symmetric (ranging from -vmax to vmax).
linewidth : float, optional (default=6.)
Width of the lines that show connections.
node_size : float, optional (default=3.)
Size of the markers showing the seeds in pixels.
colorbar : bool, optional (default=True)
add a colorbar
colorbar_height : float, optional (default=.5)
height of the colorbar, relative to the figure height
colorbar_fontsize : int, optional (default=25)
fontsize of the colorbar tick labels
title : str, optional (default=None)
title for the plot
title_fontsize : int, optional (default=25)
fontsize of the title
Returns
-------
ConnectomeView : plot of the connectome.
It can be saved as an html page or rendered (transparently) by the
Jupyter notebook. Useful methods are :
- 'resize' to resize the plot displayed in a Jupyter notebook
- 'save_as_html' to save the plot to a file
- 'open_in_browser' to save the plot and open it in a web browser.
See Also
--------
nilearn.plotting.plot_connectome:
projected views of a connectome in a glass brain.
nilearn.plotting.view_markers:
interactive plot of colored markers
nilearn.plotting.view_surf, nilearn.plotting.view_img_on_surf:
interactive view of statistical maps or surface atlases on the cortical
surface.
"""
connectome_info = _get_connectome(
adjacency_matrix, node_coords,
threshold=edge_threshold, cmap=edge_cmap,
symmetric_cmap=symmetric_cmap, marker_size=node_size)
connectome_info['line_width'] = linewidth
connectome_info['colorbar'] = colorbar
connectome_info['cbar_height'] = colorbar_height
connectome_info['cbar_fontsize'] = colorbar_fontsize
connectome_info['title'] = title
connectome_info['title_fontsize'] = title_fontsize
return _make_connectome_html(connectome_info)
def _replacement_params_view_markers():
""" Returns a dict containing deprecated & replacement parameters
as key-value pair for view_markers().
Avoids cluttering the global namespace.
"""
return {'coords': 'marker_coords',
'colors': 'marker_color',
}
@replace_parameters(replacement_params=_replacement_params_view_markers(),
end_version='0.6.0',
lib_name='Nilearn',
)
def view_markers(marker_coords, marker_color=None, marker_size=5.,
title=None, title_fontsize=25):
"""
Insert a 3d plot of markers in a brain into an HTML page.
Parameters
----------
marker_coords : ndarray, shape=(n_nodes, 3)
the coordinates of the nodes in MNI space.
marker_color : ndarray, shape=(n_nodes,)
colors of the markers: list of strings, hex rgb or rgba strings, rgb
triplets, or rgba triplets (i.e. formats accepted by matplotlib, see
https://matplotlib.org/users/colors.html#specifying-colors)
marker_size : float or array-like, optional (default=3.)
Size of the markers showing the seeds in pixels.
title : str, optional (default=None)
title for the plot
title_fontsize : int, optional (default=25)
fontsize of the title
Returns
-------
ConnectomeView : plot of the markers.
It can be saved as an html page or rendered (transparently) by the
Jupyter notebook. Useful methods are :
- 'resize' to resize the plot displayed in a Jupyter notebook
- 'save_as_html' to save the plot to a file
- 'open_in_browser' to save the plot and open it in a web browser.
See Also
--------
nilearn.plotting.plot_connectome:
projected views of a connectome in a glass brain.
nilearn.plotting.view_connectome:
interactive plot of a connectome.
nilearn.plotting.view_surf, nilearn.plotting.view_img_on_surf:
interactive view of statistical maps or surface atlases on the cortical
surface.
"""
if marker_color is None:
marker_color = ['red' for i in range(len(marker_coords))]
connectome_info = _get_markers(marker_coords, marker_color)
if hasattr(marker_size, 'tolist'):
marker_size = marker_size.tolist()
connectome_info["marker_size"] = marker_size
connectome_info['title'] = title
connectome_info['title_fontsize'] = title_fontsize
return _make_connectome_html(connectome_info)
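# --- Hedged usage sketch (illustrative, not part of the original module) ------
# A minimal call to view_connectome with random data of the documented shapes;
# the number of nodes, coordinate range and threshold are arbitrary choices.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    n_nodes = 10
    adjacency = rng.rand(n_nodes, n_nodes)
    adjacency = (adjacency + adjacency.T) / 2         # symmetric edge weights
    coords = rng.uniform(-50, 50, size=(n_nodes, 3))  # pseudo-MNI coordinates
    view = view_connectome(adjacency, coords, edge_threshold='80%')
    view.save_as_html('example_connectome.html')      # or view.open_in_browser()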
| 35.149425
| 79
| 0.659799
|
38f14f3ce9b905a152ff4f7b6ef85c46113bdc7d
| 11,977
|
py
|
Python
|
main.py
|
aryanshivamarya/LOCO-BOT
|
3ea8229e282d6b28930af96881693294ae6805c1
|
[
"BSD-3-Clause"
] | null | null | null |
main.py
|
aryanshivamarya/LOCO-BOT
|
3ea8229e282d6b28930af96881693294ae6805c1
|
[
"BSD-3-Clause"
] | null | null | null |
main.py
|
aryanshivamarya/LOCO-BOT
|
3ea8229e282d6b28930af96881693294ae6805c1
|
[
"BSD-3-Clause"
] | null | null | null |
'''
using discord.py version 1.0.0a
'''
import discord
import asyncio
import re
import multiprocessing
import threading
import concurrent
BOT_OWNER_ROLE = 'fetch' # change to what you need
#BOT_OWNER_ROLE_ID = "597332392637890571"
oot_channel_id_list = [
"593990608914219008", #loco galaxy
"607613349491900436", #loco IQ
"569420128794443776", #loco unt
"569502072945377290", #indian loco
"595635734904307742", #tf loco
"612177236107460618",#sani loco
"591498350562377741",#planet loco
"605443517069656084", #tf confetti
"593990638916075520", #galaxy confett
"590583414541910018",# confetti IQ
"591294134564683809", #indian confetti
"588070986554015764",#unt confetti
"609405529575653387",# kingdom confetti
"612177284471717894",#sani confetti
"591498756562878475",#planet confetti
"595639586726740049",#tf hq
"591068955523809328",#hq galaxy
"580198028950896640",#HQ tribe
"459842150323060736",#hq dimensions
"513818250652680213",#hq world
"569420198717816852",#hq unt
"568617830258442255"#hq revolution
"598669844983840779",#cashquiz dimension
"446448458090545172",#cashquiz tribe
"610713322509303809",#cashquiz galaxy
"595639664300392472",#cashquiz tf
"596527077402869770",#theq tf
"501220538518077440",#theq dimensions
"446448458090545172",#theq tribe
"513818839008673833",#theq world
"569420278006808586",#theq unt
"580208596139245601",#theq revolution
"535675285211971584",#swagIQ world
"595639769904447502",#swagIQ tf
"446448437119025154",#swagIQ tribe
"501220306128601098",#swagIQ dimension
"570794448808837131",#swagIQ revolution
"514915010955313153",#confeti vietnam world
"595640787933331466",#confetti vietnam tf
"501219307477532674",#confeti vietnam dimension
"571241319658291200",#confeti vietnam unt
"609003338675126272",#confetti vietnam pride
"611439844996153375",#confetti mexico pride
"611980037243273220",#confettimexico pride
"611751492054941696",#confetti mexico
]
answer_pattern = re.compile(r'(not|n)?([1-3]{1})(\?)?(cnf)?(\?)?$', re.IGNORECASE)
apgscore = 500
nomarkscore = 300
markscore = 200
async def update_scores(content, answer_scores):
global answer_pattern
m = answer_pattern.match(content)
if m is None:
return False
ind = int(m[2])-1
if m[1] is None:
if m[3] is None:
if m[4] is None:
answer_scores[ind] += nomarkscore
else: # apg
if m[5] is None:
answer_scores[ind] += apgscore
else:
answer_scores[ind] += markscore
else: # 1? ...
answer_scores[ind] += markscore
else: # contains not or n
if m[3] is None:
answer_scores[ind] -= nomarkscore
else:
answer_scores[ind] -= markscore
return True
class SelfBot(discord.Client):
def __init__(self, update_event, answer_scores):
super().__init__()
global oot_channel_id_list
#global wrong
self.oot_channel_id_list = oot_channel_id_list
self.update_event = update_event
self.answer_scores = answer_scores
async def on_ready(self):
print("======================")
print("Nelson Trivia Self Bot")
print("Connected to discord.")
print("User: " + self.user.name)
print("ID: " + str(self.user.id))
# @bot.event
# async def on_message(message):
# if message.content.startswith('-debug'):
# await message.channel.send('d')
def is_scores_updated(message):
if message.guild == None or \
str(message.channel.id) not in self.oot_channel_id_list:
return False
content = message.content.replace(' ', '').replace("'", "")
m = answer_pattern.match(content)
if m is None:
return False
ind = int(m[2])-1
if m[1] is None:
if m[3] is None:
if m[4] is None:
self.answer_scores[ind] += nomarkscore
else: # apg
if m[5] is None:
self.answer_scores[ind] += apgscore
else:
self.answer_scores[ind] += markscore
else: # 1? ...
self.answer_scores[ind] += markscore
else: # contains not or n
if m[3] is None:
self.answer_scores[ind] -= nomarkscore
else:
self.answer_scores[ind] -= markscore
return True
while True:
await self.wait_for('message', check=is_scores_updated)
self.update_event.set()
class Bot(discord.Client):
def __init__(self, answer_scores):
super().__init__()
self.bot_channel_id_list = []
self.embed_msg = None
self.embed_channel_id = None
#global wrong
self.answer_scores = answer_scores
# embed creation
self.embed=discord.Embed(title="**__TRIVIA SAVAGE | PRO__**", description="**Web Searching** :spy:")
self.embed.set_author(name ='',url=' ',icon_url='https://images-ext-2.discordapp.net/external/aMZ8_Dhu3Cib5U1l--xzP6QVgEV6bzjPDLMC-gNawWY/https/cdn.discordapp.com/attachments/577373201164795904/585046581506605076/ezgif-2-2f5a82b8174f.gif?width=225&height=225')
self.embed.set_thumbnail(url="https://cdn.discordapp.com/attachments/595713706411819033/604679180201754674/image0.png")
self.embed.add_field(name="Option I", value="0", inline=False)
self.embed.add_field(name="Option II", value="0", inline=False)
self.embed.add_field(name="Option III", value="0", inline=False)
self.embed.set_footer(text=f"CAPTAIN COOL#0044",\
icon_url="https://cdn.discordapp.com/attachments/595713706411819033/604679180201754674/image0.png")
self.embed.add_field(name="Suggested Answer!:", value="0", inline=True)
#await self.bot.add_reaction(embed,':spy:')
async def clear_results(self):
for i in range(len(self.answer_scores)):
self.answer_scores[i]=0
async def update_embeds(self):
# global wrong
one_check = ""
two_check = ""
three_check = ""
best_answer = ' :hourglass: '
lst_scores = list(self.answer_scores)
highest = max(lst_scores)
lowest = min(lst_scores)
answer = lst_scores.index(highest)+1
#global wrong
if highest > 0:
if answer == 1:
one_check = "<:white_check_mark:601397380507500549>"
best_answer = ':one:'
else:
one_check = "<:x:600303220417626120>"
if answer == 2:
two_check = "<:white_check_mark:601397380507500549>"
best_answer = ':two:'
else:
two_check = "<:x:600303220417626120>"
if answer == 3:
three_check = "<:white_check_mark:601397380507500549>"
best_answer = ':three:'
else:
three_check = "<:x:600303220417626120>"
#if lowest < 0:
#if answer == 1:
#one_cross = ":x:"
#if answer == 2:
#two_cross = ":x:"
#if answer == 3:
#three_cross = ":x:"
self.embed.set_field_at(0, name="Option I", value="**{0}**{1}".format(lst_scores[0], one_check))
self.embed.set_field_at(1, name="Option II", value="**{0}**{1}".format(lst_scores[1], two_check))
self.embed.set_field_at(2, name="Option III", value="**{0}**{1}".format(lst_scores[2], three_check))
self.embed.set_field_at(3, name="Suggested Answer!:", value=best_answer, inline=True)
if self.embed_msg is not None:
await self.embed_msg.edit(embed=self.embed)
async def on_ready(self):
print("==============")
print("Nelson Trivia")
print("Connected to discord.")
print("User: " + self.user.name)
print("ID: " + str(self.user.id))
await self.clear_results()
await self.update_embeds()
await self.change_presence(activity=discord.Game(name='with '+str(len(set(self.get_all_members())))+' users'))
await self.change_presence(activity=discord.Game(name='Trivia with Captain Cool||*help'))
async def on_message(self, message):
# if message is private
if message.author == self.user or message.guild == None:
return
if message.content.lower() == "*":
await message.delete()
if BOT_OWNER_ROLE in [role.name for role in message.author.roles]:
self.embed_msg = None
await self.clear_results()
await self.update_embeds()
self.embed_msg = \
await message.channel.send('',embed=self.embed)
#await self.embed_msg.add_reaction("✔️")
self.embed_channel_id = message.channel.id
else:
                await message.channel.send("**Lol** You do not have permission to use this **cmd!** :stuck_out_tongue_winking_eye:")
return
if message.content.startswith('*help'):
await message.delete()
if BOT_OWNER_ROLE in [role.name for role in message.author.roles]:
                embed = discord.Embed(title="Help Commands", description="**How to Run the Bot**", color=0x00ff00)
                embed.add_field(name="Supported Games", value="**Loco\nConfetti-India\nFlipkart\nJeetoh\nHQ Trivia\nCashquiz\nSwag IQ\nThe Q\nConfetti Vietnam\nConfetti Mexico**", inline=False)
                embed.add_field(name="When a question comes, use the command", value=" `*` **is the command for all supported games except:**\n**`*j` is the command for Jeetoh**\n**`*f` is the command for Flipkart**\n\n**use the command in the matching channels**\n\n**FOR MORE INFO CONTACT CAPTAIN COOL#0044**", inline=False)
await message.channel.send(embed=embed)
# process votes
if message.channel.id == self.embed_channel_id:
content = message.content.replace(' ', '').replace("'", "")
updated = await update_scores(content, self.answer_scores)
if updated:
await self.update_embeds()
def bot_with_cyclic_update_process(update_event, answer_scores):
def cyclic_update(bot, update_event):
f = asyncio.run_coroutine_threadsafe(bot.update_embeds(), bot.loop)
while True:
update_event.wait()
update_event.clear()
f.cancel()
f = asyncio.run_coroutine_threadsafe(bot.update_embeds(), bot.loop)
#res = f.result()
bot = Bot(answer_scores)
upd_thread = threading.Thread(target=cyclic_update, args=(bot, update_event))
upd_thread.start()
loop = asyncio.get_event_loop()
loop.create_task(bot.start('bot_token_here'))
loop.run_forever()
def selfbot_process(update_event, answer_scores):
selfbot = SelfBot(update_event, answer_scores)
loop = asyncio.get_event_loop()
loop.create_task(selfbot.start('self_token_here',
bot=False))
loop.run_forever()
if __name__ == '__main__':
# running bot and selfbot in separate OS processes
# shared event for embed update
update_event = multiprocessing.Event()
# shared array with answer results
answer_scores = multiprocessing.Array(typecode_or_type='i', size_or_initializer=3)
p_bot = multiprocessing.Process(target=bot_with_cyclic_update_process, args=(update_event, answer_scores))
p_selfbot = multiprocessing.Process(target=selfbot_process, args=(update_event, answer_scores))
p_bot.start()
p_selfbot.start()
p_bot.join()
p_selfbot.join()
| 34.22
| 295
| 0.616515
|
a9556ec8ba62e5c55d22713ac49d2d13c4c3f7a4
| 16,768
|
py
|
Python
|
production/production/node_modules/mavlink/src/mavlink/pymavlink/generator/mavgen_python.py
|
baconpancakes1482/narc-drone
|
d1b6563f68db6488cd46684e6ca32e655e000a57
|
[
"MIT"
] | null | null | null |
production/production/node_modules/mavlink/src/mavlink/pymavlink/generator/mavgen_python.py
|
baconpancakes1482/narc-drone
|
d1b6563f68db6488cd46684e6ca32e655e000a57
|
[
"MIT"
] | 7
|
2021-09-01T00:00:56.000Z
|
2021-11-17T04:08:28.000Z
|
production/production/node_modules/mavlink/src/mavlink/pymavlink/generator/mavgen_python.py
|
baconpancakes1482/narc-drone
|
d1b6563f68db6488cd46684e6ca32e655e000a57
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
'''
parse a MAVLink protocol XML file and generate a python implementation
Copyright Andrew Tridgell 2011
Released under GNU GPL version 3 or later
'''
import sys, textwrap, os
import mavparse, mavtemplate
t = mavtemplate.MAVTemplate()
def generate_preamble(outf, msgs, args, xml):
print("Generating preamble")
t.write(outf, """
'''
MAVLink protocol implementation (auto-generated by mavgen.py)
Generated from: ${FILELIST}
Note: this file has been auto-generated. DO NOT EDIT
'''
import struct, array, mavutil, time, json
WIRE_PROTOCOL_VERSION = "${WIRE_PROTOCOL_VERSION}"
# some base types from mavlink_types.h
MAVLINK_TYPE_CHAR = 0
MAVLINK_TYPE_UINT8_T = 1
MAVLINK_TYPE_INT8_T = 2
MAVLINK_TYPE_UINT16_T = 3
MAVLINK_TYPE_INT16_T = 4
MAVLINK_TYPE_UINT32_T = 5
MAVLINK_TYPE_INT32_T = 6
MAVLINK_TYPE_UINT64_T = 7
MAVLINK_TYPE_INT64_T = 8
MAVLINK_TYPE_FLOAT = 9
MAVLINK_TYPE_DOUBLE = 10
class MAVLink_header(object):
'''MAVLink message header'''
def __init__(self, msgId, mlen=0, seq=0, srcSystem=0, srcComponent=0):
self.mlen = mlen
self.seq = seq
self.srcSystem = srcSystem
self.srcComponent = srcComponent
self.msgId = msgId
def pack(self):
return struct.pack('BBBBBB', ${PROTOCOL_MARKER}, self.mlen, self.seq,
self.srcSystem, self.srcComponent, self.msgId)
class MAVLink_message(object):
'''base MAVLink message class'''
def __init__(self, msgId, name):
self._header = MAVLink_header(msgId)
self._payload = None
self._msgbuf = None
self._crc = None
self._fieldnames = []
self._type = name
def get_msgbuf(self):
if isinstance(self._msgbuf, str):
return self._msgbuf
return self._msgbuf.tostring()
def get_header(self):
return self._header
def get_payload(self):
return self._payload
def get_crc(self):
return self._crc
def get_fieldnames(self):
return self._fieldnames
def get_type(self):
return self._type
def get_msgId(self):
return self._header.msgId
def get_srcSystem(self):
return self._header.srcSystem
def get_srcComponent(self):
return self._header.srcComponent
def get_seq(self):
return self._header.seq
def __str__(self):
ret = '%s {' % self._type
for a in self._fieldnames:
v = getattr(self, a)
ret += '%s : %s, ' % (a, v)
ret = ret[0:-2] + '}'
return ret
def to_dict(self):
d = dict({})
d['mavpackettype'] = self._type
for a in self._fieldnames:
d[a] = getattr(self, a)
return d
def to_json(self):
return json.dumps(self.to_dict())
def pack(self, mav, crc_extra, payload):
self._payload = payload
self._header = MAVLink_header(self._header.msgId, len(payload), mav.seq,
mav.srcSystem, mav.srcComponent)
self._msgbuf = self._header.pack() + payload
crc = mavutil.x25crc(self._msgbuf[1:])
if ${crc_extra}: # using CRC extra
crc.accumulate(chr(crc_extra))
self._crc = crc.crc
self._msgbuf += struct.pack('<H', self._crc)
return self._msgbuf
""", {'FILELIST' : ",".join(args),
'PROTOCOL_MARKER' : xml.protocol_marker,
'crc_extra' : xml.crc_extra,
'WIRE_PROTOCOL_VERSION' : xml.wire_protocol_version })
def generate_enums(outf, enums):
print("Generating enums")
outf.write("\n# enums\n")
wrapper = textwrap.TextWrapper(initial_indent="", subsequent_indent=" # ")
for e in enums:
outf.write("\n# %s\n" % e.name)
for entry in e.entry:
outf.write("%s = %u # %s\n" % (entry.name, entry.value, wrapper.fill(entry.description)))
def generate_message_ids(outf, msgs):
print("Generating message IDs")
outf.write("\n# message IDs\n")
outf.write("MAVLINK_MSG_ID_BAD_DATA = -1\n")
for m in msgs:
outf.write("MAVLINK_MSG_ID_%s = %u\n" % (m.name.upper(), m.id))
def generate_classes(outf, msgs):
print("Generating class definitions")
wrapper = textwrap.TextWrapper(initial_indent=" ", subsequent_indent=" ")
for m in msgs:
outf.write("""
class MAVLink_%s_message(MAVLink_message):
'''
%s
'''
def __init__(self""" % (m.name.lower(), wrapper.fill(m.description.strip())))
if len(m.fields) != 0:
outf.write(", " + ", ".join(m.fieldnames))
outf.write("):\n")
outf.write(" MAVLink_message.__init__(self, MAVLINK_MSG_ID_%s, '%s')\n" % (m.name.upper(), m.name.upper()))
if len(m.fieldnames) != 0:
outf.write(" self._fieldnames = ['%s']\n" % "', '".join(m.fieldnames))
for f in m.fields:
outf.write(" self.%s = %s\n" % (f.name, f.name))
outf.write("""
def pack(self, mav):
return MAVLink_message.pack(self, mav, %u, struct.pack('%s'""" % (m.crc_extra, m.fmtstr))
if len(m.fields) != 0:
outf.write(", self." + ", self.".join(m.ordered_fieldnames))
outf.write("))\n")
def mavfmt(field):
'''work out the struct format for a type'''
map = {
'float' : 'f',
'double' : 'd',
'char' : 'c',
'int8_t' : 'b',
'uint8_t' : 'B',
'uint8_t_mavlink_version' : 'B',
'int16_t' : 'h',
'uint16_t' : 'H',
'int32_t' : 'i',
'uint32_t' : 'I',
'int64_t' : 'q',
'uint64_t' : 'Q',
}
if field.array_length:
if field.type in ['char', 'int8_t', 'uint8_t']:
return str(field.array_length)+'s'
return str(field.array_length)+map[field.type]
return map[field.type]
def generate_mavlink_class(outf, msgs, xml):
print("Generating MAVLink class")
    outf.write("\n\nmavlink_map = {\n")
for m in msgs:
outf.write(" MAVLINK_MSG_ID_%s : ( '%s', MAVLink_%s_message, %s, %u ),\n" % (
m.name.upper(), m.fmtstr, m.name.lower(), m.order_map, m.crc_extra))
outf.write("}\n\n")
t.write(outf, """
class MAVError(Exception):
'''MAVLink error class'''
def __init__(self, msg):
Exception.__init__(self, msg)
self.message = msg
class MAVString(str):
'''NUL terminated string'''
def __init__(self, s):
str.__init__(self)
def __str__(self):
i = self.find(chr(0))
if i == -1:
return self[:]
return self[0:i]
class MAVLink_bad_data(MAVLink_message):
'''
a piece of bad data in a mavlink stream
'''
def __init__(self, data, reason):
MAVLink_message.__init__(self, MAVLINK_MSG_ID_BAD_DATA, 'BAD_DATA')
self._fieldnames = ['data', 'reason']
self.data = data
self.reason = reason
self._msgbuf = data
class MAVLink(object):
'''MAVLink protocol handling class'''
def __init__(self, file, srcSystem=0, srcComponent=0):
self.seq = 0
self.file = file
self.srcSystem = srcSystem
self.srcComponent = srcComponent
self.callback = None
self.callback_args = None
self.callback_kwargs = None
self.buf = array.array('B')
self.expected_length = 6
self.have_prefix_error = False
self.robust_parsing = False
self.protocol_marker = ${protocol_marker}
self.little_endian = ${little_endian}
self.crc_extra = ${crc_extra}
self.sort_fields = ${sort_fields}
self.total_packets_sent = 0
self.total_bytes_sent = 0
self.total_packets_received = 0
self.total_bytes_received = 0
self.total_receive_errors = 0
self.startup_time = time.time()
def set_callback(self, callback, *args, **kwargs):
self.callback = callback
self.callback_args = args
self.callback_kwargs = kwargs
def send(self, mavmsg):
'''send a MAVLink message'''
buf = mavmsg.pack(self)
self.file.write(buf)
self.seq = (self.seq + 1) % 255
self.total_packets_sent += 1
self.total_bytes_sent += len(buf)
def bytes_needed(self):
'''return number of bytes needed for next parsing stage'''
ret = self.expected_length - len(self.buf)
if ret <= 0:
return 1
return ret
def parse_char(self, c):
'''input some data bytes, possibly returning a new message'''
if isinstance(c, str):
self.buf.fromstring(c)
else:
self.buf.extend(c)
self.total_bytes_received += len(c)
if len(self.buf) >= 1 and self.buf[0] != ${protocol_marker}:
magic = self.buf[0]
self.buf = self.buf[1:]
if self.robust_parsing:
m = MAVLink_bad_data(chr(magic), "Bad prefix")
if self.callback:
self.callback(m, *self.callback_args, **self.callback_kwargs)
self.expected_length = 6
self.total_receive_errors += 1
return m
if self.have_prefix_error:
return None
self.have_prefix_error = True
self.total_receive_errors += 1
raise MAVError("invalid MAVLink prefix '%s'" % magic)
self.have_prefix_error = False
if len(self.buf) >= 2:
(magic, self.expected_length) = struct.unpack('BB', self.buf[0:2])
self.expected_length += 8
if self.expected_length >= 8 and len(self.buf) >= self.expected_length:
mbuf = self.buf[0:self.expected_length]
self.buf = self.buf[self.expected_length:]
self.expected_length = 6
if self.robust_parsing:
try:
m = self.decode(mbuf)
self.total_packets_received += 1
except MAVError as reason:
m = MAVLink_bad_data(mbuf, reason.message)
self.total_receive_errors += 1
else:
m = self.decode(mbuf)
self.total_packets_received += 1
if self.callback:
self.callback(m, *self.callback_args, **self.callback_kwargs)
return m
return None
def parse_buffer(self, s):
'''input some data bytes, possibly returning a list of new messages'''
m = self.parse_char(s)
if m is None:
return None
ret = [m]
while True:
m = self.parse_char("")
if m is None:
return ret
ret.append(m)
return ret
def decode(self, msgbuf):
'''decode a buffer as a MAVLink message'''
# decode the header
try:
magic, mlen, seq, srcSystem, srcComponent, msgId = struct.unpack('cBBBBB', msgbuf[:6])
except struct.error as emsg:
raise MAVError('Unable to unpack MAVLink header: %s' % emsg)
if ord(magic) != ${protocol_marker}:
raise MAVError("invalid MAVLink prefix '%s'" % magic)
if mlen != len(msgbuf)-8:
raise MAVError('invalid MAVLink message length. Got %u expected %u, msgId=%u' % (len(msgbuf)-8, mlen, msgId))
if not msgId in mavlink_map:
raise MAVError('unknown MAVLink message ID %u' % msgId)
# decode the payload
(fmt, type, order_map, crc_extra) = mavlink_map[msgId]
# decode the checksum
try:
crc, = struct.unpack('<H', msgbuf[-2:])
except struct.error as emsg:
raise MAVError('Unable to unpack MAVLink CRC: %s' % emsg)
crc2 = mavutil.x25crc(msgbuf[1:-2])
if ${crc_extra}: # using CRC extra
crc2.accumulate(chr(crc_extra))
if crc != crc2.crc:
raise MAVError('invalid MAVLink CRC in msgID %u 0x%04x should be 0x%04x' % (msgId, crc, crc2.crc))
try:
t = struct.unpack(fmt, msgbuf[6:-2])
except struct.error as emsg:
raise MAVError('Unable to unpack MAVLink payload type=%s fmt=%s payloadLength=%u: %s' % (
type, fmt, len(msgbuf[6:-2]), emsg))
tlist = list(t)
# handle sorted fields
if ${sort_fields}:
t = tlist[:]
for i in range(0, len(tlist)):
tlist[i] = t[order_map[i]]
# terminate any strings
for i in range(0, len(tlist)):
if isinstance(tlist[i], str):
tlist[i] = MAVString(tlist[i])
t = tuple(tlist)
# construct the message object
try:
m = type(*t)
except Exception as emsg:
raise MAVError('Unable to instantiate MAVLink message of type %s : %s' % (type, emsg))
m._msgbuf = msgbuf
m._payload = msgbuf[6:-2]
m._crc = crc
m._header = MAVLink_header(msgId, mlen, seq, srcSystem, srcComponent)
return m
""", xml)
def generate_methods(outf, msgs):
print("Generating methods")
def field_descriptions(fields):
ret = ""
for f in fields:
ret += " %-18s : %s (%s)\n" % (f.name, f.description.strip(), f.type)
return ret
wrapper = textwrap.TextWrapper(initial_indent="", subsequent_indent=" ")
for m in msgs:
comment = "%s\n\n%s" % (wrapper.fill(m.description.strip()), field_descriptions(m.fields))
selffieldnames = 'self, '
for f in m.fields:
if f.omit_arg:
selffieldnames += '%s=%s, ' % (f.name, f.const_value)
else:
selffieldnames += '%s, ' % f.name
selffieldnames = selffieldnames[:-2]
sub = {'NAMELOWER' : m.name.lower(),
'SELFFIELDNAMES' : selffieldnames,
'COMMENT' : comment,
'FIELDNAMES' : ", ".join(m.fieldnames)}
t.write(outf, """
def ${NAMELOWER}_encode(${SELFFIELDNAMES}):
'''
${COMMENT}
'''
msg = MAVLink_${NAMELOWER}_message(${FIELDNAMES})
msg.pack(self)
return msg
""", sub)
t.write(outf, """
def ${NAMELOWER}_send(${SELFFIELDNAMES}):
'''
${COMMENT}
'''
return self.send(self.${NAMELOWER}_encode(${FIELDNAMES}))
""", sub)
def generate(basename, xml):
    '''generate complete python implementation'''
if basename.endswith('.py'):
filename = basename
else:
filename = basename + '.py'
msgs = []
enums = []
filelist = []
for x in xml:
msgs.extend(x.message)
enums.extend(x.enum)
filelist.append(os.path.basename(x.filename))
for m in msgs:
if xml[0].little_endian:
m.fmtstr = '<'
else:
m.fmtstr = '>'
for f in m.ordered_fields:
m.fmtstr += mavfmt(f)
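        # order_map maps each field's declared (XML) position to its index in the
        # wire-ordered field list, so decode() can restore the declared order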
m.order_map = [ 0 ] * len(m.fieldnames)
for i in range(0, len(m.fieldnames)):
m.order_map[i] = m.ordered_fieldnames.index(m.fieldnames[i])
print("Generating %s" % filename)
outf = open(filename, "w")
generate_preamble(outf, msgs, filelist, xml[0])
generate_enums(outf, enums)
generate_message_ids(outf, msgs)
generate_classes(outf, msgs)
generate_mavlink_class(outf, msgs, xml[0])
generate_methods(outf, msgs)
outf.close()
print("Generated %s OK" % filename)
| 34.716356 | 130 | 0.527552 |
035cf522f7b8da623c530613d628d0d617b1a56e | 4,938 | py | Python | fcblog/app/routes.py | francoiscolombo/sample | 87a1943b815da8d8c3772758b8181df27242d216 | ["MIT"] | 1 | 2020-03-09T08:26:28.000Z | 2020-03-09T08:26:28.000Z | fcblog/app/routes.py | francoiscolombo/sample | 87a1943b815da8d8c3772758b8181df27242d216 | ["MIT"] | null | null | null | fcblog/app/routes.py | francoiscolombo/sample | 87a1943b815da8d8c3772758b8181df27242d216 | ["MIT"] | null | null | null |
from datetime import datetime
from flask import render_template, flash, redirect, url_for, request
from flask_login import current_user, login_user, logout_user, login_required
from werkzeug.urls import url_parse
from app import app, db
from app.forms import LoginForm, RegistrationForm, EditProfileForm, PostForm
from app.models import User, Post
@app.before_request
def before_request():
if current_user.is_authenticated:
current_user.last_seen = datetime.utcnow()
db.session.commit()
@app.route('/', methods=['GET', 'POST'])
@app.route('/index', methods=['GET', 'POST'])
@login_required
def index():
form = PostForm()
if form.validate_on_submit():
post = Post(title=form.title.data, photo=form.photo.data, content=form.content.data, author=current_user)
db.session.add(post)
db.session.commit()
flash('Your post is now live!')
return redirect(url_for('index'))
page = request.args.get('page', 1, type=int)
posts = current_user.followed_posts().paginate(page, app.config['POSTS_PER_PAGE'], False)
next_url = url_for('index', page=posts.next_num) if posts.has_next else None
prev_url = url_for('index', page=posts.prev_num) if posts.has_prev else None
return render_template('index.html', title='Home', form=form, posts=posts.items, next_url=next_url, prev_url=prev_url)
@app.route('/explore')
@login_required
def explore():
page = request.args.get('page', 1, type=int)
posts = Post.query.order_by(Post.timestamp.desc()).paginate(page, app.config['POSTS_PER_PAGE'], False)
next_url = url_for('explore', page=posts.next_num) if posts.has_next else None
prev_url = url_for('explore', page=posts.prev_num) if posts.has_prev else None
return render_template('index.html', title='Explore', posts=posts.items, next_url=next_url, prev_url=prev_url)
@app.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('index'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user is None or not user.check_password(form.password.data):
flash('Invalid username or password')
return redirect(url_for('login'))
login_user(user, remember=form.remember_me.data)
next_page = request.args.get('next')
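        # only follow a relative `next` target; an absolute URL (non-empty netloc)
        # could redirect the user to another site after login (open redirect)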
if not next_page or url_parse(next_page).netloc != '':
next_page = url_for('index')
return redirect(next_page)
return render_template('login.html', title='Sign In', form=form)
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('index'))
@app.route('/register', methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('index'))
form = RegistrationForm()
if form.validate_on_submit():
user = User(username=form.username.data, email=form.email.data)
user.set_password(form.password.data)
db.session.add(user)
db.session.commit()
flash('Congratulations, you are now a registered user!')
return redirect(url_for('login'))
return render_template('register.html', title='Register', form=form)
@app.route('/user/<username>')
@login_required
def user(username):
user = User.query.filter_by(username=username).first_or_404()
return render_template('user.html', user=user, posts=user.posts)
@app.route('/edit_profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
form = EditProfileForm(current_user.username)
if form.validate_on_submit():
current_user.username = form.username.data
current_user.about_me = form.about_me.data
db.session.commit()
flash('Your changes have been saved.')
return redirect(url_for('edit_profile'))
elif request.method == 'GET':
form.username.data = current_user.username
form.about_me.data = current_user.about_me
    return render_template('edit_profile.html', title='Edit Profile', form=form)
@app.route('/follow/<username>')
@login_required
def follow(username):
user = User.query.filter_by(username=username).first()
if user is None:
flash('User {} not found.'.format(username))
return redirect(url_for('index'))
if user == current_user:
flash('You cannot follow yourself!')
return redirect(url_for('user', username=username))
current_user.follow(user)
db.session.commit()
flash('You are following {}!'.format(username))
return redirect(url_for('user', username=username))
@app.route('/unfollow/<username>')
@login_required
def unfollow(username):
user = User.query.filter_by(username=username).first()
if user is None:
flash('User {} not found.'.format(username))
return redirect(url_for('index'))
if user == current_user:
flash('You cannot unfollow yourself!')
return redirect(url_for('user', username=username))
current_user.unfollow(user)
db.session.commit()
flash('You are not following {}.'.format(username))
return redirect(url_for('user', username=username))
| 36.308824 | 120 | 0.728028 |
bb867308fdc2d15e96a93db7f41f67d1b77dd47d | 6,484 | py | Python | ppb/engine.py | nbraud/pursuedpybear | d6647063568133118f66cbe63401a289c8cecfce | ["Artistic-2.0"] | null | null | null | ppb/engine.py | nbraud/pursuedpybear | d6647063568133118f66cbe63401a289c8cecfce | ["Artistic-2.0"] | 1 | 2019-03-30T14:10:13.000Z | 2019-04-02T21:29:10.000Z | ppb/engine.py | nbraud/pursuedpybear | d6647063568133118f66cbe63401a289c8cecfce | ["Artistic-2.0"] | null | null | null |
from collections import defaultdict
from collections import deque
from contextlib import ExitStack
from itertools import chain
import time
from typing import Any
from typing import Callable
from typing import DefaultDict
from typing import List
from typing import Type
from typing import Union
import ppb.events as events
from ppb.abc import Engine
from ppb.events import StartScene
from ppb.events import EventMixin
from ppb.events import Quit
from ppb.systems import PygameEventPoller
from ppb.systems import Renderer
from ppb.systems import Updater
from ppb.utils import LoggingMixin
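# type of the Ellipsis singleton: lets `...` act as a wildcard key in
# event_extensions, meaning "extend every event type"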
_ellipsis = type(...)
class GameEngine(Engine, EventMixin, LoggingMixin):
def __init__(self, first_scene: Type, *,
systems=(Renderer, Updater, PygameEventPoller),
scene_kwargs=None, **kwargs):
super(GameEngine, self).__init__()
# Engine Configuration
self.first_scene = first_scene
self.scene_kwargs = scene_kwargs or {}
self.kwargs = kwargs
# Engine State
self.scenes = []
self.events = deque()
self.event_extensions: DefaultDict[Union[Type, _ellipsis], List[Callable[[Any], None]]] = defaultdict(list)
self.running = False
self.entered = False
self._last_idle_time = None
# Systems
self.systems_classes = systems
self.systems = []
self.exit_stack = ExitStack()
@property
def current_scene(self):
try:
return self.scenes[-1]
except IndexError:
return None
def __enter__(self):
self.logger.info("Entering context")
self.start_systems()
self.entered = True
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.logger.info("Exiting context")
self.entered = False
self.exit_stack.close()
def start_systems(self):
if self.systems:
return
for system in self.systems_classes:
if isinstance(system, type):
system = system(engine=self, **self.kwargs)
self.systems.append(system)
self.exit_stack.enter_context(system)
def run(self):
if not self.entered:
with self:
self.start()
self.main_loop()
else:
self.start()
self.main_loop()
def start(self):
self.running = True
self._last_idle_time = time.monotonic()
self.activate({"scene_class": self.first_scene,
"kwargs": self.scene_kwargs})
def main_loop(self):
while self.running:
time.sleep(0)
now = time.monotonic()
self.signal(events.Idle(now - self._last_idle_time))
self._last_idle_time = now
while self.events:
self.publish()
def activate(self, next_scene: dict):
scene = next_scene["scene_class"]
if scene is None:
return
args = next_scene.get("args", [])
kwargs = next_scene.get("kwargs", {})
self.scenes.append(scene(self, *args, **kwargs))
def signal(self, event):
self.events.append(event)
def publish(self):
event = self.events.popleft()
scene = self.current_scene
event.scene = scene
extensions = chain(self.event_extensions[type(event)], self.event_extensions[...])
for callback in extensions:
callback(event)
self.__event__(event, self.signal)
for system in self.systems:
system.__event__(event, self.signal)
# Required for if we publish with no current scene.
# Should only happen when the last scene stops via event.
if scene is not None:
scene.__event__(event, self.signal)
for game_object in scene:
game_object.__event__(event, self.signal)
def on_start_scene(self, event: StartScene, signal: Callable[[Any], None]):
"""
Start a new scene. The current scene pauses.
"""
self.pause_scene()
self.start_scene(event.new_scene, event.kwargs)
def on_stop_scene(self, event: events.StopScene, signal: Callable[[Any], None]):
"""
Stop a running scene. If there's a scene on the stack, it resumes.
"""
self.stop_scene()
if self.current_scene is not None:
signal(events.SceneContinued())
else:
signal(events.Quit())
def on_replace_scene(self, event: events.ReplaceScene, signal):
"""
Replace the running scene with a new one.
"""
self.stop_scene()
self.start_scene(event.new_scene, event.kwargs)
def on_quit(self, quit_event: Quit, signal: Callable[[Any], None]):
self.running = False
def pause_scene(self):
# Empty the queue before changing scenes.
self.flush_events()
self.signal(events.ScenePaused())
self.publish()
def stop_scene(self):
# Empty the queue before changing scenes.
self.flush_events()
self.signal(events.SceneStopped())
self.publish()
self.scenes.pop()
def start_scene(self, scene, kwargs):
if isinstance(scene, type):
scene = scene(self, **(kwargs or {}))
self.scenes.append(scene)
self.signal(events.SceneStarted())
def register(self, event_type: Union[Type, _ellipsis], callback: Callable[[], Any]):
"""
Register a callback to be applied to an event at time of publishing.
Primarily to be used by subsystems.
The callback will receive the event. Your code should modify the event
in place. It does not need to return it.
:param event_type: The class of an event.
:param callback: A callable, must accept an event, and return no value.
:return: None
"""
if not isinstance(event_type, type) and event_type is not ...:
raise TypeError(f"{type(self)}.register requires event_type to be a type.")
if not callable(callback):
raise TypeError(f"{type(self)}.register requires callback to be callable.")
self.event_extensions[event_type].append(callback)
def flush_events(self):
"""
Flush the event queue.
Call before doing anything that will cause signals to be delivered to
the wrong scene.
"""
self.events = deque()
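# Illustrative sketch only (not part of ppb): using register() to attach an event
# extension that stamps every published event with a monotonic timestamp.  The
# attribute name `wall_time` is hypothetical.
def _example_register_extension(engine: GameEngine):
    def stamp(event):
        event.wall_time = time.monotonic()  # modify the event in place; no return value
    engine.register(..., stamp)             # Ellipsis registers the callback for every event type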
| 31.784314 | 115 | 0.61752 |
db56aad5ff327239ce2e35d7a81914fd3886771a | 110,019 | py | Python | emscripten.py | revmischa/emscripten | ad0838a2f354f86b1bbf1fb799faeadb29b9a7b5 | ["MIT"] | null | null | null | emscripten.py | revmischa/emscripten | ad0838a2f354f86b1bbf1fb799faeadb29b9a7b5 | ["MIT"] | null | null | null | emscripten.py | revmischa/emscripten | ad0838a2f354f86b1bbf1fb799faeadb29b9a7b5 | ["MIT"] | null | null | null |
# Copyright 2010 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""A small wrapper script around the core JS compiler. This calls that
compiler with the settings given to it. It can also read data from C/C++
header files (so that the JS compiler can see the constants in those
headers, for the libc implementation in JS).
"""
from __future__ import print_function
import difflib
import os
import json
import subprocess
import re
import time
import logging
import shutil
import pprint
from collections import OrderedDict
from tools import shared
from tools import gen_struct_info
from tools import jsrun
from tools.response_file import substitute_response_files
from tools.shared import WINDOWS, asstr, path_from_root, exit_with_error
from tools.toolchain_profiler import ToolchainProfiler
from tools.minified_js_name_generator import MinifiedJsNameGenerator
logger = logging.getLogger('emscripten')
STDERR_FILE = os.environ.get('EMCC_STDERR_FILE')
if STDERR_FILE:
STDERR_FILE = os.path.abspath(STDERR_FILE)
logger.info('logging stderr in js compiler phase into %s' % STDERR_FILE)
STDERR_FILE = open(STDERR_FILE, 'w')
def get_configuration():
if hasattr(get_configuration, 'configuration'):
return get_configuration.configuration
configuration = shared.Configuration(environ=os.environ)
get_configuration.configuration = configuration
return configuration
def quote(prop):
if shared.Settings.USE_CLOSURE_COMPILER == 2:
return ''.join(["'" + p + "'" for p in prop.split('.')])
else:
return prop
def access_quote(prop):
if shared.Settings.USE_CLOSURE_COMPILER == 2:
return ''.join(["['" + p + "']" for p in prop.split('.')])
else:
return '.' + prop
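# e.g. with USE_CLOSURE_COMPILER == 2, access_quote('Module.preRun') yields
# "['Module']['preRun']" so closure cannot minify the property names; otherwise
# it yields '.Module.preRun'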
def emscript_fastcomp(infile, outfile, memfile, compiler_engine,
temp_files, DEBUG):
"""Runs the emscripten LLVM-to-JS compiler.
Args:
infile: The path to the input LLVM assembly file.
outfile: An open file object where the output is written.
"""
assert shared.Settings.ASM_JS, 'fastcomp is asm.js-only (mode 1 or 2)'
success = False
try:
# Overview:
# * Run LLVM backend to emit JS. JS includes function bodies, memory initializer,
# and various metadata
# * Run compiler.js on the metadata to emit the shell js code, pre/post-ambles,
# JS library dependencies, etc.
# metadata is modified by reference in some of the below
# these functions are split up to force variables to go out of scope and allow
# memory to be reclaimed
with ToolchainProfiler.profile_block('get_and_parse_backend'):
backend_output = compile_js(infile, temp_files, DEBUG)
funcs, metadata, mem_init = parse_fastcomp_output(backend_output, DEBUG)
fixup_metadata_tables(metadata)
funcs = fixup_functions(funcs, metadata)
with ToolchainProfiler.profile_block('compiler_glue'):
glue, forwarded_data = compiler_glue(metadata, compiler_engine, temp_files, DEBUG)
with ToolchainProfiler.profile_block('function_tables_and_exports'):
(post, function_table_data, bundled_args) = (
function_tables_and_exports(funcs, metadata, mem_init, glue, forwarded_data, outfile, DEBUG))
with ToolchainProfiler.profile_block('write_output_file'):
finalize_output(outfile, post, function_table_data, bundled_args, metadata, DEBUG)
success = True
finally:
outfile.close()
if not success:
shared.try_delete(outfile.name) # remove partial output
def compile_js(infile, temp_files, DEBUG):
"""Compile infile with asm.js backend, return the contents of the compiled js"""
with temp_files.get_file('.4.js') as temp_js:
backend_cmd = create_backend_cmd(infile, temp_js)
if DEBUG:
logger.debug('emscript: llvm backend: ' + ' '.join(backend_cmd))
t = time.time()
shared.print_compiler_stage(backend_cmd)
with ToolchainProfiler.profile_block('emscript_llvm_backend'):
shared.check_call(backend_cmd)
if DEBUG:
logger.debug(' emscript: llvm backend took %s seconds' % (time.time() - t))
# Split up output
backend_output = open(temp_js).read()
return backend_output
def parse_fastcomp_output(backend_output, DEBUG):
start_funcs_marker = '// EMSCRIPTEN_START_FUNCTIONS'
end_funcs_marker = '// EMSCRIPTEN_END_FUNCTIONS'
metadata_split_marker = '// EMSCRIPTEN_METADATA'
start_funcs = backend_output.index(start_funcs_marker)
end_funcs = backend_output.rindex(end_funcs_marker)
metadata_split = backend_output.rindex(metadata_split_marker)
funcs = backend_output[start_funcs + len(start_funcs_marker):end_funcs]
metadata_raw = backend_output[metadata_split + len(metadata_split_marker):]
mem_init = backend_output[end_funcs + len(end_funcs_marker):metadata_split]
  # we no longer use the "Runtime" object. TODO: stop emitting it in the backend
mem_init = mem_init.replace('Runtime.', '')
try:
metadata = json.loads(metadata_raw, object_pairs_hook=OrderedDict)
except ValueError:
logger.error('emscript: failure to parse metadata output from compiler backend. raw output is: \n' + metadata_raw)
raise
# This key is being added to fastcomp but doesn't exist in the current
# version.
metadata.setdefault('externFunctions', [])
if 'externUses' not in metadata:
exit_with_error('Your fastcomp compiler is out of date, please update! (need >= 1.38.26)')
# JS optimizer turns some heap accesses to others as an optimization, so make HEAP8 imply HEAPU8, HEAP16->HEAPU16, and HEAPF64->HEAPF32.
if 'Int8Array' in metadata['externUses']:
metadata['externUses'] += ['Uint8Array']
if 'Int16Array' in metadata['externUses']:
metadata['externUses'] += ['Uint16Array']
if 'Float64Array' in metadata['externUses']:
metadata['externUses'] += ['Float32Array']
# If we are generating references to Math.fround() from here in emscripten.py, declare it used as well.
if provide_fround() or metadata['simd']:
metadata['externUses'] += ['Math.fround']
# functions marked llvm.used in the code are exports requested by the user
shared.Building.user_requested_exports += metadata['exports']
# In MINIMAL_RUNTIME stackSave() and stackRestore are JS library functions. If LLVM backend generated
# calls to invoke_*() functions that save and restore the stack, we must include the stack functions
# explicitly into the build. (In traditional runtime the stack functions are always present, so this
# tracking is not needed)
if shared.Settings.MINIMAL_RUNTIME and (len(metadata['invokeFuncs']) > 0 or shared.Settings.LINKABLE):
shared.Settings.EXPORTED_FUNCTIONS += ['stackSave', 'stackRestore']
shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += ['$stackSave', '$stackRestore']
return funcs, metadata, mem_init
def fixup_metadata_tables(metadata):
# if emulating pointer casts, force all tables to the size of the largest
# (for wasm, we use binaryen's fpcast-emu pass, we don't need to do anything
# here)
if shared.Settings.EMULATE_FUNCTION_POINTER_CASTS and not shared.Settings.WASM:
max_size = 0
for k, v in metadata['tables'].items():
max_size = max(max_size, v.count(',') + 1)
for k, v in metadata['tables'].items():
curr = v.count(',') + 1
if curr < max_size:
if v.count('[]') == 1:
metadata['tables'][k] = v.replace(']', (','.join(['0'] * (max_size - curr)) + ']'))
else:
metadata['tables'][k] = v.replace(']', (',0' * (max_size - curr)) + ']')
if shared.Settings.SIDE_MODULE:
for k in metadata['tables'].keys():
metadata['tables'][k] = metadata['tables'][k].replace('var FUNCTION_TABLE_', 'var SIDE_FUNCTION_TABLE_')
def fixup_functions(funcs, metadata):
# function table masks
table_sizes = {}
for k, v in metadata['tables'].items():
# undercounts by one, but that is what we want
table_sizes[k] = str(v.count(','))
# if shared.Settings.ASSERTIONS >= 2 and table_sizes[k] == 0:
# shared.warning('no function pointers with signature ' + k + ', but there is a call, which will abort if it occurs (this can result from undefined behavior, check for compiler warnings on your source files and consider -Werror)'
funcs = re.sub(r"#FM_(\w+)#", lambda m: table_sizes[m.groups(0)[0]], funcs)
# fix +float into float.0, if not running js opts
if not shared.Settings.RUNNING_JS_OPTS:
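    # e.g. 'x = +5;' becomes 'x = 5.0;' and 'x = +1e3;' becomes 'x = 1.0e3;', giving
    # asm.js explicit double literals when the JS optimizer will not run to add them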
def fix_dot_zero(m):
num = m.group(3)
# TODO: handle 0x floats?
if num.find('.') < 0:
e = num.find('e')
if e < 0:
num += '.0'
else:
num = num[:e] + '.0' + num[e:]
return m.group(1) + m.group(2) + num
funcs = re.sub(r'([(=,+\-*/%<>:?] *)\+(-?)((0x)?[0-9a-f]*\.?[0-9]+([eE][-+]?[0-9]+)?)', fix_dot_zero, funcs)
return funcs
def compiler_glue(metadata, compiler_engine, temp_files, DEBUG):
if DEBUG:
logger.debug('emscript: js compiler glue')
t = time.time()
# FIXME: do these one by one as normal js lib funcs
metadata['declares'] = [i64_func for i64_func in metadata['declares'] if i64_func not in ['getHigh32', 'setHigh32']]
update_settings_glue(metadata, DEBUG)
assert not (metadata['simd'] and shared.Settings.WASM), 'SIMD is used, but not supported in WASM mode yet'
assert not (shared.Settings.SIMD and shared.Settings.WASM), 'SIMD is requested, but not supported in WASM mode yet'
glue, forwarded_data = compile_settings(compiler_engine, temp_files)
if DEBUG:
logger.debug(' emscript: glue took %s seconds' % (time.time() - t))
return glue, forwarded_data
def analyze_table(function_table_data):
def table_size(table):
table_contents = table[table.index('[') + 1: table.index(']')]
if len(table_contents) == 0: # empty table
return 0
return table_contents.count(',') + 1
# note that this is a minimal estimate, as when asm2wasm lays out tables it adds padding
table_total_size = sum(table_size(s) for s in function_table_data.values())
shared.Settings.WASM_TABLE_SIZE = table_total_size
# Extracts from JS library code dependencies to runtime primitives.
def get_asm_extern_primitives(pre):
primitives = re.search(r'\/\/ ASM_LIBRARY EXTERN PRIMITIVES: ([^\n]*)', pre)
if primitives:
return [x.strip().replace('Math_', 'Math.') for x in primitives.group(1).split(',')]
else:
return []
def function_tables_and_exports(funcs, metadata, mem_init, glue, forwarded_data, outfile, DEBUG):
if DEBUG:
logger.debug('emscript: python processing: function tables and exports')
t = time.time()
forwarded_json = json.loads(forwarded_data)
# merge in information from llvm backend
function_table_data = metadata['tables']
if shared.Settings.WASM:
analyze_table(function_table_data)
# merge forwarded data
shared.Settings.EXPORTED_FUNCTIONS = forwarded_json['EXPORTED_FUNCTIONS']
pre, post = glue.split('// EMSCRIPTEN_END_FUNCS')
pre = apply_script_source(pre)
asm_extern_primitives = get_asm_extern_primitives(pre)
metadata['externUses'] += asm_extern_primitives
pre = memory_and_global_initializers(pre, metadata, mem_init)
pre, funcs_js = get_js_funcs(pre, funcs)
all_exported_functions = get_all_exported_functions(function_table_data)
all_implemented = get_all_implemented(forwarded_json, metadata)
report_missing_symbols(all_implemented, pre)
implemented_functions = get_implemented_functions(metadata)
pre = include_asm_consts(pre, forwarded_json, metadata)
pre = apply_table(pre)
outfile.write(pre)
pre = None
# Move preAsms to their right place
def move_preasm(m):
contents = m.groups(0)[0]
outfile.write(contents + '\n')
return ''
if not shared.Settings.BOOTSTRAPPING_STRUCT_INFO and len(funcs_js) > 1:
funcs_js[1] = re.sub(r'/\* PRE_ASM \*/(.*)\n', move_preasm, funcs_js[1])
if 'pre' in function_table_data:
pre_tables = function_table_data['pre']
del function_table_data['pre']
else:
pre_tables = ''
function_table_sigs = list(function_table_data.keys())
in_table, debug_tables, function_tables_defs = make_function_tables_defs(
implemented_functions, all_implemented, function_table_data, metadata)
exported_implemented_functions = get_exported_implemented_functions(
all_exported_functions, all_implemented, metadata)
# List of function signatures of used 'invoke_xxx()' functions in the application
# For backwards compatibility if one might be using a mismatching Emscripten compiler version, if 'invokeFuncs' is not present in metadata,
# use the full list of signatures in function table and generate invoke_() functions for all signatures in the program (producing excessive code size)
# we must also emit the full list if we are emitting code that can be linked later
if 'invokeFuncs' in metadata and not shared.Settings.LINKABLE:
invoke_function_names = metadata['invokeFuncs']
else:
invoke_function_names = ['invoke_' + x for x in function_table_sigs]
asm_setup = create_asm_setup(debug_tables, function_table_data, invoke_function_names, metadata)
basic_funcs = create_basic_funcs(function_table_sigs, invoke_function_names)
basic_vars = create_basic_vars(exported_implemented_functions, forwarded_json, metadata)
funcs_js += create_mftCall_funcs(function_table_data)
exports = create_exports(exported_implemented_functions, in_table, function_table_data, metadata)
# calculate globals
try:
del forwarded_json['Variables']['globals']['_llvm_global_ctors'] # not a true variable
except KeyError:
pass
if not shared.Settings.RELOCATABLE:
global_vars = metadata['externs']
else:
global_vars = [] # linkable code accesses globals through function calls
global_funcs = set(key for key, value in forwarded_json['Functions']['libraryFunctions'].items() if value != 2)
global_funcs = sorted(global_funcs.difference(set(global_vars)).difference(implemented_functions))
if shared.Settings.RELOCATABLE:
global_funcs += ['g$' + extern for extern in metadata['externs']]
global_funcs += ['fp$' + extern for extern in metadata['externFunctions']]
# Tracks the set of used (minified) function names in
# JS symbols imported to asm.js module.
minified_js_names = MinifiedJsNameGenerator()
# Converts list of imports ['foo', 'bar', ...] to a dictionary of
# name mappings in form { 'minified': 'unminified', ... }
def define_asmjs_import_names(imports):
if shared.Settings.MINIFY_ASMJS_IMPORT_NAMES:
return [(minified_js_names.generate(), i) for i in imports]
else:
return [(i, i) for i in imports]
basic_funcs = define_asmjs_import_names(basic_funcs)
global_funcs = define_asmjs_import_names(global_funcs)
basic_vars = define_asmjs_import_names(basic_vars)
global_vars = define_asmjs_import_names(global_vars)
bg_funcs = basic_funcs + global_funcs
bg_vars = basic_vars + global_vars
asm_global_funcs = create_asm_global_funcs(bg_funcs, metadata)
asm_global_vars = create_asm_global_vars(bg_vars)
the_global = create_the_global(metadata)
sending_vars = bg_funcs + bg_vars
sending = OrderedDict([(math_fix(minified), unminified) for (minified, unminified) in sending_vars])
if shared.Settings.WASM:
add_standard_wasm_imports(sending)
sorted_sending_keys = sorted(sending.keys())
sending = '{ ' + ', '.join('"' + k + '": ' + sending[k] for k in sorted_sending_keys) + ' }'
receiving = create_receiving(function_table_data, function_tables_defs,
exported_implemented_functions, metadata['initializers'])
post = apply_table(post)
post = apply_static_code_hooks(post)
if shared.Settings.MINIMAL_RUNTIME:
# Generate invocations for all global initializers directly off the asm export object, e.g. asm['__GLOBAL__INIT']();
post = post.replace('/*** RUN_GLOBAL_INITIALIZERS(); ***/', '\n'.join(["asm['" + x + "']();" for x in global_initializer_funcs(metadata['initializers'])]))
if shared.Settings.WASM:
# Declare all exports out to global JS scope so that JS library functions can access them in a way that minifies well with Closure
# e.g. var a,b,c,d,e,f;
post = post.replace('/*** ASM_MODULE_EXPORTS_DECLARES ***/', 'var ' + ','.join(shared.Settings.MODULE_EXPORTS) + ';')
# Generate assignments from all asm.js/wasm exports out to the JS variables above: e.g. a = asm['a']; b = asm['b'];
post = post.replace('/*** ASM_MODULE_EXPORTS ***/', receiving)
receiving = ''
function_tables_impls = make_function_tables_impls(function_table_data)
final_function_tables = '\n'.join(function_tables_impls) + '\n' + function_tables_defs
if shared.Settings.EMULATED_FUNCTION_POINTERS:
final_function_tables = (
final_function_tables
.replace("asm['", '')
.replace("']", '')
.replace('var SIDE_FUNCTION_TABLE_', 'var FUNCTION_TABLE_')
.replace('var dynCall_', '//')
)
if DEBUG:
logger.debug('asm text sizes' + str([
[len(s) for s in funcs_js], len(asm_setup), len(asm_global_vars), len(asm_global_funcs), len(pre_tables),
len('\n'.join(function_tables_impls)), len(function_tables_defs) + (function_tables_defs.count('\n') * len(' ')),
len(exports), len(the_global), len(sending), len(receiving)]))
logger.debug(' emscript: python processing: function tables and exports took %s seconds' % (time.time() - t))
bundled_args = (funcs_js, asm_setup, the_global, sending, receiving, asm_global_vars,
asm_global_funcs, pre_tables, final_function_tables, exports)
return (post, function_table_data, bundled_args)
def finalize_output(outfile, post, function_table_data, bundled_args, metadata, DEBUG):
function_table_sigs = function_table_data.keys()
module = create_module_asmjs(function_table_sigs, metadata, *bundled_args)
if DEBUG:
logger.debug('emscript: python processing: finalize')
t = time.time()
write_output_file(outfile, post, module)
module = None
if DEBUG:
logger.debug(' emscript: python processing: finalize took %s seconds' % (time.time() - t))
write_cyberdwarf_data(outfile, metadata)
# Given JS code that consists solely of a series of "var a = ...;\n var b = ...;" statements,
# this function collapses the redundant 'var ' statements at the beginning of each line to a
# single var a =..., b=..., c=...; statement.
def collapse_redundant_vars(code):
if shared.Settings.WASM:
return code # Skip if targeting Wasm, this does not matter there
old_code = ''
while code != old_code: # Repeated vars overlap, so can't run in one regex pass. Runs in O(log(N)) time
old_code = code
code = re.sub(r'(var [^;]*);\s*var ', r'\1,\n ', code)
return code
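# For illustration (not executed as part of the build), with WASM disabled:
#   collapse_redundant_vars('var a = 1;\n var b = 2;\n var c = 3;')
# folds the three statements into a single 'var a = 1, b = 2, c = 3;' declaration
# (the replacement above keeps each initializer on its own line)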
def global_initializer_funcs(initializers):
# If we have at most one global ctor, no need to group global initializers.
# Also in EVAL_CTORS mode, we want to try to evaluate the individual ctor functions, so in that mode,
# do not group ctors into one.
return ['globalCtors'] if (len(initializers) > 1 and not shared.Settings.EVAL_CTORS) else initializers
# Each .cpp file with global constructors generates a __GLOBAL__init() function that needs to be
# called to construct the global objects in that compilation unit. This function groups all these
# global initializer functions together into a single globalCtors() function that lives inside the
# asm.js/wasm module, and gets exported out to JS scope to be called at the startup of the application.
def create_global_initializer(initializers):
# If we have no global ctors, don't even generate a dummy empty function to save code space
# Also in EVAL_CTORS mode, we want to try to evaluate the individual ctor functions, so in that mode,
# we do not group ctors into one.
if 'globalCtors' not in global_initializer_funcs(initializers):
return ''
global_initializer = ''' function globalCtors() {
%s
}''' % '\n '.join(i + '();' for i in initializers)
return global_initializer
def create_module_asmjs(function_table_sigs, metadata,
funcs_js, asm_setup, the_global, sending, receiving, asm_global_vars,
asm_global_funcs, pre_tables, final_function_tables, exports):
receiving += create_named_globals(metadata)
runtime_funcs = create_runtime_funcs_asmjs(exports, metadata)
asm_start_pre = create_asm_start_pre(asm_setup, the_global, sending, metadata)
memory_views = create_memory_views(metadata)
asm_temp_vars = create_asm_temp_vars(metadata)
asm_runtime_thread_local_vars = create_asm_runtime_thread_local_vars()
stack = ''
if not shared.Settings.RELOCATABLE and not (shared.Settings.WASM and shared.Settings.SIDE_MODULE):
if 'STACKTOP' in shared.Settings.ASM_PRIMITIVE_VARS:
stack += apply_memory(' var STACKTOP = {{{ STACK_BASE }}};\n')
if 'STACK_MAX' in shared.Settings.ASM_PRIMITIVE_VARS:
stack += apply_memory(' var STACK_MAX = {{{ STACK_MAX }}};\n')
if 'tempFloat' in shared.Settings.ASM_PRIMITIVE_VARS:
temp_float = ' var tempFloat = %s;\n' % ('Math_fround(0)' if provide_fround() else '0.0')
else:
temp_float = ''
async_state = ' var asyncState = 0;\n' if shared.Settings.EMTERPRETIFY_ASYNC else ''
f0_fround = ' const f0 = Math_fround(0);\n' if provide_fround() else ''
replace_memory = create_replace_memory(metadata)
start_funcs_marker = '\n// EMSCRIPTEN_START_FUNCS\n'
asm_end = create_asm_end(exports)
asm_variables = collapse_redundant_vars(memory_views + asm_global_vars + asm_temp_vars + asm_runtime_thread_local_vars + '\n' + asm_global_funcs + stack + temp_float + async_state + f0_fround)
asm_global_initializer = create_global_initializer(metadata['initializers'])
module = [
asm_start_pre,
asm_variables,
replace_memory,
start_funcs_marker,
asm_global_initializer
] + runtime_funcs + funcs_js + [
'\n ',
pre_tables, final_function_tables, asm_end,
'\n', receiving, ';\n'
]
if shared.Settings.SIDE_MODULE:
module.append('''
parentModule['registerFunctions'](%s, Module);
''' % str([str(f) for f in function_table_sigs]))
return module
def write_output_file(outfile, post, module):
for i in range(len(module)): # do this loop carefully to save memory
module[i] = normalize_line_endings(module[i])
outfile.write(module[i])
post = normalize_line_endings(post)
outfile.write(post)
def write_cyberdwarf_data(outfile, metadata):
if not shared.Settings.CYBERDWARF:
return
assert('cyberdwarf_data' in metadata)
cd_file_name = outfile.name + ".cd"
with open(cd_file_name, 'w') as f:
json.dump({'cyberdwarf': metadata['cyberdwarf_data']}, f)
def create_backend_cmd(infile, temp_js):
"""Create asm.js backend command from settings dict"""
args = [
shared.LLVM_COMPILER, infile, '-march=js', '-filetype=asm', '-o', temp_js,
'-emscripten-stack-size=%d' % shared.Settings.TOTAL_STACK,
'-O%s' % shared.Settings.OPT_LEVEL,
]
if shared.Settings.PRECISE_F32:
args += ['-emscripten-precise-f32']
if shared.Settings.USE_PTHREADS:
args += ['-emscripten-enable-pthreads']
if shared.Settings.WARN_UNALIGNED:
args += ['-emscripten-warn-unaligned']
if shared.Settings.RESERVED_FUNCTION_POINTERS > 0:
args += ['-emscripten-reserved-function-pointers=%d' % shared.Settings.RESERVED_FUNCTION_POINTERS]
if shared.Settings.ASSERTIONS > 0:
args += ['-emscripten-assertions=%d' % shared.Settings.ASSERTIONS]
if shared.Settings.ALIASING_FUNCTION_POINTERS == 0:
args += ['-emscripten-no-aliasing-function-pointers']
if shared.Settings.EMULATED_FUNCTION_POINTERS:
args += ['-emscripten-emulated-function-pointers']
if shared.Settings.EMULATE_FUNCTION_POINTER_CASTS:
args += ['-emscripten-emulate-function-pointer-casts']
if shared.Settings.RELOCATABLE:
args += ['-emscripten-relocatable']
args += ['-emscripten-global-base=0']
elif shared.Settings.GLOBAL_BASE >= 0:
args += ['-emscripten-global-base=%d' % shared.Settings.GLOBAL_BASE]
if shared.Settings.SIDE_MODULE:
args += ['-emscripten-side-module']
if shared.Settings.LEGALIZE_JS_FFI != 1:
args += ['-emscripten-legalize-javascript-ffi=0']
if shared.Settings.DISABLE_EXCEPTION_CATCHING != 1:
args += ['-enable-emscripten-cpp-exceptions']
if shared.Settings.DISABLE_EXCEPTION_CATCHING == 2:
args += ['-emscripten-cpp-exceptions-whitelist=' + ','.join(shared.Settings.EXCEPTION_CATCHING_WHITELIST or ['fake'])]
if not shared.Settings.EXIT_RUNTIME:
args += ['-emscripten-no-exit-runtime']
if shared.Settings.WORKAROUND_IOS_9_RIGHT_SHIFT_BUG:
args += ['-emscripten-asmjs-work-around-ios-9-right-shift-bug']
if shared.Settings.WASM:
args += ['-emscripten-wasm']
if shared.Building.is_wasm_only():
args += ['-emscripten-only-wasm']
if shared.Settings.CYBERDWARF:
args += ['-enable-cyberdwarf']
return args
def optimize_syscalls(declares, DEBUG):
"""Disables filesystem if only a limited subset of syscalls is used.
Our syscalls are static, and so if we see a very limited set of them - in particular,
no open() syscall and just simple writing - then we don't need full filesystem support.
If FORCE_FILESYSTEM is set, we can't do this. We also don't do it if INCLUDE_FULL_LIBRARY, since
not including the filesystem would mean not including the full JS libraries, and the same for
MAIN_MODULE since a side module might need the filesystem.
"""
relevant_settings = ['FORCE_FILESYSTEM', 'INCLUDE_FULL_LIBRARY', 'MAIN_MODULE']
if any(shared.Settings[s] for s in relevant_settings):
return
if shared.Settings.FILESYSTEM == 0:
    # without filesystem support, it doesn't matter which syscalls need filesystem access
shared.Settings.SYSCALLS_REQUIRE_FILESYSTEM = 0
else:
syscall_prefixes = ('__syscall', 'fd_', '__wasi_fd_')
syscalls = [d for d in declares if d.startswith(syscall_prefixes)]
# check if the only filesystem syscalls are in: close, ioctl, llseek, write
# (without open, etc.. nothing substantial can be done, so we can disable
# extra filesystem support in that case)
if set(syscalls).issubset(set([
'__syscall6', '__syscall54', '__syscall140',
'fd_seek', '__wasi_fd_seek',
'fd_write', '__wasi_fd_write',
'fd_close', '__wasi_fd_close',
])):
if DEBUG:
logger.debug('very limited syscalls (%s) so disabling full filesystem support', ', '.join(str(s) for s in syscalls))
shared.Settings.SYSCALLS_REQUIRE_FILESYSTEM = 0
def is_int(x):
try:
int(x)
return True
except ValueError:
return False
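# round an address up to the next 16-byte boundary, e.g. 17 -> 32 and 16 -> 16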
def align_memory(addr):
return (addr + 15) & -16
def align_static_bump(metadata):
metadata['staticBump'] = align_memory(metadata['staticBump'])
return metadata['staticBump']
def update_settings_glue(metadata, DEBUG):
optimize_syscalls(metadata['declares'], DEBUG)
if shared.Settings.CYBERDWARF:
shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE.append("cyberdwarf_Debugger")
shared.Settings.EXPORTED_FUNCTIONS.append("cyberdwarf_Debugger")
# Integrate info from backend
if shared.Settings.SIDE_MODULE:
# we don't need any JS library contents in side modules
shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE = []
if metadata.get('cantValidate') and shared.Settings.ASM_JS != 2:
shared.WarningManager.warn('ALMOST_ASM', 'disabling asm.js validation due to use of non-supported features: ' + metadata['cantValidate'])
shared.Settings.ASM_JS = 2
all_funcs = shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE + [shared.JS.to_nice_ident(d) for d in metadata['declares']]
implemented_funcs = [x[1:] for x in metadata['implementedFunctions']]
shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE = sorted(set(all_funcs).difference(implemented_funcs))
shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE += [x[1:] for x in metadata['externs']]
if metadata['simd']:
shared.Settings.SIMD = 1
if shared.Settings.ASM_JS != 2:
shared.WarningManager.warn('ALMOST_ASM', 'disabling asm.js validation due to use of SIMD')
shared.Settings.ASM_JS = 2
shared.Settings.MAX_GLOBAL_ALIGN = metadata['maxGlobalAlign']
shared.Settings.IMPLEMENTED_FUNCTIONS = metadata['implementedFunctions']
# Extract the list of function signatures that MAIN_THREAD_EM_ASM blocks in
# the compiled code have, each signature will need a proxy function invoker
# generated for it.
def read_proxied_function_signatures(asmConsts):
proxied_function_signatures = set()
for _, sigs, proxying_types in asmConsts.values():
for sig, proxying_type in zip(sigs, proxying_types):
if proxying_type == 'sync_on_main_thread_':
proxied_function_signatures.add(sig + '_sync')
elif proxying_type == 'async_on_main_thread_':
proxied_function_signatures.add(sig + '_async')
return list(proxied_function_signatures)
shared.Settings.PROXIED_FUNCTION_SIGNATURES = read_proxied_function_signatures(metadata['asmConsts'])
shared.Settings.STATIC_BUMP = align_static_bump(metadata)
if shared.Settings.WASM_BACKEND:
shared.Settings.BINARYEN_FEATURES = metadata['features']
shared.Settings.WASM_TABLE_SIZE = metadata['tableSize']
if shared.Settings.RELOCATABLE:
# When building relocatable output (e.g. MAIN_MODULE) the reported table
# size does not include the reserved slot at zero for the null pointer.
# Instead we use __table_base to offset the elements by 1.
shared.Settings.WASM_TABLE_SIZE += 1
shared.Settings.MAIN_READS_PARAMS = metadata['mainReadsParams']
# static code hooks
class StaticCodeHooks:
atinits = []
atmains = []
atexits = []
def apply_static_code_hooks(code):
code = code.replace('{{{ ATINITS }}}', StaticCodeHooks.atinits)
code = code.replace('{{{ ATMAINS }}}', StaticCodeHooks.atmains)
code = code.replace('{{{ ATEXITS }}}', StaticCodeHooks.atexits)
return code
def apply_forwarded_data(forwarded_data):
forwarded_json = json.loads(forwarded_data)
# Be aware of JS static allocations
shared.Settings.STATIC_BUMP = forwarded_json['STATIC_BUMP']
shared.Settings.DYNAMICTOP_PTR = forwarded_json['DYNAMICTOP_PTR']
# Be aware of JS static code hooks
StaticCodeHooks.atinits = str(forwarded_json['ATINITS'])
StaticCodeHooks.atmains = str(forwarded_json['ATMAINS'])
StaticCodeHooks.atexits = str(forwarded_json['ATEXITS'])
def compile_settings(compiler_engine, temp_files):
# Save settings to a file to work around v8 issue 1579
with temp_files.get_file('.txt') as settings_file:
with open(settings_file, 'w') as s:
json.dump(shared.Settings.to_dict(), s, sort_keys=True)
# Call js compiler
env = os.environ.copy()
env['EMCC_BUILD_DIR'] = os.getcwd()
out = jsrun.run_js_tool(path_from_root('src', 'compiler.js'), compiler_engine,
[settings_file], stdout=subprocess.PIPE, stderr=STDERR_FILE,
cwd=path_from_root('src'), env=env)
assert '//FORWARDED_DATA:' in out, 'Did not receive forwarded data in pre output - process failed?'
glue, forwarded_data = out.split('//FORWARDED_DATA:')
apply_forwarded_data(forwarded_data)
return glue, forwarded_data
class Memory():
def __init__(self):
# Note: if RELOCATABLE, then only relative sizes can be computed, and we don't
# actually write out any absolute memory locations ({{{ STACK_BASE }}}
# does not exist, etc.)
# Memory layout:
# * first the static globals
self.global_base = shared.Settings.GLOBAL_BASE
self.static_bump = shared.Settings.STATIC_BUMP
# * then the stack (up on fastcomp, down on upstream)
self.stack_low = align_memory(self.global_base + self.static_bump)
self.stack_high = align_memory(self.stack_low + shared.Settings.TOTAL_STACK)
if shared.Settings.WASM_BACKEND:
self.stack_base = self.stack_high
self.stack_max = self.stack_low
else:
self.stack_base = self.stack_low
self.stack_max = self.stack_high
# * then dynamic memory begins
self.dynamic_base = align_memory(self.stack_high)
if self.dynamic_base >= shared.Settings.TOTAL_MEMORY:
exit_with_error('Memory is not large enough for static data (%d) plus the stack (%d), please increase TOTAL_MEMORY (%d) to at least %d' % (self.static_bump, shared.Settings.TOTAL_STACK, shared.Settings.TOTAL_MEMORY, self.dynamic_base))
def apply_memory(js):
# Apply the statically-at-compile-time computed memory locations.
memory = Memory()
# Write it all out
js = js.replace('{{{ STATIC_BUMP }}}', str(memory.static_bump))
js = js.replace('{{{ STACK_BASE }}}', str(memory.stack_base))
js = js.replace('{{{ STACK_MAX }}}', str(memory.stack_max))
js = js.replace('{{{ DYNAMIC_BASE }}}', str(memory.dynamic_base))
logger.debug('global_base: %d stack_base: %d, stack_max: %d, dynamic_base: %d, static bump: %d', memory.global_base, memory.stack_base, memory.stack_max, memory.dynamic_base, memory.static_bump)
shared.Settings.DYNAMIC_BASE = memory.dynamic_base
return js
def apply_table(js):
js = js.replace('{{{ WASM_TABLE_SIZE }}}', str(shared.Settings.WASM_TABLE_SIZE))
return js
def apply_script_source(js):
js = js.replace('{{{ TARGET_BASENAME }}}', shared.Settings.TARGET_BASENAME)
return js
def memory_and_global_initializers(pre, metadata, mem_init):
if shared.Settings.SIMD == 1:
pre = open(path_from_root(os.path.join('src', 'ecmascript_simd.js'))).read() + '\n\n' + pre
staticbump = shared.Settings.STATIC_BUMP
pthread = ''
if shared.Settings.USE_PTHREADS:
pthread = 'if (!ENVIRONMENT_IS_PTHREAD)'
global_initializers = ''
if not shared.Settings.MINIMAL_RUNTIME:
# In traditional runtime, global initializers are pushed to the __ATINIT__ array to be processed when runtime is loaded
# In MINIMAL_RUNTIME global initializers are invoked directly off of the asm[''] export object, so this does not apply.
global_initializers = global_initializer_funcs(metadata['initializers'])
if len(global_initializers) > 0:
global_initializers = ', '.join('{ func: function() { %s() } }' % i for i in global_initializers)
global_initializers = '/* global initializers */ {pthread} __ATINIT__.push({global_initializers});'.format(pthread=pthread, global_initializers=global_initializers)
else:
global_initializers = '/* global initializers */ /*__ATINIT__.push();*/'
pre = pre.replace('STATICTOP = STATIC_BASE + 0;', '''\
STATICTOP = STATIC_BASE + {staticbump};
{global_initializers}
{mem_init}'''.format(staticbump=staticbump,
global_initializers=global_initializers,
mem_init=mem_init))
if shared.Settings.SIDE_MODULE:
pre = pre.replace('GLOBAL_BASE', 'gb')
pre = apply_memory(pre)
pre = apply_static_code_hooks(pre)
return pre
def get_js_funcs(pre, funcs):
funcs_js = [funcs]
parts = pre.split('// ASM_LIBRARY FUNCTIONS\n')
if len(parts) > 1:
pre = parts[0]
funcs_js.append(parts[1])
return pre, funcs_js
def get_all_exported_functions(function_table_data):
# both asm.js and otherwise
all_exported_functions = set(shared.Settings.EXPORTED_FUNCTIONS)
# additional functions to export from asm, if they are implemented
for additional_export in shared.Settings.DEFAULT_LIBRARY_FUNCS_TO_INCLUDE:
all_exported_functions.add('_' + additional_export)
if shared.Settings.EXPORT_FUNCTION_TABLES:
for table in function_table_data.values():
for func in table.split('[')[1].split(']')[0].split(','):
if func[0] == '_':
all_exported_functions.add(func)
return all_exported_functions
def get_all_implemented(forwarded_json, metadata):
return set(metadata['implementedFunctions']).union(forwarded_json['Functions']['implementedFunctions'])
def report_missing_symbols(all_implemented, pre):
# we are not checking anyway, so just skip this
if not shared.Settings.ERROR_ON_UNDEFINED_SYMBOLS and not shared.Settings.WARN_ON_UNDEFINED_SYMBOLS:
return
  # the initial list of missing functions are those the user explicitly exported
  # but which were not implemented in compiled code
missing = list(set(shared.Settings.USER_EXPORTED_FUNCTIONS) - all_implemented)
for requested in missing:
if ('function ' + asstr(requested)) in pre:
continue
# special-case malloc, EXPORTED by default for internal use, but we bake in a
# trivial allocator and warn at runtime if used in ASSERTIONS
    if requested == '_malloc':
continue
if shared.Settings.ERROR_ON_UNDEFINED_SYMBOLS:
exit_with_error('undefined exported function: "%s"', requested)
elif shared.Settings.WARN_ON_UNDEFINED_SYMBOLS:
shared.warning('undefined exported function: "%s"', requested)
def get_exported_implemented_functions(all_exported_functions, all_implemented, metadata):
funcs = set(metadata['exports'])
export_bindings = shared.Settings.EXPORT_BINDINGS
export_all = shared.Settings.EXPORT_ALL
for key in all_implemented:
if key in all_exported_functions or export_all or (export_bindings and key.startswith('_emscripten_bind')):
funcs.add(key)
if not export_all:
for name, alias in metadata['aliases'].items():
      # here we export the aliases; otherwise the side module (which imports
      # the alias) will not be able to get to the actual implementation
if alias in all_implemented and name in all_exported_functions:
funcs.add(alias)
funcs = list(funcs) + global_initializer_funcs(metadata['initializers'])
if shared.Settings.ALLOW_MEMORY_GROWTH:
funcs.append('_emscripten_replace_memory')
if not shared.Settings.SIDE_MODULE and not shared.Settings.MINIMAL_RUNTIME:
funcs += ['stackAlloc', 'stackSave', 'stackRestore', 'establishStackSpace']
if shared.Settings.EMTERPRETIFY:
funcs += ['emterpret']
if shared.Settings.EMTERPRETIFY_ASYNC:
funcs += ['setAsyncState', 'emtStackSave', 'emtStackRestore', 'getEmtStackMax', 'setEmtStackMax']
return sorted(set(funcs))
def get_implemented_functions(metadata):
return set(metadata['implementedFunctions'])
def proxy_debug_print(sync):
if shared.Settings.PTHREADS_DEBUG:
if sync:
return 'warnOnce("sync proxying function " + code);'
else:
return 'warnOnce("async proxying function " + code);'
return ''
def include_asm_consts(pre, forwarded_json, metadata):
if shared.Settings.WASM and shared.Settings.SIDE_MODULE:
if metadata['asmConsts']:
exit_with_error('EM_ASM is not yet supported in shared wasm module (it cannot be stored in the wasm itself, need some solution)')
asm_consts, all_sigs = all_asm_consts(metadata)
asm_const_funcs = []
for sig, call_type in all_sigs:
if 'j' in sig:
exit_with_error('emscript: EM_ASM should not receive i64s as inputs, they are not valid in JS')
if '_emscripten_asm_const_' + call_type + sig in forwarded_json['Functions']['libraryFunctions']:
continue # Only one invoker needs to be emitted for each ASM_CONST (signature x call_type) item
forwarded_json['Functions']['libraryFunctions']['_emscripten_asm_const_' + call_type + sig] = 1
args = ['a%d' % i for i in range(len(sig) - 1)]
all_args = ['code'] + args
pre_asm_const = ''
if shared.Settings.USE_PTHREADS:
sync_proxy = call_type == 'sync_on_main_thread_'
async_proxy = call_type == 'async_on_main_thread_'
proxied = sync_proxy or async_proxy
if proxied:
# In proxied function calls, positive integers 1, 2, 3, ... denote pointers
# to regular C compiled functions. Negative integers -1, -2, -3, ... denote
# indices to EM_ASM() blocks, so remap the EM_ASM() indices from 0, 1, 2,
# ... over to the negative integers starting at -1.
proxy_args = ['-1 - code', str(int(sync_proxy))] + args
pre_asm_const += ' if (ENVIRONMENT_IS_PTHREAD) { ' + proxy_debug_print(sync_proxy) + 'return _emscripten_proxy_to_main_thread_js(' + ', '.join(proxy_args) + '); }\n'
if shared.Settings.EMTERPRETIFY_ASYNC and shared.Settings.ASSERTIONS:
# we cannot have an EM_ASM on the stack when saving/loading
pre_asm_const += " assert(typeof EmterpreterAsync !== 'object' || EmterpreterAsync.state !== 2, 'cannot have an EM_ASM on the stack when emterpreter pauses/resumes - the JS is not emterpreted, so we would end up running it again from the start');\n"
asm_const_funcs.append(r'''
function _emscripten_asm_const_%s(%s) {
%s return ASM_CONSTS[code](%s);
}''' % (call_type + asstr(sig), ', '.join(all_args), pre_asm_const, ', '.join(args)))
asm_consts_text = '\nvar ASM_CONSTS = [' + ',\n '.join(asm_consts) + '];\n'
asm_funcs_text = '\n'.join(asm_const_funcs) + '\n'
em_js_funcs = create_em_js(forwarded_json, metadata)
em_js_text = '\n'.join(em_js_funcs) + '\n'
body_marker = '// === Body ==='
return pre.replace(body_marker, body_marker + '\n' + asm_consts_text + asstr(asm_funcs_text) + em_js_text)
# Test if the parentheses at body[openIdx] and body[closeIdx] are a match to
# each other.
def parentheses_match(body, openIdx, closeIdx):
if closeIdx < 0:
closeIdx += len(body)
count = 1
for i in range(openIdx + 1, closeIdx + 1):
if body[i] == body[openIdx]:
count += 1
elif body[i] == body[closeIdx]:
count -= 1
if count <= 0:
return i == closeIdx
return False
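# e.g. parentheses_match('{a{b}c}', 0, -1) is True, while parentheses_match('{a}{b}', 0, -1)
# is False because the brace at index 0 closes before the final one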
def trim_asm_const_body(body):
body = body.strip()
orig = None
while orig != body:
orig = body
if len(body) > 1 and body[0] == '"' and body[-1] == '"':
body = body[1:-1].replace('\\"', '"').strip()
if len(body) > 1 and body[0] == '{' and body[-1] == '}' and parentheses_match(body, 0, -1):
body = body[1:-1].strip()
if len(body) > 1 and body[0] == '(' and body[-1] == ')' and parentheses_match(body, 0, -1):
body = body[1:-1].strip()
return body
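# e.g. trim_asm_const_body('"{ return 1; }"') strips the quotes and the outer braces,
# leaving 'return 1;'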
def all_asm_consts(metadata):
asm_consts = [0] * len(metadata['asmConsts'])
all_sigs = []
for k, v in metadata['asmConsts'].items():
const, sigs, call_types = v
const = asstr(const)
const = trim_asm_const_body(const)
const = '{ ' + const + ' }'
args = []
arity = max(len(s) for s in sigs) - 1
for i in range(arity):
args.append('$' + str(i))
const = 'function(' + ', '.join(args) + ') ' + const
asm_consts[int(k)] = const
assert(len(sigs) == len(call_types))
for sig, call_type in zip(sigs, call_types):
all_sigs.append((sig, call_type))
return asm_consts, all_sigs
def unfloat(s):
"""lower float to double for ffis"""
return 'd' if s == 'f' else s
def make_function_tables_defs(implemented_functions, all_implemented, function_table_data, metadata):
class Counter(object):
next_bad_item = 0
next_item = 0
pre = []
in_table = set()
debug_tables = {}
def make_params(sig):
return ','.join('p%d' % p for p in range(len(sig) - 1))
def make_coerced_params(sig):
return ','.join(shared.JS.make_coercion('p%d', unfloat(sig[p + 1])) % p for p in range(len(sig) - 1))
def make_coercions(sig):
return ';'.join('p%d = %s' % (p, shared.JS.make_coercion('p%d' % p, sig[p + 1])) for p in range(len(sig) - 1)) + ';'
# when emulating function pointer casts, we need to know what is the target of each pointer
if shared.Settings.EMULATE_FUNCTION_POINTER_CASTS and not shared.Settings.WASM:
function_pointer_targets = {}
for sig, table in function_table_data.items():
start = table.index('[')
end = table.rindex(']')
body = table[start + 1:end].split(',')
for i, parsed in enumerate(x.strip() for x in body):
if parsed != '0':
assert i not in function_pointer_targets
function_pointer_targets[i] = [sig, str(parsed)]
def make_table(sig, raw):
if '[]' in raw:
return ('', '') # empty table
params = make_params(sig)
coerced_params = make_coerced_params(sig)
coercions = make_coercions(sig)
def make_bad(target=None):
i = Counter.next_bad_item
Counter.next_bad_item += 1
if target is None:
target = i
name = 'b' + str(i)
if not shared.Settings.ASSERTIONS:
if 'abort' in shared.Settings.RUNTIME_FUNCS_TO_IMPORT:
code = 'abort(%s);' % target
else:
          # Advanced use: the developer is generating code that does not include the function 'abort()'. Generate invalid
# function pointers to be no-op passthroughs that silently continue execution.
code = '\n/*execution is supposed to abort here, but you did not include "abort" in RUNTIME_FUNCS_TO_IMPORT (to save code size?). Silently trucking through, enjoy :)*/\n'
else:
code = 'nullFunc_' + sig + '(%d);' % target
if sig[0] != 'v':
code += 'return %s' % shared.JS.make_initializer(sig[0]) + ';'
return name, make_func(name, code, params, coercions)
bad, bad_func = make_bad() # the default bad func
if shared.Settings.ASSERTIONS <= 1:
Counter.pre = [bad_func]
else:
Counter.pre = []
start = raw.index('[')
end = raw.rindex(']')
body = raw[start + 1:end].split(',')
if shared.Settings.EMULATED_FUNCTION_POINTERS:
def receive(item):
if item == '0':
return item
if item not in all_implemented:
# this is not implemented; it would normally be wrapped, but with emulation, we just use it directly outside
return item
in_table.add(item)
return "asm['" + item + "']"
body = [receive(b) for b in body]
for j in range(shared.Settings.RESERVED_FUNCTION_POINTERS):
curr = 'jsCall_%s_%s' % (sig, j)
body[1 + j] = curr
implemented_functions.add(curr)
Counter.next_item = 0
def fix_item(item):
j = Counter.next_item
Counter.next_item += 1
newline = Counter.next_item % 30 == 29
if item == '0':
# emulate all non-null pointer calls, if asked to
if j > 0 and shared.Settings.EMULATE_FUNCTION_POINTER_CASTS and not shared.Settings.WASM and j in function_pointer_targets:
proper_sig, proper_target = function_pointer_targets[j]
if shared.Settings.EMULATED_FUNCTION_POINTERS:
if proper_target in all_implemented:
proper_target = "asm['" + proper_target + "']"
def make_emulated_param(i):
if i >= len(sig):
return shared.JS.make_initializer(proper_sig[i]) # extra param, just send a zero
return shared.JS.make_coercion('p%d' % (i - 1), proper_sig[i], convert_from=sig[i])
proper_code = proper_target + '(' + ','.join([make_emulated_param(i + 1) for i in range(len(proper_sig) - 1)]) + ')'
if proper_sig[0] != 'v':
# proper sig has a return, which the wrapper may or may not use
proper_code = shared.JS.make_coercion(proper_code, proper_sig[0])
if proper_sig[0] != sig[0]:
# first coercion ensured we call the target ok; this one ensures we return the right type in the wrapper
proper_code = shared.JS.make_coercion(proper_code, sig[0], convert_from=proper_sig[0])
if sig[0] != 'v':
proper_code = 'return ' + proper_code
else:
# proper sig has no return, we may need a fake return
if sig[0] != 'v':
proper_code = 'return ' + shared.JS.make_initializer(sig[0])
name = 'fpemu_%s_%d' % (sig, j)
wrapper = make_func(name, proper_code, params, coercions)
Counter.pre.append(wrapper)
return name if not newline else (name + '\n')
if shared.Settings.ASSERTIONS <= 1:
return bad if not newline else (bad + '\n')
specific_bad, specific_bad_func = make_bad(j)
Counter.pre.append(specific_bad_func)
return specific_bad if not newline else (specific_bad + '\n')
clean_item = item.replace("asm['", '').replace("']", '')
# when emulating function pointers, we don't need wrappers,
# but if relocating, then we also have the copies in-module, and do need them;
# in wasm we never need wrappers, though
if clean_item not in implemented_functions and not (shared.Settings.EMULATED_FUNCTION_POINTERS and not shared.Settings.RELOCATABLE) and not shared.Settings.WASM:
# this is imported into asm, we must wrap it
call_ident = clean_item
if call_ident in metadata['redirects']:
call_ident = metadata['redirects'][call_ident]
if not call_ident.startswith('_') and not call_ident.startswith('Math_'):
call_ident = '_' + call_ident
code = call_ident + '(' + coerced_params + ')'
if sig[0] != 'v':
# ffis cannot return float
if sig[0] == 'f':
code = '+' + code
code = 'return ' + shared.JS.make_coercion(code, sig[0])
code += ';'
Counter.pre.append(make_func(clean_item + '__wrapper', code, params, coercions))
assert not sig == 'X', 'must know the signature in order to create a wrapper for "%s" (TODO for shared wasm modules)' % item
return clean_item + '__wrapper'
return item if not newline else (item + '\n')
if shared.Settings.ASSERTIONS >= 2:
debug_tables[sig] = body
body = ','.join(fix_item(b) for b in body)
return ('\n'.join(Counter.pre), ''.join([raw[:start + 1], body, raw[end:]]))
infos = [make_table(sig, raw) for sig, raw in function_table_data.items()]
Counter.pre = []
function_tables_defs = '\n'.join([info[0] for info in infos]) + '\n'
function_tables_defs += '\n// EMSCRIPTEN_END_FUNCS\n'
function_tables_defs += '\n'.join([info[1] for info in infos])
return in_table, debug_tables, function_tables_defs
def make_func(name, code, params, coercions):
return 'function %s(%s) {\n %s %s\n}' % (name, params, coercions, code)
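# Illustrative output of the helper above (a minimal sketch):
#   make_func('b0', 'abort(0);', 'p0', 'p0 = p0|0;')
#     -> 'function b0(p0) {\n p0 = p0|0; abort(0);\n}'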
def math_fix(g):
return g if not g.startswith('Math_') else g.split('_')[1]
# asm.js function tables have one table in each linked asm.js module, so we
# can't just dynCall into them - ftCall exists for that purpose. In wasm,
# even linked modules share the table, so it's all fine.
def asm_js_emulated_function_pointers():
return shared.Settings.EMULATED_FUNCTION_POINTERS and not shared.Settings.WASM
def make_function_tables_impls(function_table_data):
function_tables_impls = []
for sig, table in function_table_data.items():
args = ','.join(['a' + str(i) for i in range(1, len(sig))])
arg_coercions = ' '.join(['a' + str(i) + '=' + shared.JS.make_coercion('a' + str(i), sig[i]) + ';' for i in range(1, len(sig))])
coerced_args = ','.join([shared.JS.make_coercion('a' + str(i), sig[i]) for i in range(1, len(sig))])
sig_mask = str(table.count(','))
if not (shared.Settings.WASM and shared.Settings.EMULATED_FUNCTION_POINTERS):
ret = 'FUNCTION_TABLE_%s[index&%s](%s)' % (sig, sig_mask, coerced_args)
else:
# for wasm with emulated function pointers, emit an mft_SIG(..) call, we avoid asm.js function tables there.
ret = 'mftCall_%s(index%s%s)' % (sig, ',' if len(sig) > 1 else '', coerced_args)
ret = ('return ' if sig[0] != 'v' else '') + shared.JS.make_coercion(ret, sig[0])
if not asm_js_emulated_function_pointers():
function_tables_impls.append('''
function dynCall_%s(index%s%s) {
index = index|0;
%s
%s;
}
''' % (sig, ',' if len(sig) > 1 else '', args, arg_coercions, ret))
else:
function_tables_impls.append('''
var dynCall_%s = ftCall_%s;
''' % (sig, sig))
ffi_args = ','.join([shared.JS.make_coercion('a' + str(i), sig[i], ffi_arg=True) for i in range(1, len(sig))])
for i in range(shared.Settings.RESERVED_FUNCTION_POINTERS):
jsret = ('return ' if sig[0] != 'v' else '') + shared.JS.make_coercion('jsCall_%s(%d%s%s)' % (sig, i, ',' if ffi_args else '', ffi_args), sig[0], ffi_result=True)
function_tables_impls.append('''
function jsCall_%s_%s(%s) {
%s
%s;
}
''' % (sig, i, args, arg_coercions, jsret))
return function_tables_impls
def create_mftCall_funcs(function_table_data):
if not asm_js_emulated_function_pointers():
return []
if shared.Settings.WASM or not shared.Settings.RELOCATABLE:
return []
mftCall_funcs = []
# in wasm, emulated function pointers are just simple table calls
for sig, table in function_table_data.items():
return_type, sig_args = sig[0], sig[1:]
num_args = len(sig_args)
params = ','.join(['ptr'] + ['p%d' % i for i in range(num_args)])
coerced_params = ','.join([shared.JS.make_coercion('ptr', 'i')] + [shared.JS.make_coercion('p%d' % i, unfloat(sig_args[i])) for i in range(num_args)])
coercions = ';'.join(['ptr = ptr | 0'] + ['p%d = %s' % (i, shared.JS.make_coercion('p%d' % i, unfloat(sig_args[i]))) for i in range(num_args)]) + ';'
mini_coerced_params = ','.join([shared.JS.make_coercion('p%d' % i, sig_args[i]) for i in range(num_args)])
maybe_return = '' if return_type == 'v' else 'return'
final_return = maybe_return + ' ' + shared.JS.make_coercion('ftCall_' + sig + '(' + coerced_params + ')', unfloat(return_type)) + ';'
if shared.Settings.EMULATED_FUNCTION_POINTERS == 1:
body = final_return
else:
sig_mask = str(table.count(','))
body = ('if (((ptr|0) >= (fb|0)) & ((ptr|0) < (fb + ' + sig_mask + ' | 0))) { ' + maybe_return + ' ' +
shared.JS.make_coercion(
'FUNCTION_TABLE_' + sig + '[(ptr-fb)&' + sig_mask + '](' +
mini_coerced_params + ')', return_type, ffi_arg=True
) + '; ' + ('return;' if return_type == 'v' else '') + ' }' + final_return)
mftCall_funcs.append(make_func('mftCall_' + sig, body, params, coercions) + '\n')
return mftCall_funcs
def get_function_pointer_error(sig, function_table_sigs):
if shared.Settings.ASSERTIONS == 0:
# Release build: do the most minimal sized abort possible
return "abort();"
else:
# ASSERTIONS-enabled build, identify the pointer and the failing signature.
return "abortFnPtrError(x, '" + sig + "');"
def signature_sort_key(sig):
def closure(other):
ret = 0
minlen = min(len(other), len(sig))
maxlen = max(len(other), len(sig))
if other.startswith(sig) or sig.startswith(other):
ret -= 1000 # prioritize prefixes, could be dropped params
ret -= 133 * difflib.SequenceMatcher(a=other, b=sig).ratio() # prioritize on diff similarity
ret += 15 * abs(len(other) - len(sig)) / float(maxlen) # deprioritize the bigger the length difference is
for i in range(minlen):
if other[i] == sig[i]:
ret -= 5 / float(maxlen) # prioritize on identically-placed params
ret += 20 * len(other) # deprioritize on length
return ret
return closure
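# Illustrative usage (a minimal sketch): the returned closure is meant as a sort key that
# ranks candidate signatures by similarity to `sig`, e.g.
#   sorted(['ff', 'viii', 'vi'], key=signature_sort_key('vii'))
# places the near-matches 'vi' and 'viii' before the unrelated 'ff'.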
def asm_backend_uses(metadata, symbol):
# If doing dynamic linking, we should generate the full set of runtime primitives, since we cannot know ahead
# of time what the dynamically linked modules will need. Also with SAFE_HEAP and Emterpretify, generate the full set of views.
if shared.Settings.MAIN_MODULE or shared.Settings.SIDE_MODULE or shared.Settings.SAFE_HEAP or shared.Settings.EMTERPRETIFY:
return True
# Allow querying asm_backend_uses(metadata, 'Math.') to find if any of the Math objects are used
if symbol.endswith('.'):
return any(e.startswith(symbol) for e in metadata['externUses'])
else:
# Querying a single symbol
return symbol in metadata['externUses']
def create_asm_global_funcs(bg_funcs, metadata):
maths = ['Math.' + func for func in ['floor', 'abs', 'sqrt', 'pow', 'cos', 'sin', 'tan', 'acos', 'asin', 'atan', 'atan2', 'exp', 'log', 'ceil', 'imul', 'min', 'max', 'clz32']]
if provide_fround():
maths += ['Math.fround']
asm_global_funcs = ''
for math in maths:
if asm_backend_uses(metadata, math):
asm_global_funcs += ' var ' + math.replace('.', '_') + '=global' + access_quote(math) + ';\n'
asm_global_funcs += ''.join([' var ' + unminified + '=env' + access_quote(math_fix(minified)) + ';\n' for (minified, unminified) in bg_funcs])
asm_global_funcs += global_simd_funcs(access_quote, metadata)
if shared.Settings.USE_PTHREADS:
asm_global_funcs += ''.join([' var Atomics_' + ty + '=global' + access_quote('Atomics') + access_quote(ty) + ';\n' for ty in ['load', 'store', 'exchange', 'compareExchange', 'add', 'sub', 'and', 'or', 'xor']])
return asm_global_funcs
def create_asm_global_vars(bg_vars):
asm_global_vars = ''.join([' var ' + unminified + '=env' + access_quote(minified) + '|0;\n' for (minified, unminified) in bg_vars])
if shared.Settings.WASM and shared.Settings.SIDE_MODULE:
# wasm side modules internally define their stack, these are set at module startup time
asm_global_vars += '\n var STACKTOP = 0, STACK_MAX = 0;\n'
return asm_global_vars
def global_simd_funcs(access_quote, metadata):
# Always import SIMD when building with -s SIMD=1, since in that mode memcpy is SIMD optimized.
if not (metadata['simd'] or shared.Settings.SIMD):
return ''
def string_contains_any(s, str_list):
return any(sub in s for sub in str_list)
nonexisting_simd_symbols = ['Int8x16_fromInt8x16', 'Uint8x16_fromUint8x16', 'Int16x8_fromInt16x8', 'Uint16x8_fromUint16x8', 'Int32x4_fromInt32x4', 'Uint32x4_fromUint32x4', 'Float32x4_fromFloat32x4', 'Float64x2_fromFloat64x2']
nonexisting_simd_symbols += ['Int32x4_addSaturate', 'Int32x4_subSaturate', 'Uint32x4_addSaturate', 'Uint32x4_subSaturate']
nonexisting_simd_symbols += [(x + '_' + y) for x in ['Int8x16', 'Uint8x16', 'Int16x8', 'Uint16x8', 'Float64x2'] for y in ['load2', 'store2']]
nonexisting_simd_symbols += [(x + '_' + y) for x in ['Int8x16', 'Uint8x16', 'Int16x8', 'Uint16x8'] for y in ['load1', 'store1']]
simd = make_simd_types(metadata)
simd_func_text = ''
simd_func_text += ''.join([' var SIMD_' + ty + '=global' + access_quote('SIMD') + access_quote(ty) + ';\n' for ty in simd['types']])
def generate_symbols(types, funcs):
symbols = [' var SIMD_' + ty + '_' + g + '=SIMD_' + ty + access_quote(g) + ';\n' for ty in types for g in funcs]
symbols = [x for x in symbols if not string_contains_any(x, nonexisting_simd_symbols)]
return ''.join(symbols)
simd_func_text += generate_symbols(simd['int_types'], simd['int_funcs'])
simd_func_text += generate_symbols(simd['float_types'], simd['float_funcs'])
simd_func_text += generate_symbols(simd['bool_types'], simd['bool_funcs'])
# SIMD conversions (not bitcasts) between same lane sizes:
def add_simd_cast(dst, src):
return ' var SIMD_' + dst + '_from' + src + '=SIMD_' + dst + '.from' + src + ';\n'
def add_simd_casts(t1, t2):
return add_simd_cast(t1, t2) + add_simd_cast(t2, t1)
# Bug: Skip importing conversions for int<->uint for now, they don't validate
# as asm.js. https://bugzilla.mozilla.org/show_bug.cgi?id=1313512
# This is not an issue when building SSEx code, because it doesn't use these.
# (but it will be an issue if using SIMD.js intrinsics from vector.h to
# explicitly call these)
# if metadata['simdInt8x16'] and metadata['simdUint8x16']:
# simd_func_text += add_simd_casts('Int8x16', 'Uint8x16')
# if metadata['simdInt16x8'] and metadata['simdUint16x8']:
# simd_func_text += add_simd_casts('Int16x8', 'Uint16x8')
# if metadata['simdInt32x4'] and metadata['simdUint32x4']:
# simd_func_text += add_simd_casts('Int32x4', 'Uint32x4')
if metadata['simdInt32x4'] and metadata['simdFloat32x4']:
simd_func_text += add_simd_casts('Int32x4', 'Float32x4')
if metadata['simdUint32x4'] and metadata['simdFloat32x4']:
simd_func_text += add_simd_casts('Uint32x4', 'Float32x4')
if metadata['simdInt32x4'] and metadata['simdFloat64x2']:
simd_func_text += add_simd_cast('Int32x4', 'Float64x2') # Unofficial, needed for emscripten_int32x4_fromFloat64x2
if metadata['simdUint32x4'] and metadata['simdFloat64x2']:
simd_func_text += add_simd_cast('Uint32x4', 'Float64x2') # Unofficial, needed for emscripten_uint32x4_fromFloat64x2
# Unofficial, Bool64x2 does not yet exist, but needed for Float64x2 comparisons.
if metadata['simdFloat64x2']:
simd_func_text += ' var SIMD_Int32x4_fromBool64x2Bits = global.SIMD.Int32x4.fromBool64x2Bits;\n'
return simd_func_text
def make_simd_types(metadata):
simd_float_types = []
simd_int_types = []
simd_bool_types = []
simd_funcs = ['splat', 'check', 'extractLane', 'replaceLane']
simd_intfloat_funcs = ['add', 'sub', 'neg', 'mul',
'equal', 'lessThan', 'greaterThan',
'notEqual', 'lessThanOrEqual', 'greaterThanOrEqual',
'select', 'swizzle', 'shuffle',
'load', 'store', 'load1', 'store1', 'load2', 'store2']
simd_intbool_funcs = ['and', 'xor', 'or', 'not']
if metadata['simdUint8x16']:
simd_int_types += ['Uint8x16']
simd_intfloat_funcs += ['fromUint8x16Bits']
if metadata['simdInt8x16']:
simd_int_types += ['Int8x16']
simd_intfloat_funcs += ['fromInt8x16Bits']
if metadata['simdUint16x8']:
simd_int_types += ['Uint16x8']
simd_intfloat_funcs += ['fromUint16x8Bits']
if metadata['simdInt16x8']:
simd_int_types += ['Int16x8']
simd_intfloat_funcs += ['fromInt16x8Bits']
if metadata['simdUint32x4']:
simd_int_types += ['Uint32x4']
simd_intfloat_funcs += ['fromUint32x4Bits']
if metadata['simdInt32x4'] or shared.Settings.SIMD:
# Always import Int32x4 when building with -s SIMD=1, since memcpy is SIMD optimized.
simd_int_types += ['Int32x4']
simd_intfloat_funcs += ['fromInt32x4Bits']
if metadata['simdFloat32x4']:
simd_float_types += ['Float32x4']
simd_intfloat_funcs += ['fromFloat32x4Bits']
if metadata['simdFloat64x2']:
simd_float_types += ['Float64x2']
simd_intfloat_funcs += ['fromFloat64x2Bits']
if metadata['simdBool8x16']:
simd_bool_types += ['Bool8x16']
if metadata['simdBool16x8']:
simd_bool_types += ['Bool16x8']
if metadata['simdBool32x4']:
simd_bool_types += ['Bool32x4']
if metadata['simdBool64x2']:
simd_bool_types += ['Bool64x2']
simd_float_funcs = simd_funcs + simd_intfloat_funcs + ['div', 'min', 'max', 'minNum', 'maxNum', 'sqrt',
'abs', 'reciprocalApproximation', 'reciprocalSqrtApproximation']
simd_int_funcs = simd_funcs + simd_intfloat_funcs + simd_intbool_funcs + ['shiftLeftByScalar', 'shiftRightByScalar', 'addSaturate', 'subSaturate']
simd_bool_funcs = simd_funcs + simd_intbool_funcs + ['anyTrue', 'allTrue']
simd_types = simd_float_types + simd_int_types + simd_bool_types
return {
'types': simd_types,
'float_types': simd_float_types,
'int_types': simd_int_types,
'bool_types': simd_bool_types,
'funcs': simd_funcs,
'float_funcs': simd_float_funcs,
'int_funcs': simd_int_funcs,
'bool_funcs': simd_bool_funcs,
'intfloat_funcs': simd_intfloat_funcs,
'intbool_funcs': simd_intbool_funcs,
}
def asm_safe_heap():
"""optimized safe heap in asm, when we can"""
return shared.Settings.SAFE_HEAP and not shared.Settings.SAFE_HEAP_LOG and not shared.Settings.RELOCATABLE
def provide_fround():
return shared.Settings.PRECISE_F32 or shared.Settings.SIMD
def create_asm_setup(debug_tables, function_table_data, invoke_function_names, metadata):
function_table_sigs = function_table_data.keys()
asm_setup = ''
if shared.Settings.ASSERTIONS >= 2:
debug_tables_map = 'var debug_tables = {\n'
for sig in function_table_data:
# if the table is empty, debug_tables will not contain it
body = debug_tables.get(sig, [])
asm_setup += 'var debug_table_' + sig + ' = [' + ','.join(['0' if x == '0' else "'" + x.replace("'", '"') + "'" for x in body]) + '];\n'
debug_tables_map += " '" + sig + "': debug_table_" + sig + ',\n'
asm_setup += debug_tables_map + '};\n'
if shared.Settings.ASSERTIONS:
for sig in function_table_sigs:
asm_setup += 'function nullFunc_' + sig + '(x) { ' + get_function_pointer_error(sig, function_table_sigs) + ' }\n'
if shared.Settings.RELOCATABLE:
if not shared.Settings.SIDE_MODULE:
asm_setup += 'var gb = GLOBAL_BASE, fb = 0;\n'
side = 'parent' if shared.Settings.SIDE_MODULE else ''
def check(extern):
if shared.Settings.ASSERTIONS:
return ('\n assert(%sModule["%s"] || %s, "external symbol `%s` is missing.' % (side, extern, extern, extern) +
'perhaps a side module was not linked in? if this symbol was expected to arrive '
'from a system library, try to build the MAIN_MODULE with '
'EMCC_FORCE_STDLIBS=1 in the environment");')
return ''
for extern in metadata['externs']:
asm_setup += 'var g$' + extern + ' = function() {' + check(extern) + '\n return ' + side + 'Module["' + extern + '"];\n}\n'
for extern in metadata['externFunctions']:
barename, sig = extern.split('$')
fullname = "fp$" + extern
key = '%sModule["%s"]' % (side, fullname)
asm_setup += '''\
var %s = function() {
if (!%s) { %s
var fid = addFunction(%sModule["%s"] || %s, "%s");
%s = fid;
}
return %s;
}
''' % (fullname, key, check(barename), side, barename, barename, sig, key, key)
asm_setup += create_invoke_wrappers(invoke_function_names)
asm_setup += setup_function_pointers(function_table_sigs)
if shared.Settings.EMULATED_FUNCTION_POINTERS:
function_tables_impls = make_function_tables_impls(function_table_data)
asm_setup += '\n' + '\n'.join(function_tables_impls) + '\n'
return asm_setup
def setup_function_pointers(function_table_sigs):
asm_setup = ''
for sig in function_table_sigs:
if shared.Settings.RESERVED_FUNCTION_POINTERS:
asm_setup += '\n' + shared.JS.make_jscall(sig) + '\n'
# nothing special to do here for wasm, we just use dynCalls
if not shared.Settings.WASM:
if shared.Settings.EMULATED_FUNCTION_POINTERS:
args = ['a%d' % i for i in range(len(sig) - 1)]
full_args = ['x'] + args
table_access = 'FUNCTION_TABLE_' + sig
if shared.Settings.SIDE_MODULE:
table_access = 'parentModule["' + table_access + '"]' # side module tables were merged into the parent, we need to access the global one
table_read = table_access + '[x]'
prelude = ''
if shared.Settings.ASSERTIONS:
prelude = '''
if (x < 0 || x >= %s.length) { err("Function table mask error (out of range)"); %s ; abort(x) }''' % (table_access, get_function_pointer_error(sig, function_table_sigs))
asm_setup += '''
function ftCall_%s(%s) {%s
return %s(%s);
}
''' % (sig, ', '.join(full_args), prelude, table_read, ', '.join(args))
return asm_setup
def create_basic_funcs(function_table_sigs, invoke_function_names):
basic_funcs = shared.Settings.RUNTIME_FUNCS_TO_IMPORT
if shared.Settings.STACK_OVERFLOW_CHECK:
basic_funcs += ['abortStackOverflow']
if shared.Settings.EMTERPRETIFY:
basic_funcs += ['abortStackOverflowEmterpreter']
if shared.Settings.SAFE_HEAP:
if asm_safe_heap():
basic_funcs += ['segfault', 'alignfault', 'ftfault']
else:
# Binaryen generates calls to these two so they are always needed with wasm
if shared.Settings.WASM:
basic_funcs += ['segfault', 'alignfault']
basic_funcs += ['SAFE_HEAP_LOAD', 'SAFE_HEAP_LOAD_D', 'SAFE_HEAP_STORE', 'SAFE_HEAP_STORE_D', 'SAFE_FT_MASK']
if shared.Settings.ASSERTIONS:
for sig in function_table_sigs:
basic_funcs += ['nullFunc_' + sig]
basic_funcs += invoke_function_names
for sig in function_table_sigs:
if shared.Settings.RESERVED_FUNCTION_POINTERS:
basic_funcs.append('jsCall_%s' % sig)
if asm_js_emulated_function_pointers():
basic_funcs.append('ftCall_%s' % sig)
return basic_funcs
def create_basic_vars(exported_implemented_functions, forwarded_json, metadata):
basic_vars = []
if 'tempDoublePtr' in shared.Settings.ASM_PRIMITIVE_VARS:
basic_vars += ['tempDoublePtr']
if shared.Settings.RELOCATABLE:
if not (shared.Settings.WASM and shared.Settings.SIDE_MODULE):
basic_vars += ['gb', 'fb', 'STACKTOP', 'STACK_MAX']
else:
# wasm side modules have a specific convention for these
basic_vars += ['__memory_base', '__table_base']
if shared.Settings.EMTERPRETIFY:
basic_vars += ['EMTSTACKTOP', 'EMT_STACK_MAX', 'eb']
return basic_vars
def create_exports(exported_implemented_functions, in_table, function_table_data, metadata):
asm_runtime_funcs = create_asm_runtime_funcs()
all_exported = exported_implemented_functions + asm_runtime_funcs + function_tables(function_table_data)
# In asm.js + emulated function pointers, export all the table because we use
# JS to add the asm.js module's functions to the table (which is external
# in this mode). In wasm, we don't need that since wasm modules can
# directly add functions to the imported Table.
if not shared.Settings.WASM and shared.Settings.EMULATED_FUNCTION_POINTERS:
all_exported += in_table
exports = []
for export in sorted(set(all_exported)):
exports.append(quote(export) + ": " + export)
if shared.Settings.WASM and shared.Settings.SIDE_MODULE:
# named globals in side wasm modules are exported globals from asm/wasm
for k, v in metadata['namedGlobals'].items():
exports.append(quote('_' + str(k)) + ': ' + str(v))
# aliases become additional exports
for k, v in metadata['aliases'].items():
exports.append(quote(str(k)) + ': ' + str(v))
# shared wasm emulated function pointer mode requires us to know the function pointer for
# each function. export fp$func => function pointer for func
if shared.Settings.WASM and shared.Settings.RELOCATABLE and shared.Settings.EMULATE_FUNCTION_POINTER_CASTS:
for k, v in metadata['functionPointers'].items():
exports.append(quote('fp$' + str(k)) + ': ' + str(v))
return '{ ' + ', '.join(exports) + ' }'
def create_asm_runtime_funcs():
funcs = []
if not (shared.Settings.WASM and shared.Settings.SIDE_MODULE) and not shared.Settings.MINIMAL_RUNTIME:
funcs += ['stackAlloc', 'stackSave', 'stackRestore', 'establishStackSpace']
return funcs
def function_tables(function_table_data):
if not asm_js_emulated_function_pointers():
return ['dynCall_' + table for table in function_table_data]
else:
return []
def create_the_global(metadata):
# the global is only needed for asm.js
if shared.Settings.WASM:
return '{}'
fundamentals = []
if asm_backend_uses(metadata, 'Math.'):
fundamentals += ['Math']
for f in ['Int8Array', 'Int16Array', 'Int32Array', 'Uint8Array', 'Uint16Array', 'Uint32Array', 'Float32Array', 'Float64Array', 'NaN', 'Infinity']:
if asm_backend_uses(metadata, f):
fundamentals += [f]
if metadata['simd'] or shared.Settings.SIMD:
# Always import SIMD when building with -s SIMD=1, since in that mode memcpy is SIMD optimized.
fundamentals += ['SIMD']
return '{ ' + ', '.join(['"' + math_fix(s) + '": ' + s for s in fundamentals]) + ' }'
RUNTIME_ASSERTIONS = '''
assert(runtimeInitialized, 'you need to wait for the runtime to be ready (e.g. wait for main() to be called)');
assert(!runtimeExited, 'the runtime was exited (use NO_EXIT_RUNTIME to keep it alive after main() exits)');'''
def create_receiving(function_table_data, function_tables_defs, exported_implemented_functions, initializers):
receiving = ''
if not shared.Settings.ASSERTIONS or shared.Settings.MINIMAL_RUNTIME:
runtime_assertions = ''
else:
runtime_assertions = RUNTIME_ASSERTIONS
# assert on the runtime being in a valid state when calling into compiled code. The only exceptions are some support code.
# WASM=1 already inserts runtime assertions, so no need to do it again here (see create_receiving_wasm)
if not shared.Settings.WASM:
receiving_functions = [f for f in exported_implemented_functions if f not in ('_memcpy', '_memset', '_emscripten_replace_memory', '__start_module')]
wrappers = []
for name in receiving_functions:
wrappers.append('''\
var real_%(name)s = asm["%(name)s"];
asm["%(name)s"] = function() {%(runtime_assertions)s
return real_%(name)s.apply(null, arguments);
};
''' % {'name': name, 'runtime_assertions': runtime_assertions})
receiving = '\n'.join(wrappers)
shared.Settings.MODULE_EXPORTS = module_exports = exported_implemented_functions + function_tables(function_table_data)
if not shared.Settings.SWAPPABLE_ASM_MODULE:
if shared.Settings.DECLARE_ASM_MODULE_EXPORTS:
imported_exports = [s for s in module_exports if s not in initializers]
if shared.Settings.WASM and shared.Settings.MINIMAL_RUNTIME:
# In Wasm exports are assigned inside a function to variables existing in top level JS scope, i.e.
# var _main;
# WebAssembly.instantiate(Module["wasm"], imports).then((function(output) {
# var asm = output.instance.exports;
# _main = asm["_main"];
receiving += '\n'.join([s + ' = asm["' + s + '"];' for s in imported_exports]) + '\n'
else:
if shared.Settings.MINIMAL_RUNTIME:
# In asm.js exports can be directly processed at top level, i.e.
# var asm = Module["asm"](asmGlobalArg, asmLibraryArg, buffer);
# var _main = asm["_main"];
receiving += '\n'.join(['var ' + s + ' = asm["' + s + '"];' for s in imported_exports]) + '\n'
else:
receiving += '\n'.join(['var ' + s + ' = Module["' + s + '"] = asm["' + s + '"];' for s in module_exports]) + '\n'
else:
if shared.Settings.target_environment_may_be('node') and shared.Settings.target_environment_may_be('web'):
global_object = '(typeof process !== "undefined" ? global : this)'
elif shared.Settings.target_environment_may_be('node'):
global_object = 'global'
else:
global_object = 'this'
if shared.Settings.MINIMAL_RUNTIME:
module_assign = ''
else:
module_assign = 'Module[__exportedFunc] = '
receiving += 'for(var __exportedFunc in asm) ' + global_object + '[__exportedFunc] = ' + module_assign + 'asm[__exportedFunc];\n'
else:
receiving += 'Module["asm"] = asm;\n'
wrappers = []
for name in module_exports:
wrappers.append('''\
var %(name)s = Module["%(name)s"] = function() {%(runtime_assertions)s
return Module["asm"]["%(name)s"].apply(null, arguments)
};
''' % {'name': name, 'runtime_assertions': runtime_assertions})
receiving += '\n'.join(wrappers)
if shared.Settings.EXPORT_FUNCTION_TABLES and not shared.Settings.WASM:
for table in function_table_data.values():
tableName = table.split()[1]
table = table.replace('var ' + tableName, 'var ' + tableName + ' = Module["' + tableName + '"]')
receiving += table + '\n'
if shared.Settings.EMULATED_FUNCTION_POINTERS:
# in asm.js emulated function tables, emit the table on the outside, where
# JS can manage it (for wasm, a native wasm Table is used directly, and we
# don't need this)
if not shared.Settings.WASM:
receiving += '\n' + function_tables_defs.replace('// EMSCRIPTEN_END_FUNCS\n', '')
# wasm still needs definitions for dyncalls on the outside, for JS
receiving += '\n' + ''.join(['Module["dynCall_%s"] = dynCall_%s\n' % (sig, sig) for sig in function_table_data])
if not shared.Settings.WASM:
for sig in function_table_data.keys():
name = 'FUNCTION_TABLE_' + sig
fullname = name if not shared.Settings.SIDE_MODULE else ('SIDE_' + name)
receiving += 'Module["' + name + '"] = ' + fullname + ';\n'
return receiving
def create_fp_accessors(metadata):
if not shared.Settings.RELOCATABLE:
return ''
# Create `fp$XXX` handlers for determining function pointers (table addresses)
# at runtime.
# For SIDE_MODULEs these are generated by the proxyHandler at runtime.
accessors = []
for fullname in metadata['declares']:
if not fullname.startswith('fp$'):
continue
_, name, sig = fullname.split('$')
mangled = asmjs_mangle(name)
side = 'parent' if shared.Settings.SIDE_MODULE else ''
assertion = ('\n assert(%sModule["%s"] || typeof %s !== "undefined", "external function `%s` is missing.' % (side, mangled, mangled, name) +
'perhaps a side module was not linked in? if this symbol was expected to arrive '
'from a system library, try to build the MAIN_MODULE with '
'EMCC_FORCE_STDLIBS=XX in the environment");')
accessors.append('''
Module['%(full)s'] = function() {
%(assert)s
var func = Module['%(mangled)s'];
if (!func)
func = %(mangled)s;
var fp = addFunction(func, '%(sig)s');
Module['%(full)s'] = function() { return fp };
return fp;
}
''' % {'full': asmjs_mangle(fullname), 'mangled': mangled, 'assert': assertion, 'sig': sig})
return '\n'.join(accessors)
def create_named_globals(metadata):
if not shared.Settings.RELOCATABLE:
return ''
named_globals = '''
var NAMED_GLOBALS = {
%s
};
for (var named in NAMED_GLOBALS) {
Module['_' + named] = gb + NAMED_GLOBALS[named];
}
Module['NAMED_GLOBALS'] = NAMED_GLOBALS;
''' % ',\n '.join('"' + k + '": ' + str(v) for k, v in metadata['namedGlobals'].items())
if shared.Settings.WASM:
# wasm side modules are pure wasm, and cannot create their g$..() methods, so we help them out
# TODO: this works if we are the main module, but if the supplying module is later, it won't, so
# we'll need another solution for that. one option is to scan the module imports, if/when
# wasm supports that, then the loader can do this.
named_globals += '''
for (var named in NAMED_GLOBALS) {
(function(named) {
var addr = Module['_' + named];
Module['g$_' + named] = function() { return addr };
})(named);
}
'''
named_globals += ''.join(["Module['%s'] = Module['%s']\n" % (k, v) for k, v in metadata['aliases'].items()])
return named_globals
def create_runtime_funcs_asmjs(exports, metadata):
if shared.Settings.ASSERTIONS or shared.Settings.STACK_OVERFLOW_CHECK >= 2:
stack_check = ' if ((STACKTOP|0) >= (STACK_MAX|0)) abortStackOverflow(size|0);\n'
else:
stack_check = ''
funcs = ['''
function stackAlloc(size) {
size = size|0;
var ret = 0;
ret = STACKTOP;
STACKTOP = (STACKTOP + size)|0;
STACKTOP = (STACKTOP + 15)&-16;
%s
return ret|0;
}
function stackSave() {
return STACKTOP|0;
}
function stackRestore(top) {
top = top|0;
STACKTOP = top;
}
function establishStackSpace(stackBase, stackMax) {
stackBase = stackBase|0;
stackMax = stackMax|0;
STACKTOP = stackBase;
STACK_MAX = stackMax;
}
''' % stack_check]
if shared.Settings.MINIMAL_RUNTIME:
# MINIMAL_RUNTIME moves stack functions to library.
funcs = []
if shared.Settings.EMTERPRETIFY:
funcs.append('''
function emterpret(pc) { // this will be replaced when the emterpreter code is generated; adding it here allows validation until then
pc = pc | 0;
assert(0);
}''')
if shared.Settings.EMTERPRETIFY_ASYNC:
funcs.append('''
function setAsyncState(x) {
x = x | 0;
asyncState = x;
}
function emtStackSave() {
return EMTSTACKTOP|0;
}
function emtStackRestore(x) {
x = x | 0;
EMTSTACKTOP = x;
}
function getEmtStackMax() {
return EMT_STACK_MAX | 0;
}
function setEmtStackMax(x) {
x = x | 0;
EMT_STACK_MAX = x;
}
''')
if asm_safe_heap():
if '_sbrk' in metadata['implementedFunctions']:
brk_check = 'if ((dest + bytes|0) > (HEAP32[(_emscripten_get_sbrk_ptr()|0)>>2]|0)) segfault();'
else:
# sbrk and malloc were not linked in, but SAFE_HEAP is used - so safe heap
# can ignore the sbrk location.
brk_check = ''
funcs.append('''
function SAFE_HEAP_STORE(dest, value, bytes) {
dest = dest | 0;
value = value | 0;
bytes = bytes | 0;
if ((dest|0) <= 0) segfault();
%(brk_check)s
if ((bytes|0) == 4) {
if ((dest&3)) alignfault();
HEAP32[dest>>2] = value;
} else if ((bytes|0) == 1) {
HEAP8[dest>>0] = value;
} else {
if ((dest&1)) alignfault();
HEAP16[dest>>1] = value;
}
}
function SAFE_HEAP_STORE_D(dest, value, bytes) {
dest = dest | 0;
value = +value;
bytes = bytes | 0;
if ((dest|0) <= 0) segfault();
%(brk_check)s
if ((bytes|0) == 8) {
if ((dest&7)) alignfault();
HEAPF64[dest>>3] = value;
} else {
if ((dest&3)) alignfault();
HEAPF32[dest>>2] = value;
}
}
function SAFE_HEAP_LOAD(dest, bytes, unsigned) {
dest = dest | 0;
bytes = bytes | 0;
unsigned = unsigned | 0;
if ((dest|0) <= 0) segfault();
%(brk_check)s
if ((bytes|0) == 4) {
if ((dest&3)) alignfault();
return HEAP32[dest>>2] | 0;
} else if ((bytes|0) == 1) {
if (unsigned) {
return HEAPU8[dest>>0] | 0;
} else {
return HEAP8[dest>>0] | 0;
}
}
if ((dest&1)) alignfault();
if (unsigned) return HEAPU16[dest>>1] | 0;
return HEAP16[dest>>1] | 0;
}
function SAFE_HEAP_LOAD_D(dest, bytes) {
dest = dest | 0;
bytes = bytes | 0;
if ((dest|0) <= 0) segfault();
%(brk_check)s
if ((bytes|0) == 8) {
if ((dest&7)) alignfault();
return +HEAPF64[dest>>3];
}
if ((dest&3)) alignfault();
return +HEAPF32[dest>>2];
}
function SAFE_FT_MASK(value, mask) {
value = value | 0;
mask = mask | 0;
var ret = 0;
ret = value & mask;
if ((ret|0) != (value|0)) ftfault();
return ret | 0;
}
''' % {'brk_check': brk_check})
return funcs
def create_asm_start_pre(asm_setup, the_global, sending, metadata):
shared_array_buffer = ''
if shared.Settings.USE_PTHREADS and not shared.Settings.WASM:
shared_array_buffer = "asmGlobalArg['Atomics'] = Atomics;"
module_global = 'var asmGlobalArg = ' + the_global + ';'
module_library = 'var asmLibraryArg = ' + sending + ';'
asm_function_top = ('// EMSCRIPTEN_START_ASM\n'
'var asm = (/** @suppress {uselessCode} */ function(global, env, buffer) {')
use_asm = "'almost asm';"
if shared.Settings.ASM_JS == 1:
use_asm = "'use asm';"
lines = [
asm_setup,
module_global,
shared_array_buffer,
module_library,
asm_function_top,
use_asm,
create_first_in_asm(),
]
return '\n'.join(lines)
def create_asm_temp_vars(metadata):
temp_ints = ['__THREW__', 'threwValue', 'setjmpId', 'tempInt', 'tempBigInt', 'tempBigIntS', 'tempValue']
temp_doubles = ['tempDouble']
rtn = ''
for i in temp_ints:
if i in shared.Settings.ASM_PRIMITIVE_VARS:
rtn += 'var ' + i + ' = 0;\n'
for i in temp_doubles:
if i in shared.Settings.ASM_PRIMITIVE_VARS:
rtn += 'var ' + i + ' = 0.0;\n'
if asm_backend_uses(metadata, 'NaN'):
rtn += 'var nan = global%s;\n' % (access_quote('NaN'))
if asm_backend_uses(metadata, 'Infinity'):
rtn += 'var inf = global%s;\n' % (access_quote('Infinity'))
return rtn
def create_asm_runtime_thread_local_vars():
if not shared.Settings.USE_PTHREADS:
return ''
return '''
var __pthread_ptr = 0;
var __pthread_is_main_runtime_thread = 0;
var __pthread_is_main_browser_thread = 0;
'''
def create_replace_memory(metadata):
if not shared.Settings.ALLOW_MEMORY_GROWTH:
return ''
emscripten_replace_memory = '''
function _emscripten_replace_memory(newBuffer) {
'''
for heap, view in [
('HEAP8', 'Int8Array'),
('HEAPU8', 'Uint8Array'),
('HEAP16', 'Int16Array'),
('HEAPU16', 'Uint16Array'),
('HEAP32', 'Int32Array'),
('HEAPU32', 'Uint32Array'),
('HEAPF32', 'Float32Array'),
('HEAPF64', 'Float64Array')]:
if asm_backend_uses(metadata, view):
emscripten_replace_memory += ' %s = new %s(newBuffer);\n' % (heap, view)
emscripten_replace_memory += '''
buffer = newBuffer;
return true;
}
'''
return emscripten_replace_memory
def create_asm_end(exports):
if shared.Settings.MINIMAL_RUNTIME and shared.Settings.WASM:
return '''
return %s;
})
// EMSCRIPTEN_END_ASM
''' % (exports)
return '''
return %s;
})
// EMSCRIPTEN_END_ASM
(asmGlobalArg, asmLibraryArg, buffer);
''' % (exports)
def create_first_in_asm():
return ''
def create_memory_views(metadata):
"""Generates memory views for the different heap types.
Generated symbols:
Int8View Int16View Int32View
Uint8View Uint16View Uint32View
Float32View Float64View
"""
ret = '\n'
for info in HEAP_TYPE_INFOS:
heap_name = '{}Array'.format(info.long_name)
access = access_quote(heap_name)
if asm_backend_uses(metadata, heap_name):
format_args = {
'heap': info.heap_name,
'long': info.long_name,
'access': access,
}
ret += ' var {heap} = new global{access}(buffer);\n'.format(**format_args)
return ret
class HeapTypeInfo(object):
"""Struct that holds data for a type of HEAP* views."""
def __init__(self, heap_name, long_name, shift_amount):
assert heap_name.startswith('HEAP')
self.heap_name = heap_name
self.long_name = long_name
self.shift_amount = shift_amount
def short_name(self):
"""The unique part of the heap name for this type.
Derive this from heap_name instead of the other way around so that searching,
e.g. for HEAP8, from the generated JS code leads back here.
"""
return self.heap_name[len('HEAP'):]
def is_int(self):
"""Whether this heap type is an integer type or not."""
return self.short_name()[0] != 'F'
def coerce(self, expression):
"""Adds asm.js type coercion to a string expression."""
if self.is_int():
return expression + '| 0'
else:
return '+' + expression
HEAP_TYPE_INFOS = [
HeapTypeInfo(heap_name='HEAP8', long_name='Int8', shift_amount=0),
HeapTypeInfo(heap_name='HEAP16', long_name='Int16', shift_amount=1),
HeapTypeInfo(heap_name='HEAP32', long_name='Int32', shift_amount=2),
HeapTypeInfo(heap_name='HEAPU8', long_name='Uint8', shift_amount=0),
HeapTypeInfo(heap_name='HEAPU16', long_name='Uint16', shift_amount=1),
HeapTypeInfo(heap_name='HEAPU32', long_name='Uint32', shift_amount=2),
HeapTypeInfo(heap_name='HEAPF32', long_name='Float32', shift_amount=2),
HeapTypeInfo(heap_name='HEAPF64', long_name='Float64', shift_amount=3),
]
def emscript_wasm_backend(infile, outfile, memfile, compiler_engine,
temp_files, DEBUG):
# Overview:
# * Run wasm-emscripten-finalize to extract metadata and modify the binary
# to use emscripten's wasm<->JS ABI
# * Use the metadata to generate the JS glue that goes with the wasm
metadata = finalize_wasm(temp_files, infile, outfile, memfile, DEBUG)
update_settings_glue(metadata, DEBUG)
if shared.Settings.SIDE_MODULE:
return
if DEBUG:
logger.debug('emscript: js compiler glue')
if DEBUG:
t = time.time()
glue, forwarded_data = compile_settings(compiler_engine, temp_files)
if DEBUG:
logger.debug(' emscript: glue took %s seconds' % (time.time() - t))
t = time.time()
forwarded_json = json.loads(forwarded_data)
# For the wasm backend the implementedFunctions from compiler.js should
# always be empty. This only gets populated for __asm functions when using
# the JS backend.
assert not forwarded_json['Functions']['implementedFunctions']
pre, post = glue.split('// EMSCRIPTEN_END_FUNCS')
# memory and global initializers
global_initializers = ', '.join('{ func: function() { %s() } }' % i for i in metadata['initializers'])
staticbump = shared.Settings.STATIC_BUMP
pre = pre.replace('STATICTOP = STATIC_BASE + 0;', '''STATICTOP = STATIC_BASE + %d;
/* global initializers */ %s __ATINIT__.push(%s);
''' % (staticbump,
'if (!ENVIRONMENT_IS_PTHREAD)' if shared.Settings.USE_PTHREADS else '',
global_initializers))
pre = apply_memory(pre)
pre = apply_static_code_hooks(pre)
if shared.Settings.RELOCATABLE and not shared.Settings.SIDE_MODULE:
pre += 'var gb = GLOBAL_BASE, fb = 0;\n'
# merge forwarded data
shared.Settings.EXPORTED_FUNCTIONS = forwarded_json['EXPORTED_FUNCTIONS']
exports = metadata['exports']
if shared.Settings.ASYNCIFY:
exports += ['asyncify_start_unwind', 'asyncify_stop_unwind', 'asyncify_start_rewind', 'asyncify_stop_rewind']
report_missing_symbols(set([asmjs_mangle(f) for f in exports]), pre)
asm_consts, asm_const_funcs = create_asm_consts_wasm(forwarded_json, metadata)
em_js_funcs = create_em_js(forwarded_json, metadata)
asm_const_pairs = ['%s: %s' % (key, value) for key, value in asm_consts]
asm_const_map = 'var ASM_CONSTS = {\n ' + ', \n '.join(asm_const_pairs) + '\n};\n'
pre = pre.replace(
'// === Body ===',
('// === Body ===\n\n' + asm_const_map +
asstr('\n'.join(asm_const_funcs)) +
'\n'.join(em_js_funcs) + '\n'))
pre = apply_table(pre)
outfile.write(pre)
pre = None
invoke_funcs = metadata['invokeFuncs']
if shared.Settings.RELOCATABLE:
invoke_funcs.append('invoke_X')
try:
del forwarded_json['Variables']['globals']['_llvm_global_ctors'] # not a true variable
except KeyError:
pass
sending = create_sending_wasm(invoke_funcs, forwarded_json, metadata)
receiving = create_receiving_wasm(exports)
module = create_module_wasm(sending, receiving, invoke_funcs, metadata)
write_output_file(outfile, post, module)
module = None
outfile.close()
def remove_trailing_zeros(memfile):
with open(memfile, 'rb') as f:
mem_data = f.read()
end = len(mem_data)
while end > 0 and (mem_data[end - 1] == b'\0' or mem_data[end - 1] == 0):
end -= 1
with open(memfile, 'wb') as f:
f.write(mem_data[:end])
def finalize_wasm(temp_files, infile, outfile, memfile, DEBUG):
wasm_emscripten_finalize = os.path.join(shared.Building.get_binaryen_bin(), 'wasm-emscripten-finalize')
wasm_dis = os.path.join(shared.Building.get_binaryen_bin(), 'wasm-dis')
def debug_copy(src, dst):
if DEBUG:
shutil.copyfile(src, os.path.join(shared.CANONICAL_TEMP_DIR, dst))
if src[-2:] == '.o' or src[-5:] == '.wasm':
tmp = dst + '.wast'
shared.check_call([wasm_dis, src, '-o', os.path.join(shared.CANONICAL_TEMP_DIR, tmp)])
basename = shared.unsuffixed(outfile.name)
wasm = basename + '.wasm'
base_wasm = infile
debug_copy(infile, 'base.wasm')
write_source_map = shared.Settings.DEBUG_LEVEL >= 4
if write_source_map:
base_source_map = base_wasm + '.map'
sourcemap_cmd = [shared.PYTHON, path_from_root('tools', 'wasm-sourcemap.py'),
base_wasm,
'--dwarfdump=' + shared.LLVM_DWARFDUMP,
'-o', base_source_map]
if not shared.Settings.SOURCE_MAP_BASE:
logger.warning("Wasm source map won't be usable in a browser without --source-map-base")
shared.check_call(sourcemap_cmd)
debug_copy(base_source_map, 'base_wasm.map')
cmd = [wasm_emscripten_finalize, base_wasm, '-o', wasm]
# tell binaryen to look at the features section, and if there isn't one, to use MVP
# (which matches what llvm+lld has given us)
cmd += ['--detect-features']
if shared.Settings.DEBUG_LEVEL >= 2 or shared.Settings.PROFILING_FUNCS or shared.Settings.EMIT_SYMBOL_MAP or shared.Settings.ASYNCIFY_WHITELIST or shared.Settings.ASYNCIFY_BLACKLIST:
cmd.append('-g')
if shared.Settings.LEGALIZE_JS_FFI != 1:
cmd.append('--no-legalize-javascript-ffi')
if write_source_map:
cmd.append('--input-source-map=' + base_source_map)
cmd.append('--output-source-map=' + wasm + '.map')
cmd.append('--output-source-map-url=' + shared.Settings.SOURCE_MAP_BASE + os.path.basename(shared.Settings.WASM_BINARY_FILE) + '.map')
if not shared.Settings.MEM_INIT_IN_WASM:
cmd.append('--separate-data-segments=' + memfile)
if shared.Settings.SIDE_MODULE:
cmd.append('--side-module')
else:
# --global-base is used by wasm-emscripten-finalize to calculate the size
# of the static data used. The argument we supply here needs to match the
# global base used by lld (see Building.link_lld). For relocatable output this is
# zero for the global base, although at runtime __memory_base is used.
# For non-relocatable output we use shared.Settings.GLOBAL_BASE.
# TODO(sbc): Can we remove this argument and infer it from the segment
# initializer?
if shared.Settings.RELOCATABLE:
cmd.append('--global-base=0')
else:
cmd.append('--global-base=%s' % shared.Settings.GLOBAL_BASE)
if shared.Settings.SAFE_STACK:
cmd.append('--check-stack-overflow')
if shared.Settings.STANDALONE_WASM:
cmd.append('--standalone-wasm')
shared.print_compiler_stage(cmd)
stdout = shared.check_call(cmd, stdout=subprocess.PIPE).stdout
if write_source_map:
debug_copy(wasm + '.map', 'post_finalize.map')
debug_copy(wasm, 'post_finalize.wasm')
if not shared.Settings.MEM_INIT_IN_WASM:
# we have a separate .mem file. binaryen did not strip any trailing zeros,
# because it's an ABI question as to whether it is valid to do so or not.
# we can do so here, since we make sure to zero out that memory (even in
# the dynamic linking case, our loader zeros it out)
remove_trailing_zeros(memfile)
return load_metadata_wasm(stdout, DEBUG)
def create_asm_consts_wasm(forwarded_json, metadata):
asm_consts = {}
all_sigs = []
for k, v in metadata['asmConsts'].items():
const, sigs, call_types = v
const = asstr(const)
const = trim_asm_const_body(const)
args = []
max_arity = 16
arity = 0
for i in range(max_arity):
if ('$' + str(i)) in const:
arity = i + 1
for i in range(arity):
args.append('$' + str(i))
const = 'function(' + ', '.join(args) + ') {' + const + '}'
asm_consts[int(k)] = const
for sig, call_type in zip(sigs, call_types):
all_sigs.append((sig, call_type))
asm_const_funcs = []
if all_sigs:
# emit the signature-reading helper function only if we have any EM_ASM
# functions in the module
check = ''
if shared.Settings.ASSERTIONS:
check = ' else abort("unexpected char in asm const signature " + ch);'
asm_const_funcs.append(r'''
// Avoid creating a new array
var _readAsmConstArgsArray = [];
function readAsmConstArgs(sigPtr, buf) {
var args = _readAsmConstArgsArray;
args.length = 0;
while (1) {
var ch = HEAPU8[sigPtr++];
if (!ch) return args;
if (ch === 'd'.charCodeAt(0) || ch === 'f'.charCodeAt(0)) {
buf = alignMemory(buf, 8);
args.push(HEAPF64[(buf >> 3)]);
buf += 8;
} else if (ch === 'i'.charCodeAt(0)) {
buf = alignMemory(buf, 4);
args.push(HEAP32[(buf >> 2)]);
buf += 4;
}%s
}
}
''' % check)
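# Illustrative note on the signature string read above (a minimal sketch): one char per
# EM_ASM argument, where 'i' reads an i32 and 'f'/'d' read an f64 (8-byte aligned in argbuf);
# e.g. the sig "id" pulls one int and then one double off the argument buffer.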
for sig, call_type in set(all_sigs):
const_name = '_emscripten_asm_const_' + call_type + sig
forwarded_json['Functions']['libraryFunctions'][const_name] = 1
preamble = ''
if shared.Settings.USE_PTHREADS:
sync_proxy = call_type == 'sync_on_main_thread_'
async_proxy = call_type == 'async_on_main_thread_'
proxied = sync_proxy or async_proxy
if proxied:
# In proxied function calls, positive integers 1, 2, 3, ... denote pointers
# to regular C compiled functions. Negative integers -1, -2, -3, ... denote
# indices to EM_ASM() blocks, so remap the EM_ASM() indices from 0, 1, 2,
# ... over to the negative integers starting at -1.
preamble += ('\n if (ENVIRONMENT_IS_PTHREAD) { ' +
proxy_debug_print(sync_proxy) +
'return _emscripten_proxy_to_main_thread_js(-1 - code, ' +
str(int(sync_proxy)) +
', code, sigPtr, argbuf); }')
if shared.Settings.RELOCATABLE:
# TODO(sbc): remove this condition after
# https://github.com/WebAssembly/binaryen/pull/2408 lands
preamble += '\n if (code > %s) code -= %s;\n' % (shared.Settings.GLOBAL_BASE, shared.Settings.GLOBAL_BASE)
asm_const_funcs.append(r'''
function %s(code, sigPtr, argbuf) {%s
var args = readAsmConstArgs(sigPtr, argbuf);
return ASM_CONSTS[code].apply(null, args);
}''' % (const_name, preamble))
asm_consts = [(key, value) for key, value in asm_consts.items()]
asm_consts.sort()
return asm_consts, asm_const_funcs
def create_em_js(forwarded_json, metadata):
em_js_funcs = []
separator = '<::>'
for name, raw in metadata.get('emJsFuncs', {}).items():
assert separator in raw
args, body = raw.split(separator, 1)
args = args[1:-1]
if args == 'void':
args = []
else:
args = args.split(',')
arg_names = [arg.split()[-1].replace("*", "") for arg in args if arg]
func = 'function {}({}){}'.format(name, ','.join(arg_names), asstr(body))
em_js_funcs.append(func)
forwarded_json['Functions']['libraryFunctions'][name] = 1
return em_js_funcs
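# Illustrative example of the emJsFuncs encoding handled above (a minimal sketch; the
# function name my_func is hypothetical):
#   raw = '(int x, char* s)<::>{ return x; }'
# yields arg names ['x', 's'] and the emitted JS
#   'function my_func(x,s){ return x; }'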
def add_standard_wasm_imports(send_items_map):
# Normally we import these into the wasm (so that JS could use them even
# before the wasm loads), while in standalone mode we do not depend
# on JS to create them, but create them in the wasm and export them.
if not shared.Settings.STANDALONE_WASM:
memory_import = 'wasmMemory'
if shared.Settings.MODULARIZE and shared.Settings.USE_PTHREADS:
# Pthreads assign wasmMemory in their worker startup. In MODULARIZE mode, they cannot assign inside the
# Module scope, so lookup via Module as well.
memory_import += " || Module['wasmMemory']"
send_items_map['memory'] = memory_import
send_items_map['table'] = 'wasmTable'
# With the wasm backend __memory_base and __table_base are only needed for
# relocatable output.
if shared.Settings.RELOCATABLE or not shared.Settings.WASM_BACKEND: # FIXME
send_items_map['__memory_base'] = str(shared.Settings.GLOBAL_BASE) # tell the memory segments where to place themselves
# the wasm backend reserves slot 0 for the NULL function pointer
table_base = '1' if shared.Settings.WASM_BACKEND else '0'
send_items_map['__table_base'] = table_base
if shared.Settings.RELOCATABLE and shared.Settings.WASM_BACKEND: # FIXME
send_items_map['__stack_pointer'] = 'STACK_BASE'
if shared.Settings.MAYBE_WASM2JS or shared.Settings.AUTODEBUG or shared.Settings.LINKABLE:
# legalization of i64 support code may require these in some modes
send_items_map['setTempRet0'] = 'setTempRet0'
send_items_map['getTempRet0'] = 'getTempRet0'
if shared.Settings.AUTODEBUG:
send_items_map['log_execution'] = '''function(loc) {
console.log('log_execution ' + loc);
}'''
send_items_map['get_i32'] = '''function(loc, index, value) {
console.log('get_i32 ' + [loc, index, value]);
return value;
}'''
send_items_map['get_i64'] = '''function(loc, index, low, high) {
console.log('get_i64 ' + [loc, index, low, high]);
setTempRet0(high);
return low;
}'''
send_items_map['get_f32'] = '''function(loc, index, value) {
console.log('get_f32 ' + [loc, index, value]);
return value;
}'''
send_items_map['get_f64'] = '''function(loc, index, value) {
console.log('get_f64 ' + [loc, index, value]);
return value;
}'''
send_items_map['get_anyref'] = '''function(loc, index, value) {
console.log('get_anyref ' + [loc, index, value]);
return value;
}'''
send_items_map['get_exnref'] = '''function(loc, index, value) {
console.log('get_exnref ' + [loc, index, value]);
return value;
}'''
send_items_map['set_i32'] = '''function(loc, index, value) {
console.log('set_i32 ' + [loc, index, value]);
return value;
}'''
send_items_map['set_i64'] = '''function(loc, index, low, high) {
console.log('set_i64 ' + [loc, index, low, high]);
setTempRet0(high);
return low;
}'''
send_items_map['set_f32'] = '''function(loc, index, value) {
console.log('set_f32 ' + [loc, index, value]);
return value;
}'''
send_items_map['set_f64'] = '''function(loc, index, value) {
console.log('set_f64 ' + [loc, index, value]);
return value;
}'''
send_items_map['set_anyref'] = '''function(loc, index, value) {
console.log('set_anyref ' + [loc, index, value]);
return value;
}'''
send_items_map['set_exnref'] = '''function(loc, index, value) {
console.log('set_exnref ' + [loc, index, value]);
return value;
}'''
send_items_map['load_ptr'] = '''function(loc, bytes, offset, ptr) {
console.log('load_ptr ' + [loc, bytes, offset, ptr]);
return ptr;
}'''
send_items_map['load_val_i32'] = '''function(loc, value) {
console.log('load_val_i32 ' + [loc, value]);
return value;
}'''
send_items_map['load_val_i64'] = '''function(loc, low, high) {
console.log('load_val_i64 ' + [loc, low, high]);
setTempRet0(high);
return low;
}'''
send_items_map['load_val_f32'] = '''function(loc, value) {
console.log('load_val_f32 ' + [loc, value]);
return value;
}'''
send_items_map['load_val_f64'] = '''function(loc, value) {
console.log('load_val_f64 ' + [loc, value]);
return value;
}'''
send_items_map['store_ptr'] = '''function(loc, bytes, offset, ptr) {
console.log('store_ptr ' + [loc, bytes, offset, ptr]);
return ptr;
}'''
send_items_map['store_val_i32'] = '''function(loc, value) {
console.log('store_val_i32 ' + [loc, value]);
return value;
}'''
send_items_map['store_val_i64'] = '''function(loc, low, high) {
console.log('store_val_i64 ' + [loc, low, high]);
setTempRet0(high);
return low;
}'''
send_items_map['store_val_f32'] = '''function(loc, value) {
console.log('store_val_f32 ' + [loc, value]);
return value;
}'''
send_items_map['store_val_f64'] = '''function(loc, value) {
console.log('store_val_f64 ' + [loc, value]);
return value;
}'''
def create_sending_wasm(invoke_funcs, forwarded_json, metadata):
basic_funcs = []
if shared.Settings.SAFE_HEAP:
basic_funcs += ['segfault', 'alignfault']
em_asm_sigs = [zip(sigs, call_types) for _, sigs, call_types in metadata['asmConsts'].values()]
# flatten em_asm_sigs
em_asm_sigs = [sig for sigs in em_asm_sigs for sig in sigs]
em_asm_funcs = ['_emscripten_asm_const_' + call_type + sig for sig, call_type in em_asm_sigs]
em_js_funcs = list(metadata['emJsFuncs'].keys())
declared_items = ['_' + item for item in metadata['declares']]
send_items = set(basic_funcs + invoke_funcs + em_asm_funcs + em_js_funcs + declared_items)
def fix_import_name(g):
if g.startswith('Math_'):
return g.split('_')[1]
# Unlike fastcomp the wasm backend doesn't use the '_' prefix for native
# symbols. Emscripten currently expects symbols to start with '_' so we
# artificially add them to the output of wasm-emscripten-finalize and then
# strip them again here.
# note that we don't do this for EM_JS functions (which, rarely, may have
# a '_' prefix)
if g.startswith('_') and g not in metadata['emJsFuncs']:
return g[1:]
return g
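# Illustrative behavior of the renaming above (a minimal sketch):
#   fix_import_name('_malloc')     -> 'malloc'
#   fix_import_name('Math_floor')  -> 'floor'
#   fix_import_name('invoke_vii')  -> 'invoke_vii'   (left untouched)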
send_items_map = OrderedDict()
for name in send_items:
internal_name = fix_import_name(name)
if internal_name in send_items_map:
exit_with_error('duplicate symbol in exports to wasm: %s', name)
send_items_map[internal_name] = name
add_standard_wasm_imports(send_items_map)
sorted_keys = sorted(send_items_map.keys())
return '{ ' + ', '.join('"' + k + '": ' + send_items_map[k] for k in sorted_keys) + ' }'
def create_receiving_wasm(exports):
receiving = []
if not shared.Settings.ASSERTIONS:
runtime_assertions = ''
else:
runtime_assertions = RUNTIME_ASSERTIONS
# assert on the runtime being in a valid state when calling into compiled code. The only exceptions are
# some support code
for e in exports:
receiving.append('''\
var real_%(mangled)s = asm["%(e)s"];
asm["%(e)s"] = function() {%(assertions)s
return real_%(mangled)s.apply(null, arguments);
};
''' % {'mangled': asmjs_mangle(e), 'e': e, 'assertions': runtime_assertions})
if not shared.Settings.SWAPPABLE_ASM_MODULE:
for e in exports:
receiving.append('var %(mangled)s = Module["%(mangled)s"] = asm["%(e)s"];' % {'mangled': asmjs_mangle(e), 'e': e})
else:
receiving.append('Module["asm"] = asm;')
for e in exports:
receiving.append('''\
var %(mangled)s = Module["%(mangled)s"] = function() {%(assertions)s
return Module["asm"]["%(e)s"].apply(null, arguments)
};
''' % {'mangled': asmjs_mangle(e), 'e': e, 'assertions': runtime_assertions})
return '\n'.join(receiving) + '\n'
def create_module_wasm(sending, receiving, invoke_funcs, metadata):
invoke_wrappers = create_invoke_wrappers(invoke_funcs)
receiving += create_named_globals(metadata)
receiving += create_fp_accessors(metadata)
module = []
module.append('var asmGlobalArg = {};\n')
if shared.Settings.USE_PTHREADS and not shared.Settings.WASM:
module.append("if (typeof SharedArrayBuffer !== 'undefined') asmGlobalArg['Atomics'] = Atomics;\n")
module.append('var asmLibraryArg = %s;\n' % (sending))
if shared.Settings.ASYNCIFY and shared.Settings.ASSERTIONS:
module.append('Asyncify.instrumentWasmImports(asmLibraryArg);\n')
module.append("var asm = createWasm();\n")
module.append(receiving)
module.append(invoke_wrappers)
return module
def load_metadata_wasm(metadata_raw, DEBUG):
try:
metadata_json = json.loads(metadata_raw)
except Exception:
logger.error('emscript: failure to parse metadata output from wasm-emscripten-finalize. raw output is: \n' + metadata_raw)
raise
metadata = {
'aliases': {},
'declares': [],
'implementedFunctions': [],
'externs': [],
'simd': False,
'maxGlobalAlign': 0,
'staticBump': 0,
'tableSize': 0,
'initializers': [],
'exports': [],
'namedGlobals': {},
'emJsFuncs': {},
'asmConsts': {},
'invokeFuncs': [],
'features': [],
'mainReadsParams': 1,
}
assert 'tableSize' in metadata_json.keys()
for key, value in metadata_json.items():
# json.loads returns `unicode` for strings but other code in this file
# generally works with utf8 encoded `str` objects, and they don't always
# mix well, e.g. s.replace(x, y) will blow up if `s` is a utf8 str containing
# non-ascii and either x or y are unicode objects.
# TODO(sbc): Remove this encoding if we switch to unicode elsewhere
# (specifically the glue returned from compile_settings)
if type(value) == list:
value = [asstr(v) for v in value]
if key not in metadata:
exit_with_error('unexpected metadata key received from wasm-emscripten-finalize: %s', key)
metadata[key] = value
# Initializers call the global var version of the export, so they get the mangled name.
metadata['initializers'] = [asmjs_mangle(i) for i in metadata['initializers']]
if DEBUG:
logger.debug("Metadata parsed: " + pprint.pformat(metadata))
# Calculate the subset of exports that were explicitly marked with llvm.used.
# These are any exports that were not requested on the command line and are
# not known auto-generated system functions.
unexpected_exports = [e for e in metadata['exports'] if treat_as_user_function(e)]
unexpected_exports = [asmjs_mangle(e) for e in unexpected_exports]
unexpected_exports = [e for e in unexpected_exports if e not in shared.Settings.EXPORTED_FUNCTIONS]
shared.Building.user_requested_exports += unexpected_exports
return metadata
def create_invoke_wrappers(invoke_funcs):
"""Asm.js-style exception handling: invoke wrapper generation."""
invoke_wrappers = ''
for invoke in invoke_funcs:
sig = invoke[len('invoke_'):]
invoke_wrappers += '\n' + shared.JS.make_invoke(sig) + '\n'
return invoke_wrappers
def treat_as_user_function(name):
library_functions_in_module = ('setTempRet0', 'getTempRet0', 'stackAlloc',
'stackSave', 'stackRestore',
'establishStackSpace', '__growWasmMemory',
'__heap_base', '__data_end')
if name.startswith('dynCall_'):
return False
if name in library_functions_in_module:
return False
return True
def asmjs_mangle(name):
"""Mangle a name the way asm.js/JSBackend globals are mangled.
Prepends '_' and replaces non-alphanumerics with '_'.
Used by wasm backend for JS library consistency with asm.js.
"""
if treat_as_user_function(name):
return '_' + name
else:
return name
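# Illustrative examples (added): per treat_as_user_function() above, user-level
# exports gain a leading underscore while runtime helpers keep their names:
#   asmjs_mangle('main')       -> '_main'
#   asmjs_mangle('stackSave')  -> 'stackSave'
#   asmjs_mangle('dynCall_vi') -> 'dynCall_vi'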
def normalize_line_endings(text):
"""Normalize to UNIX line endings.
On Windows, writing to text file will duplicate \r\n to \r\r\n otherwise.
"""
if WINDOWS:
return text.replace('\r\n', '\n')
return text
def run(infile, outfile, memfile):
temp_files = get_configuration().get_temp_files()
infile, outfile = substitute_response_files([infile, outfile])
if not shared.Settings.BOOTSTRAPPING_STRUCT_INFO:
generated_struct_info_name = 'generated_struct_info.json'
def generate_struct_info():
with ToolchainProfiler.profile_block('gen_struct_info'):
out = shared.Cache.get_path(generated_struct_info_name)
gen_struct_info.main(['-q', '-c', '-o', out])
return out
shared.Settings.STRUCT_INFO = shared.Cache.get(generated_struct_info_name, generate_struct_info)
# do we need an else, to define it for the bootstrap case?
outfile_obj = open(outfile, 'w')
emscripter = emscript_wasm_backend if shared.Settings.WASM_BACKEND else emscript_fastcomp
return temp_files.run_and_clean(lambda: emscripter(
infile, outfile_obj, memfile, shared.NODE_JS, temp_files, shared.DEBUG)
)
| 39.447472
| 256
| 0.6838
|
3ef040c937a3665d5d0fe4fb0aa5c9d5717f92b7
| 3,928
|
py
|
Python
|
GameBoard.py
|
DebugScientist78/domino_game_ics4ur
|
008bca1efc528290cfa1f54e3dc8506888ebda53
|
[
"MIT"
] | null | null | null |
GameBoard.py
|
DebugScientist78/domino_game_ics4ur
|
008bca1efc528290cfa1f54e3dc8506888ebda53
|
[
"MIT"
] | null | null | null |
GameBoard.py
|
DebugScientist78/domino_game_ics4ur
|
008bca1efc528290cfa1f54e3dc8506888ebda53
|
[
"MIT"
] | null | null | null |
import DominosPile
import random
import GameEngine
class Deque:
def __init__(self, ls=[]):
self.deq = ls[:]
def appendR(self, data):
self.deq.append(data)
def appendL(self, data):
ls = [data]
self.deq = ls + self.deq
def popL(self):
self.deq.pop(0)
def popR(self):
self.deq.pop(-1)
def getL(self):
return self.deq[0]
def getR(self):
return self.deq[-1]
def empty(self):
self.deq.clear()
class GameBoard:
board = Deque()
pile = DominosPile.DominosPile()
@staticmethod
def getDomExpr(a, b):
'''
Assume a >= b; put the two integers into the 'a|b' domino string form
'''
return str(a)+'|'+str(b)
@staticmethod
def getRanDomExpr():
a = random.randint(0,12)
b = random.randint(0,12)
#order the ints such that the larger is in front
if b > a:
return GameBoard.getDomExpr(b,a)
return GameBoard.getDomExpr(a,b)
@staticmethod
def getDomNum(expr):
#returns the ints from a domino string
a,b = expr.split("|")
a = int(a)
b = int(b)
return a,b
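# Illustrative round trip (added):
#   GameBoard.getDomExpr(6, 4)  -> "6|4"
#   GameBoard.getDomNum("6|4")  -> (6, 4)
# getDomNum() is the inverse of getDomExpr() for the "a|b" string form.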
@staticmethod
def grabDom():
'''
Grab one domino from the pile and return it as an 'a|b' string
'''
expr = GameBoard.getRanDomExpr()
while True:
#prevent dupes
if GameBoard.pile.dom_dict[expr] == "v":
expr = GameBoard.getRanDomExpr()
else:
break
GameBoard.pile.dom_dict[expr] = "v"
return expr
@staticmethod
def grabStartHand():
'''
Grab 7 dominos from the pile and return them as a list
'''
ls = []
for x in range(7):
expr = GameBoard.grabDom()
ls.append(expr)
return ls
@staticmethod
def getRemainPile():
ls = []
for key in GameBoard.pile.dom_dict:
if GameBoard.pile.dom_dict[key] != 'v':
ls.append(key)
if len(ls) == 0: return False
return ls
@staticmethod
def validDominos(hand):
'''
Given a string array of dominos, check whether any of them are valid to play.
Return False if none are playable, otherwise return the list of valid options.
'''
l = GameBoard.getDomNum(GameBoard.board.getL())[0]
r = GameBoard.getDomNum(GameBoard.board.getR())[1]
#print(l, r)
ls = []
for x in hand:
a, b = GameBoard.getDomNum(x)
if a == l or a == r:
ls.append(x)
elif b == l or b == r:
ls.append(x)
if len(ls) == 0: return False
return ls
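# Illustrative example (added): if the leftmost tile on the board starts with 6
# and the rightmost tile ends with 2, then for the hand ["6|5", "3|3", "2|0"]
# validDominos returns ["6|5", "2|0"]; if nothing matches either end it returns False.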
@staticmethod
def putDomino(dom):
'''
Determine which side is valid; after a side is chosen, determine if a flip is needed
'''
l = GameBoard.getDomNum(GameBoard.board.getL())[0]
r = GameBoard.getDomNum(GameBoard.board.getR())[1]
a,b = GameBoard.getDomNum(dom)
while True:
val = GameEngine.GameEngine.retriveInput("Pick:\n1) Left\n2) Right\n", True, '')
if val == 1:
if l == a or l == b:
if a == l: GameBoard.board.appendL(GameBoard.getDomExpr(b,a))
else: GameBoard.board.appendL(dom)
break
else: print("You can't play this domino on the left side")
elif val == 2:
if r == a or r == b:
if b == r: GameBoard.board.appendR(GameBoard.getDomExpr(b,a))
else: GameBoard.board.appendR(dom)
break
else: print("You can't play this domino on the right side")
else:
print("Please enter 1 or 2")
@staticmethod
def displayBoard():
print("The Board: " + GameBoard.board.deq.__str__())
| 28.882353
| 92
| 0.520876
|
4730601d4a38652a9820f1fecb64401fd75644fa
| 1,538
|
py
|
Python
|
exportplan/migrations/0004_exportplanactions.py
|
uktrade/directory-api
|
45a9024a7ecc2842895201cbb51420ba9e57a168
|
[
"MIT"
] | 2
|
2017-06-02T09:09:08.000Z
|
2021-01-18T10:26:53.000Z
|
exportplan/migrations/0004_exportplanactions.py
|
uktrade/directory-api
|
45a9024a7ecc2842895201cbb51420ba9e57a168
|
[
"MIT"
] | 629
|
2016-10-10T09:35:52.000Z
|
2022-03-25T15:04:04.000Z
|
exportplan/migrations/0004_exportplanactions.py
|
uktrade/directory-api
|
45a9024a7ecc2842895201cbb51420ba9e57a168
|
[
"MIT"
] | 5
|
2017-06-22T10:02:22.000Z
|
2022-03-14T17:55:21.000Z
|
# Generated by Django 2.2.10 on 2020-03-13 15:30
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('exportplan', '0003_auto_20200219_1332'),
]
operations = [
migrations.CreateModel(
name='ExportPlanActions',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, null=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, null=True, verbose_name='modified')),
('owner', models.PositiveIntegerField(default=None, null=True, verbose_name='sso user.sso_id')),
('due_date', models.DateField(blank=True, null=True)),
('is_reminders_on', models.BooleanField(default=False)),
('action_type', models.CharField(choices=[('TARGET_MARKETS', 'Target Markets')], default='TARGET_MARKETS', max_length=15)),
('companyexportplan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='actions', to='exportplan.CompanyExportPlan')),
],
options={
'ordering': ('-modified', '-created'),
'get_latest_by': 'modified',
'abstract': False,
},
),
]
| 45.235294
| 161
| 0.630689
|
bc369cdfbdb3e5d38f82021702458d4894c1ea42
| 17,729
|
py
|
Python
|
gui/text.py
|
cmatomic/electrum-cesc
|
8e32dee4ff13cbc07646de632d49272636f936c0
|
[
"MIT"
] | null | null | null |
gui/text.py
|
cmatomic/electrum-cesc
|
8e32dee4ff13cbc07646de632d49272636f936c0
|
[
"MIT"
] | 2
|
2015-09-13T19:53:35.000Z
|
2020-01-26T23:48:04.000Z
|
gui/text.py
|
cmatomic/electrum-cesc
|
8e32dee4ff13cbc07646de632d49272636f936c0
|
[
"MIT"
] | 4
|
2016-10-07T17:21:11.000Z
|
2021-12-20T05:21:24.000Z
|
import tty, sys
import curses, datetime, locale
from decimal import Decimal
from electrum_cesc.util import format_satoshis, set_verbosity
from electrum_cesc.util import StoreDict
from electrum_cesc.bitcoin import is_valid, COIN, TYPE_ADDRESS
from electrum_cesc import Wallet, WalletStorage
_ = lambda x:x
class ElectrumGui:
def __init__(self, config, daemon, plugins):
self.config = config
self.network = daemon.network
storage = WalletStorage(config.get_wallet_path())
if not storage.file_exists:
print "Wallet not found. try 'electrum-cesc create'"
exit()
self.wallet = Wallet(storage)
self.wallet.start_threads(self.network)
self.contacts = StoreDict(self.config, 'contacts')
locale.setlocale(locale.LC_ALL, '')
self.encoding = locale.getpreferredencoding()
self.stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
curses.start_color()
curses.use_default_colors()
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLUE)
curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_CYAN)
curses.init_pair(3, curses.COLOR_BLACK, curses.COLOR_WHITE);
self.stdscr.keypad(1)
self.stdscr.border(0)
self.maxy, self.maxx = self.stdscr.getmaxyx()
self.set_cursor(0)
self.w = curses.newwin(10, 50, 5, 5)
set_verbosity(False)
self.tab = 0
self.pos = 0
self.popup_pos = 0
self.str_recipient = ""
self.str_description = ""
self.str_amount = ""
self.str_fee = ""
self.history = None
if self.network:
self.network.register_callback(self.update, ['updated'])
self.tab_names = [_("History"), _("Send"), _("Receive"), _("Addresses"), _("Contacts"), _("Banner")]
self.num_tabs = len(self.tab_names)
def set_cursor(self, x):
try:
curses.curs_set(x)
except Exception:
pass
def restore_or_create(self):
pass
def verify_seed(self):
pass
def get_string(self, y, x):
self.set_cursor(1)
curses.echo()
self.stdscr.addstr( y, x, " "*20, curses.A_REVERSE)
s = self.stdscr.getstr(y,x)
curses.noecho()
self.set_cursor(0)
return s
def update(self, event):
self.update_history()
if self.tab == 0:
self.print_history()
self.refresh()
def print_history(self):
width = [20, 40, 14, 14]
delta = (self.maxx - sum(width) - 4)/3
format_str = "%"+"%d"%width[0]+"s"+"%"+"%d"%(width[1]+delta)+"s"+"%"+"%d"%(width[2]+delta)+"s"+"%"+"%d"%(width[3]+delta)+"s"
if self.history is None:
self.update_history()
self.print_list(self.history[::-1], format_str%( _("Date"), _("Description"), _("Amount"), _("Balance")))
def update_history(self):
width = [20, 40, 14, 14]
delta = (self.maxx - sum(width) - 4)/3
format_str = "%"+"%d"%width[0]+"s"+"%"+"%d"%(width[1]+delta)+"s"+"%"+"%d"%(width[2]+delta)+"s"+"%"+"%d"%(width[3]+delta)+"s"
b = 0
self.history = []
for item in self.wallet.get_history():
tx_hash, height, conf, timestamp, value, balance = item
if conf:
try:
time_str = datetime.datetime.fromtimestamp(timestamp).isoformat(' ')[:-3]
except Exception:
time_str = "------"
else:
time_str = 'unconfirmed'
label = self.wallet.get_label(tx_hash)
if len(label) > 40:
label = label[0:37] + '...'
self.history.append( format_str%( time_str, label, format_satoshis(value, whitespaces=True), format_satoshis(balance, whitespaces=True) ) )
def print_balance(self):
if not self.network:
msg = _("Offline")
elif self.network.is_connected():
if not self.wallet.up_to_date:
msg = _("Synchronizing...")
else:
c, u, x = self.wallet.get_balance()
msg = _("Balance")+": %f "%(Decimal(c) / COIN)
if u:
msg += " [%f unconfirmed]"%(Decimal(u) / COIN)
if x:
msg += " [%f unmatured]"%(Decimal(x) / COIN)
else:
msg = _("Not connected")
self.stdscr.addstr( self.maxy -1, 3, msg)
for i in range(self.num_tabs):
self.stdscr.addstr( 0, 2 + 2*i + len(''.join(self.tab_names[0:i])), ' '+self.tab_names[i]+' ', curses.A_BOLD if self.tab == i else 0)
self.stdscr.addstr(self.maxy -1, self.maxx-30, ' '.join([_("Settings"), _("Network"), _("Quit")]))
def print_receive(self):
addr = self.wallet.get_unused_address(None)
self.stdscr.addstr(2, 1, "Address: "+addr)
self.print_qr(addr)
def print_contacts(self):
messages = map(lambda x: "%20s %45s "%(x[0], x[1][1]), self.contacts.items())
self.print_list(messages, "%19s %15s "%("Key", "Value"))
def print_addresses(self):
fmt = "%-35s %-30s"
messages = map(lambda addr: fmt % (addr, self.wallet.labels.get(addr,"")), self.wallet.addresses())
self.print_list(messages, fmt % ("Address", "Label"))
def print_edit_line(self, y, label, text, index, size):
text += " "*(size - len(text) )
self.stdscr.addstr( y, 2, label)
self.stdscr.addstr( y, 15, text, curses.A_REVERSE if self.pos%6==index else curses.color_pair(1))
def print_send_tab(self):
self.stdscr.clear()
self.print_edit_line(3, _("Pay to"), self.str_recipient, 0, 40)
self.print_edit_line(5, _("Description"), self.str_description, 1, 40)
self.print_edit_line(7, _("Amount"), self.str_amount, 2, 15)
self.print_edit_line(9, _("Fee"), self.str_fee, 3, 15)
self.stdscr.addstr( 12, 15, _("[Send]"), curses.A_REVERSE if self.pos%6==4 else curses.color_pair(2))
self.stdscr.addstr( 12, 25, _("[Clear]"), curses.A_REVERSE if self.pos%6==5 else curses.color_pair(2))
def print_banner(self):
if self.network:
self.print_list( self.network.banner.split('\n'))
def print_qr(self, data):
import qrcode, StringIO
s = StringIO.StringIO()
self.qr = qrcode.QRCode()
self.qr.add_data(data)
self.qr.print_ascii(out=s, invert=False)
msg = s.getvalue()
lines = msg.split('\n')
for i, l in enumerate(lines):
l = l.encode("utf-8")
self.stdscr.addstr(i+5, 5, l, curses.color_pair(3))
def print_list(self, list, firstline = None):
self.maxpos = len(list)
if not self.maxpos: return
if firstline:
firstline += " "*(self.maxx -2 - len(firstline))
self.stdscr.addstr( 1, 1, firstline )
for i in range(self.maxy-4):
msg = list[i] if i < len(list) else ""
msg += " "*(self.maxx - 2 - len(msg))
m = msg[0:self.maxx - 2]
m = m.encode(self.encoding)
self.stdscr.addstr( i+2, 1, m, curses.A_REVERSE if i == (self.pos % self.maxpos) else 0)
def refresh(self):
if self.tab == -1: return
self.stdscr.border(0)
self.print_balance()
self.stdscr.refresh()
def main_command(self):
c = self.stdscr.getch()
print c
if c == curses.KEY_RIGHT: self.tab = (self.tab + 1)%self.num_tabs
elif c == curses.KEY_LEFT: self.tab = (self.tab - 1)%self.num_tabs
elif c == curses.KEY_DOWN: self.pos +=1
elif c == curses.KEY_UP: self.pos -= 1
elif c == 9: self.pos +=1 # tab
elif curses.unctrl(c) in ['^W', '^C', '^X', '^Q']: self.tab = -1
elif curses.unctrl(c) in ['^N']: self.network_dialog()
elif curses.unctrl(c) == '^S': self.settings_dialog()
else: return c
if self.pos<0: self.pos=0
if self.pos>=self.maxpos: self.pos=self.maxpos - 1
def run_tab(self, i, print_func, exec_func):
while self.tab == i:
self.stdscr.clear()
print_func()
self.refresh()
c = self.main_command()
if c: exec_func(c)
def run_history_tab(self, c):
if c == 10:
out = self.run_popup('',["blah","foo"])
def edit_str(self, target, c, is_num=False):
# detect backspace
if c in [8, 127, 263] and target:
target = target[:-1]
elif not is_num or curses.unctrl(c) in '0123456789.':
target += curses.unctrl(c)
return target
def run_send_tab(self, c):
if self.pos%6 == 0:
self.str_recipient = self.edit_str(self.str_recipient, c)
if self.pos%6 == 1:
self.str_description = self.edit_str(self.str_description, c)
if self.pos%6 == 2:
self.str_amount = self.edit_str(self.str_amount, c, True)
elif self.pos%6 == 3:
self.str_fee = self.edit_str(self.str_fee, c, True)
elif self.pos%6==4:
if c == 10: self.do_send()
elif self.pos%6==5:
if c == 10: self.do_clear()
def run_receive_tab(self, c):
if c == 10:
out = self.run_popup('Address', ["Edit label", "Freeze", "Prioritize"])
def run_contacts_tab(self, c):
if c == 10 and self.contacts:
out = self.run_popup('Address', ["Copy", "Pay to", "Edit label", "Delete"]).get('button')
key = self.contacts.keys()[self.pos%len(self.contacts.keys())]
if out == "Pay to":
self.tab = 1
self.str_recipient = key
self.pos = 2
elif out == "Edit label":
s = self.get_string(6 + self.pos, 18)
if s:
self.wallet.labels[key] = s
def run_banner_tab(self, c):
self.show_message(repr(c))
pass
def main(self):
tty.setraw(sys.stdin)
while self.tab != -1:
self.run_tab(0, self.print_history, self.run_history_tab)
self.run_tab(1, self.print_send_tab, self.run_send_tab)
self.run_tab(2, self.print_receive, self.run_receive_tab)
self.run_tab(3, self.print_addresses, self.run_banner_tab)
self.run_tab(4, self.print_contacts, self.run_contacts_tab)
self.run_tab(5, self.print_banner, self.run_banner_tab)
tty.setcbreak(sys.stdin)
curses.nocbreak()
self.stdscr.keypad(0)
curses.echo()
curses.endwin()
def do_clear(self):
self.str_amount = ''
self.str_recipient = ''
self.str_fee = ''
self.str_description = ''
def do_send(self):
if not is_valid(self.str_recipient):
self.show_message(_('Invalid Cryptoescudo address'))
return
try:
amount = int(Decimal(self.str_amount) * COIN)
except Exception:
self.show_message(_('Invalid Amount'))
return
try:
fee = int(Decimal(self.str_fee) * COIN)
except Exception:
self.show_message(_('Invalid Fee'))
return
if self.wallet.use_encryption:
password = self.password_dialog()
if not password:
return
else:
password = None
try:
tx = self.wallet.mktx([(TYPE_ADDRESS, self.str_recipient, amount)], password, self.config, fee)
except Exception as e:
self.show_message(str(e))
return
if self.str_description:
self.wallet.labels[tx.hash()] = self.str_description
self.show_message(_("Please wait..."), getchar=False)
status, msg = self.network.broadcast(tx)
if status:
self.show_message(_('Payment sent.'))
self.do_clear()
#self.update_contacts_tab()
else:
self.show_message(_('Error'))
def show_message(self, message, getchar = True):
w = self.w
w.clear()
w.border(0)
for i, line in enumerate(message.split('\n')):
w.addstr(2+i,2,line)
w.refresh()
if getchar: c = self.stdscr.getch()
def run_popup(self, title, items):
return self.run_dialog(title, map(lambda x: {'type':'button','label':x}, items), interval=1, y_pos = self.pos+3)
def network_dialog(self):
if not self.network:
return
params = self.network.get_parameters()
host, port, protocol, proxy_config, auto_connect = params
srv = 'auto-connect' if auto_connect else self.network.default_server
out = self.run_dialog('Network', [
{'label':'server', 'type':'str', 'value':srv},
{'label':'proxy', 'type':'str', 'value':self.config.get('proxy', '')},
], buttons = 1)
if out:
if out.get('server'):
server = out.get('server')
auto_connect = server == 'auto-connect'
if not auto_connect:
try:
host, port, protocol = server.split(':')
except Exception:
self.show_message("Error:" + server + "\nIn doubt, type \"auto-connect\"")
return False
proxy = self.parse_proxy_options(out.get('proxy')) if out.get('proxy') else None
self.network.set_parameters(host, port, protocol, proxy, auto_connect)
def settings_dialog(self):
fee = str(Decimal(self.wallet.fee_per_kb(self.config)) / COIN)
out = self.run_dialog('Settings', [
{'label':'Default fee', 'type':'satoshis', 'value': fee }
], buttons = 1)
if out:
if out.get('Default fee'):
fee = int(Decimal(out['Default fee']) * COIN)
self.config.set_key('fee_per_kb', fee, True)
def password_dialog(self):
out = self.run_dialog('Password', [
{'label':'Password', 'type':'password', 'value':''}
], buttons = 1)
return out.get('Password')
def run_dialog(self, title, items, interval=2, buttons=None, y_pos=3):
self.popup_pos = 0
self.w = curses.newwin( 5 + len(items)*interval + (2 if buttons else 0), 50, y_pos, 5)
w = self.w
out = {}
while True:
w.clear()
w.border(0)
w.addstr( 0, 2, title)
num = len(items)
numpos = num
if buttons: numpos += 2
for i in range(num):
item = items[i]
label = item.get('label')
if item.get('type') == 'list':
value = item.get('value','')
elif item.get('type') == 'satoshis':
value = item.get('value','')
elif item.get('type') == 'str':
value = item.get('value','')
elif item.get('type') == 'password':
value = '*'*len(item.get('value',''))
else:
value = ''
if value is None:
value = ''
if len(value)<20:
value += ' '*(20-len(value))
if item.has_key('value'):
w.addstr( 2+interval*i, 2, label)
w.addstr( 2+interval*i, 15, value, curses.A_REVERSE if self.popup_pos%numpos==i else curses.color_pair(1) )
else:
w.addstr( 2+interval*i, 2, label, curses.A_REVERSE if self.popup_pos%numpos==i else 0)
if buttons:
w.addstr( 5+interval*i, 10, "[ ok ]", curses.A_REVERSE if self.popup_pos%numpos==(numpos-2) else curses.color_pair(2))
w.addstr( 5+interval*i, 25, "[cancel]", curses.A_REVERSE if self.popup_pos%numpos==(numpos-1) else curses.color_pair(2))
w.refresh()
c = self.stdscr.getch()
if c in [ord('q'), 27]: break
elif c in [curses.KEY_LEFT, curses.KEY_UP]: self.popup_pos -= 1
elif c in [curses.KEY_RIGHT, curses.KEY_DOWN]: self.popup_pos +=1
else:
i = self.popup_pos%numpos
if buttons and c==10:
if i == numpos-2:
return out
elif i == numpos -1:
return {}
item = items[i]
_type = item.get('type')
if _type == 'str':
item['value'] = self.edit_str(item['value'], c)
out[item.get('label')] = item.get('value')
elif _type == 'password':
item['value'] = self.edit_str(item['value'], c)
out[item.get('label')] = item ['value']
elif _type == 'satoshis':
item['value'] = self.edit_str(item['value'], c, True)
out[item.get('label')] = item.get('value')
elif _type == 'list':
choices = item.get('choices')
try:
j = choices.index(item.get('value'))
except Exception:
j = 0
new_choice = choices[(j + 1)% len(choices)]
item['value'] = new_choice
out[item.get('label')] = item.get('value')
elif _type == 'button':
out['button'] = item.get('label')
break
return out
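# Illustrative note (added): run_dialog() takes a list of item dicts such as
#   [{'label': 'server', 'type': 'str', 'value': 'electrum.example.org:50002:s'},
#    {'label': 'proxy',  'type': 'str', 'value': ''}]
# and, when called with buttons=1, returns a dict mapping each label to its edited
# value on [ok], or {} on [cancel]; see network_dialog() and settings_dialog()
# above for real call sites.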
| 36.107943
| 151
| 0.529754
|
08f08986951a902a331011b272ada03c89845c7d
| 13,375
|
py
|
Python
|
salt/states/boto_cloudtrail.py
|
ronnix/salt
|
00ffa294ad5842786e9da9cb4fc70955f53299fa
|
[
"Apache-2.0"
] | null | null | null |
salt/states/boto_cloudtrail.py
|
ronnix/salt
|
00ffa294ad5842786e9da9cb4fc70955f53299fa
|
[
"Apache-2.0"
] | null | null | null |
salt/states/boto_cloudtrail.py
|
ronnix/salt
|
00ffa294ad5842786e9da9cb4fc70955f53299fa
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Manage CloudTrail Objects
=========================
.. versionadded:: 2016.3.0
Create and destroy CloudTrail objects. Be aware that this interacts with Amazon's services,
and so may incur charges.
This module uses ``boto3``, which can be installed via package, or pip.
This module accepts explicit vpc credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More information available `here
<http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_.
If IAM roles are not used you need to specify them either in a pillar file or
in the minion's config file:
.. code-block:: yaml
vpc.keyid: GKTADJGHEIQSXMKKRBJ08H
vpc.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile,
either passed in as a dict, or as a string to pull from pillars or minion
config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
.. code-block:: yaml
Ensure trail exists:
boto_cloudtrail.present:
- Name: mytrail
- S3BucketName: mybucket
- S3KeyPrefix: prefix
- region: us-east-1
- keyid: GKTADJGHEIQSXMKKRBJ08H
- key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
'''
# Import Python Libs
from __future__ import absolute_import
import logging
import os
import os.path
# Import Salt Libs
import salt.utils
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load if boto is available.
'''
return 'boto_cloudtrail' if 'boto_cloudtrail.exists' in __salt__ else False
def present(name, Name,
S3BucketName, S3KeyPrefix=None,
SnsTopicName=None,
IncludeGlobalServiceEvents=True,
#IsMultiRegionTrail=None,
EnableLogFileValidation=False,
CloudWatchLogsLogGroupArn=None,
CloudWatchLogsRoleArn=None,
KmsKeyId=None,
LoggingEnabled=True,
Tags=None,
region=None, key=None, keyid=None, profile=None):
'''
Ensure trail exists.
name
The name of the state definition
Name
Name of the trail.
S3BucketName
Specifies the name of the Amazon S3 bucket designated for publishing log
files.
S3KeyPrefix
Specifies the Amazon S3 key prefix that comes after the name of the
bucket you have designated for log file delivery.
SnsTopicName
Specifies the name of the Amazon SNS topic defined for notification of
log file delivery. The maximum length is 256 characters.
IncludeGlobalServiceEvents
Specifies whether the trail is publishing events from global services
such as IAM to the log files.
EnableLogFileValidation
Specifies whether log file integrity validation is enabled. The default
is false.
CloudWatchLogsLogGroupArn
Specifies a log group name using an Amazon Resource Name (ARN), a unique
identifier that represents the log group to which CloudTrail logs will
be delivered. Not required unless you specify CloudWatchLogsRoleArn.
CloudWatchLogsRoleArn
Specifies the role for the CloudWatch Logs endpoint to assume to write
to a user's log group.
KmsKeyId
Specifies the KMS key ID to use to encrypt the logs delivered by
CloudTrail. The value can be an alias name prefixed by "alias/", a
fully specified ARN to an alias, a fully specified ARN to a key, or a
globally unique identifier.
LoggingEnabled
Whether logging should be enabled for the trail
Tags
A dictionary of tags that should be set on the trail
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
'''
ret = {'name': Name,
'result': True,
'comment': '',
'changes': {}
}
r = __salt__['boto_cloudtrail.exists'](Name=Name,
region=region, key=key, keyid=keyid, profile=profile)
if 'error' in r:
ret['result'] = False
ret['comment'] = 'Failed to create trail: {0}.'.format(r['error']['message'])
return ret
if not r.get('exists'):
if __opts__['test']:
ret['comment'] = 'CloudTrail {0} is set to be created.'.format(Name)
ret['result'] = None
return ret
r = __salt__['boto_cloudtrail.create'](Name=Name,
S3BucketName=S3BucketName,
S3KeyPrefix=S3KeyPrefix,
SnsTopicName=SnsTopicName,
IncludeGlobalServiceEvents=IncludeGlobalServiceEvents,
#IsMultiRegionTrail=IsMultiRegionTrail,
EnableLogFileValidation=EnableLogFileValidation,
CloudWatchLogsLogGroupArn=CloudWatchLogsLogGroupArn,
CloudWatchLogsRoleArn=CloudWatchLogsRoleArn,
KmsKeyId=KmsKeyId,
region=region, key=key, keyid=keyid, profile=profile)
if not r.get('created'):
ret['result'] = False
ret['comment'] = 'Failed to create trail: {0}.'.format(r['error']['message'])
return ret
_describe = __salt__['boto_cloudtrail.describe'](Name,
region=region, key=key, keyid=keyid, profile=profile)
ret['changes']['old'] = {'trail': None}
ret['changes']['new'] = _describe
ret['comment'] = 'CloudTrail {0} created.'.format(Name)
if LoggingEnabled:
r = __salt__['boto_cloudtrail.start_logging'](Name=Name,
region=region, key=key, keyid=keyid, profile=profile)
if 'error' in r:
ret['result'] = False
ret['comment'] = 'Failed to create trail: {0}.'.format(r['error']['message'])
ret['changes'] = {}
return ret
ret['changes']['new']['trail']['LoggingEnabled'] = True
else:
ret['changes']['new']['trail']['LoggingEnabled'] = False
if bool(Tags):
r = __salt__['boto_cloudtrail.add_tags'](Name=Name,
region=region, key=key, keyid=keyid, profile=profile, **Tags)
if not r.get('tagged'):
ret['result'] = False
ret['comment'] = 'Failed to create trail: {0}.'.format(r['error']['message'])
ret['changes'] = {}
return ret
ret['changes']['new']['trail']['Tags'] = Tags
return ret
ret['comment'] = os.linesep.join([ret['comment'], 'CloudTrail {0} is present.'.format(Name)])
ret['changes'] = {}
# trail exists, ensure config matches
_describe = __salt__['boto_cloudtrail.describe'](Name=Name,
region=region, key=key, keyid=keyid, profile=profile)
if 'error' in _describe:
ret['result'] = False
ret['comment'] = 'Failed to update trail: {0}.'.format(_describe['error']['message'])
ret['changes'] = {}
return ret
_describe = _describe.get('trail')
r = __salt__['boto_cloudtrail.status'](Name=Name,
region=region, key=key, keyid=keyid, profile=profile)
_describe['LoggingEnabled'] = r.get('trail', {}).get('IsLogging', False)
need_update = False
for invar, outvar in {'S3BucketName': 'S3BucketName',
'S3KeyPrefix': 'S3KeyPrefix',
'SnsTopicName': 'SnsTopicName',
'IncludeGlobalServiceEvents': 'IncludeGlobalServiceEvents',
#'IsMultiRegionTrail': 'IsMultiRegionTrail',
'EnableLogFileValidation': 'LogFileValidationEnabled',
'CloudWatchLogsLogGroupArn': 'CloudWatchLogsLogGroupArn',
'CloudWatchLogsRoleArn': 'CloudWatchLogsRoleArn',
'KmsKeyId': 'KmsKeyId',
'LoggingEnabled': 'LoggingEnabled'}.iteritems():
if _describe[outvar] != locals()[invar]:
need_update = True
ret['changes'].setdefault('new', {})[invar] = locals()[invar]
ret['changes'].setdefault('old', {})[invar] = _describe[outvar]
r = __salt__['boto_cloudtrail.list_tags'](Name=Name,
region=region, key=key, keyid=keyid, profile=profile)
_describe['Tags'] = r.get('tags', {})
tagchange = salt.utils.compare_dicts(_describe['Tags'], Tags)
if bool(tagchange):
need_update = True
ret['changes'].setdefault('new', {})['Tags'] = Tags
ret['changes'].setdefault('old', {})['Tags'] = _describe['Tags']
if need_update:
if __opts__['test']:
msg = 'CloudTrail {0} set to be modified.'.format(Name)
ret['comment'] = msg
ret['result'] = None
return ret
ret['comment'] = os.linesep.join([ret['comment'], 'CloudTrail to be modified'])
r = __salt__['boto_cloudtrail.update'](Name=Name,
S3BucketName=S3BucketName,
S3KeyPrefix=S3KeyPrefix,
SnsTopicName=SnsTopicName,
IncludeGlobalServiceEvents=IncludeGlobalServiceEvents,
#IsMultiRegionTrail=IsMultiRegionTrail,
EnableLogFileValidation=EnableLogFileValidation,
CloudWatchLogsLogGroupArn=CloudWatchLogsLogGroupArn,
CloudWatchLogsRoleArn=CloudWatchLogsRoleArn,
KmsKeyId=KmsKeyId,
region=region, key=key, keyid=keyid, profile=profile)
if not r.get('updated'):
ret['result'] = False
ret['comment'] = 'Failed to update trail: {0}.'.format(r['error']['message'])
ret['changes'] = {}
return ret
if LoggingEnabled:
r = __salt__['boto_cloudtrail.start_logging'](Name=Name,
region=region, key=key, keyid=keyid, profile=profile)
if not r.get('started'):
ret['result'] = False
ret['comment'] = 'Failed to update trail: {0}.'.format(r['error']['message'])
ret['changes'] = {}
return ret
else:
r = __salt__['boto_cloudtrail.stop_logging'](Name=Name,
region=region, key=key, keyid=keyid, profile=profile)
if not r.get('stopped'):
ret['result'] = False
ret['comment'] = 'Failed to update trail: {0}.'.format(r['error']['message'])
ret['changes'] = {}
return ret
if bool(tagchange):
adds = {}
removes = {}
for k, diff in tagchange.iteritems():
if diff.get('new', '') != '':
# there's an update for this key
adds[k] = Tags[k]
elif diff.get('old', '') != '':
removes[k] = _describe['Tags'][k]
if bool(adds):
r = __salt__['boto_cloudtrail.add_tags'](Name=Name,
region=region, key=key, keyid=keyid, profile=profile, **adds)
if bool(removes):
r = __salt__['boto_cloudtrail.remove_tags'](Name=Name,
region=region, key=key, keyid=keyid, profile=profile,
**removes)
return ret
def absent(name, Name,
region=None, key=None, keyid=None, profile=None):
'''
Ensure trail with passed properties is absent.
name
The name of the state definition.
Name
Name of the trail.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
'''
ret = {'name': Name,
'result': True,
'comment': '',
'changes': {}
}
r = __salt__['boto_cloudtrail.exists'](Name,
region=region, key=key, keyid=keyid, profile=profile)
if 'error' in r:
ret['result'] = False
ret['comment'] = 'Failed to delete trail: {0}.'.format(r['error']['message'])
return ret
if r and not r['exists']:
ret['comment'] = 'CloudTrail {0} does not exist.'.format(Name)
return ret
if __opts__['test']:
ret['comment'] = 'CloudTrail {0} is set to be removed.'.format(Name)
ret['result'] = None
return ret
r = __salt__['boto_cloudtrail.delete'](Name,
region=region, key=key,
keyid=keyid, profile=profile)
if not r['deleted']:
ret['result'] = False
ret['comment'] = 'Failed to delete trail: {0}.'.format(r['error']['message'])
return ret
ret['changes']['old'] = {'trail': Name}
ret['changes']['new'] = {'trail': None}
ret['comment'] = 'CloudTrail {0} deleted.'.format(Name)
return ret
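# Example state using ``absent`` (added; mirrors the ``present`` example in the
# module docstring above):
#
# .. code-block:: yaml
#
#     Ensure trail is gone:
#       boto_cloudtrail.absent:
#         - Name: mytrail
#         - region: us-east-1
#         - keyid: GKTADJGHEIQSXMKKRBJ08H
#         - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs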
| 36.345109
| 97
| 0.584822
|
3632b2b7af57e14d43cefebd02bbbfb120cdfdee
| 1,328
|
py
|
Python
|
Dino.py
|
ashishcssom/small-projects
|
389be06c5ba0e37fd4164a34e2278210f77903fb
|
[
"MIT"
] | null | null | null |
Dino.py
|
ashishcssom/small-projects
|
389be06c5ba0e37fd4164a34e2278210f77903fb
|
[
"MIT"
] | null | null | null |
Dino.py
|
ashishcssom/small-projects
|
389be06c5ba0e37fd4164a34e2278210f77903fb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sun May 24 21:19:15 2020
@author: imash
"""
import pyautogui # pip install pyautogui
from PIL import Image, ImageGrab # pip install pillow
#from numpy import asarray
import time
def hit(key):
pyautogui.keyDown(key)
return
def isCollide(data):
# Check the detection regions ahead of the dino for dark (obstacle) pixels
for i in range(300, 415):
for j in range(410, 563):
if data[i, j] < 100:
hit("down")
return
for i in range(300, 415):
for j in range(563, 650):
if data[i, j] < 100:
hit("up")
return
return
if __name__ == "__main__":
print("Hey.. Dino game about to start in 3 seconds")
time.sleep(2)
# hit('up')
while True:
image = ImageGrab.grab().convert('L')
data = image.load()
isCollide(data)
#print(asarray(image))
'''
# Draw the rectangle for cactus
for i in range(275, 325):
for j in range(563, 650):
data[i, j] = 0
# Draw the rectangle for birds
for i in range(250, 300):
for j in range(410, 563):
data[i, j] = 171
image.show()
break
'''
| 22.896552
| 57
| 0.487199
|
d13ff84a8aca63d0c819f985845febf2082d2d39
| 605
|
py
|
Python
|
physionet-django/user/migrations/0014_credentialapplication_project_of_interest.py
|
Lucas-Mc/physionet-build
|
77da5da6273cf3f5f2afce95dc5d0ce3302741ca
|
[
"BSD-3-Clause"
] | 36
|
2019-02-14T18:10:39.000Z
|
2022-01-21T12:48:52.000Z
|
physionet-django/user/migrations/0014_credentialapplication_project_of_interest.py
|
Lucas-Mc/physionet-build
|
77da5da6273cf3f5f2afce95dc5d0ce3302741ca
|
[
"BSD-3-Clause"
] | 1,051
|
2019-01-31T18:03:14.000Z
|
2022-03-31T20:53:04.000Z
|
physionet-django/user/migrations/0014_credentialapplication_project_of_interest.py
|
Lucas-Mc/physionet-build
|
77da5da6273cf3f5f2afce95dc5d0ce3302741ca
|
[
"BSD-3-Clause"
] | 13
|
2019-03-26T11:02:32.000Z
|
2022-03-17T11:39:49.000Z
|
# Generated by Django 2.1.9 on 2019-06-19 20:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('project', '0022_author_creation_date'),
('user', '0013_auto_20190618_0832'),
]
operations = [
migrations.AddField(
model_name='credentialapplication',
name='project_of_interest',
field=models.ForeignKey(limit_choices_to={'access_policy': 2}, null=True, on_delete=django.db.models.deletion.SET_NULL, to='project.PublishedProject'),
),
]
| 28.809524
| 163
| 0.669421
|
e1be98ac9c029dd140a0a5a2f47bb00e189bb09a
| 6,744
|
py
|
Python
|
script/cached-benchmark/postprocess.py
|
TITAN-PyCompat/ck-tensorflow
|
6e42c2dc7a98ced05c2e74990b215407f06b542b
|
[
"BSD-3-Clause"
] | null | null | null |
script/cached-benchmark/postprocess.py
|
TITAN-PyCompat/ck-tensorflow
|
6e42c2dc7a98ced05c2e74990b215407f06b542b
|
[
"BSD-3-Clause"
] | null | null | null |
script/cached-benchmark/postprocess.py
|
TITAN-PyCompat/ck-tensorflow
|
6e42c2dc7a98ced05c2e74990b215407f06b542b
|
[
"BSD-3-Clause"
] | null | null | null |
#
# Copyright (c) 2018 cTuning foundation.
# See CK COPYRIGHT.txt for copyright details.
#
# SPDX-License-Identifier: BSD-3-Clause.
# See CK LICENSE.txt for licensing details.
#
import os
import json
TOP1 = 0
TOP5 = 0
IMAGES_COUNT = 0 # to be assigned
def ck_postprocess(i):
print('\n--------------------------------')
def my_env(var): return i['env'].get(var)
def dep_env(dep, var): return i['deps'][dep]['dict']['env'].get(var)
# Init variables from environment
BATCH_COUNT = int(my_env('CK_BATCH_COUNT'))
BATCH_SIZE = int(my_env('CK_BATCH_SIZE'))
global IMAGES_COUNT
IMAGES_COUNT = BATCH_COUNT * BATCH_SIZE
SKIP_IMAGES = int(my_env('CK_SKIP_IMAGES'))
RESULTS_DIR = 'predictions'
NUM_CLASSES = 1000
AUX_DIR = dep_env('imagenet-aux', 'CK_ENV_DATASET_IMAGENET_AUX')
CLASSES_FILE = os.path.join(AUX_DIR, 'synset_words.txt')
VALUES_FILE = os.path.join(AUX_DIR, 'val.txt')
CLASSES_LIST = []
VALUES_MAP = {}
IMAGE_FILE = my_env('CK_IMAGE_FILE')
FULL_REPORT = my_env('CK_SILENT_MODE') != 'YES'
# Loads ImageNet classes and correct predictions
def load_ImageNet_classes():
classes_list = []
with open(CLASSES_FILE, 'r') as classes_file:
classes_list = classes_file.read().splitlines()
values_map = {}
with open(VALUES_FILE, 'r') as values_file:
if IMAGE_FILE:
# Single file mode: try to find this file in values
for line in values_file:
file_name, file_class = line.split()
if file_name == IMAGE_FILE:
values_map[file_name] = int(file_class)
break
else:
# Directory mode: load only required amount of values
for _ in range(SKIP_IMAGES):
values_file.readline().split()
for _ in range(IMAGES_COUNT):
val = values_file.readline().split()
values_map[val[0]] = int(val[1])
return classes_list, values_map
# Returns printable string for ImageNet specific class
def get_class_str(class_index):
obj_class = CLASSES_LIST[class_index]
if len(obj_class) > 50:
obj_class = obj_class[:50] + '...'
return '(%d) %s' % (class_index, obj_class)
# Shows prediction results for image file
# top5 - list of pairs (prob, class_index)
def print_predictions(top5, img_file):
print('---------------------------------------')
if img_file in VALUES_MAP:
class_correct = VALUES_MAP[img_file]
print('%s - %s' % (img_file, get_class_str(class_correct)))
else:
print(img_file)
for prob, class_index in top5:
print('%.2f - %s' % (prob, get_class_str(class_index)))
print('---------------------------------------')
# Returns list of pairs (prob, class_index)
def get_top5(all_probs):
probs_with_classes = []
for class_index in range(len(all_probs)):
prob = all_probs[class_index]
probs_with_classes.append((prob, class_index))
sorted_probs = sorted(probs_with_classes, key = lambda pair: pair[0], reverse=True)
return sorted_probs[0:5]
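# Illustrative example (added): for all_probs = [0.05, 0.60, 0.20, 0.10, 0.03, 0.02]
# this returns [(0.6, 1), (0.2, 2), (0.1, 3), (0.05, 0), (0.03, 4)], i.e. the five
# largest (probability, class_index) pairs, highest probability first.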
# Calculates if prediction was correct for specified image file
# top5 - list of pairs (prob, class_index)
def check_predictions(top5, img_file):
if img_file not in VALUES_MAP:
print('Correctness information is not available')
return {}
class_correct = VALUES_MAP[img_file]
classes = [c[1] for c in top5]
is_top1 = class_correct == classes[0]
is_top5 = class_correct in classes
if is_top1:
global TOP1
TOP1 += 1
if is_top5:
global TOP5
TOP5 += 1
return {
'accuracy_top1': 'yes' if is_top1 else 'no',
'accuracy_top5': 'yes' if is_top5 else 'no',
'class_correct': class_correct,
'class_topmost': classes[0],
'file_name': img_file
}
frame_predictions = []
def calculate_precision():
print('Process results in {}'.format(RESULTS_DIR))
def load_probes(filename):
probes = []
with open(os.path.join(RESULTS_DIR, filename), 'r') as f:
for line in f:
s = line.strip()
if s: probes.append(float(s))
return probes
checked_files = 0
for res_file in sorted(os.listdir(RESULTS_DIR)):
# remove trailing suffix .txt
img_file = res_file[:-4]
checked_files += 1
all_probes = load_probes(res_file)
if len(all_probes) != NUM_CLASSES:
print('WARNING: invalid probe count {} in file {}, results ignored'.format(len(all_probes), res_file))
global IMAGES_COUNT
IMAGES_COUNT -= 1
continue
top5 = get_top5(all_probes)
if FULL_REPORT:
print_predictions(top5, img_file)
elif checked_files % 100 == 0:
print('Predictions checked: {}'.format(checked_files))
res = check_predictions(top5, img_file)
frame_predictions.append(res)
global TOP1
global TOP5
TOP1 = 0
TOP5 = 0
CLASSES_LIST, VALUES_MAP = load_ImageNet_classes()
calculate_precision()
accuracy_top1 = TOP1 / float(IMAGES_COUNT) if IMAGES_COUNT > 0 else 0
accuracy_top5 = TOP5 / float(IMAGES_COUNT) if IMAGES_COUNT > 0 else 0
# Store benchmark results
openme = {}
# Preserve values stored by program
with open('tmp-ck-timer.json', 'r') as o:
old_values = json.load(o)
for key in old_values:
# xopenmp c++ writes this section, copy it into root object
if key == 'run_time_state':
for key1 in old_values[key]:
openme[key1] = old_values[key][key1]
else:
openme[key] = old_values[key]
setup_time = openme.get('setup_time_s', 0.0)
test_time = openme.get('test_time_s', 0.0)
total_load_images_time = openme.get('images_load_time_s', 0.0)
total_prediction_time = openme.get('prediction_time_total_s', 0.0)
avg_prediction_time = openme.get('prediction_time_avg_s', 0.0)
# Print metrics
print('\nSummary:')
print('-------------------------------')
print('Graph loaded in {:.6f}s'.format(setup_time))
print('All images loaded in {:.6f}s'.format(total_load_images_time))
print('All images classified in {:.6f}s'.format(total_prediction_time))
print('Average classification time: {:.6f}s'.format(avg_prediction_time))
print('Accuracy top 1: {} ({} of {})'.format(accuracy_top1, TOP1, IMAGES_COUNT))
print('Accuracy top 5: {} ({} of {})'.format(accuracy_top5, TOP5, IMAGES_COUNT))
openme['accuracy_top1'] = accuracy_top1
openme['accuracy_top5'] = accuracy_top5
openme['frame_predictions'] = frame_predictions
openme['execution_time'] = total_prediction_time
openme['execution_time_sum'] = setup_time + test_time
with open('tmp-ck-timer.json', 'w') as o:
json.dump(openme, o, indent=2, sort_keys=True)
print('--------------------------------\n')
return {'return': 0}
| 32.267943
| 114
| 0.64917
|
c3cfb3de27eb7fcf9251fdabf2fdfc0cf07c2628
| 505
|
py
|
Python
|
models/city.py
|
joseluis-max/AirBnB_clone_v2
|
da5755ab708beb8f31ce6387e01a2eb646b41b46
|
[
"MIT"
] | null | null | null |
models/city.py
|
joseluis-max/AirBnB_clone_v2
|
da5755ab708beb8f31ce6387e01a2eb646b41b46
|
[
"MIT"
] | null | null | null |
models/city.py
|
joseluis-max/AirBnB_clone_v2
|
da5755ab708beb8f31ce6387e01a2eb646b41b46
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
""" City Module for HBNB project """
from sqlalchemy import Column, String, ForeignKey
from sqlalchemy.orm import relationship
from models.base_model import BaseModel, Base
class City(BaseModel, Base):
""" The city class, contains state ID and name """
__tablename__ = 'cities'
state_id = Column(String(60), ForeignKey('states.id'), nullable=False)
name = Column(String(128), nullable=False)
places = relationship('Place', cascade='all, delete', backref='cities')
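# Illustrative note (added): with this mapping, City rows live in the "cities"
# table, each row must reference an existing states.id, and city.places yields the
# related Place rows, which are deleted along with their city via the cascade.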
| 36.071429
| 75
| 0.722772
|
fb1bb8c969cef14d231d8528cb30e473cec40444
| 8,339
|
py
|
Python
|
toontown/coghq/LevelSuitPlannerAI.py
|
CrankySupertoon01/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 1
|
2021-02-13T22:40:50.000Z
|
2021-02-13T22:40:50.000Z
|
toontown/coghq/LevelSuitPlannerAI.py
|
CrankySupertoonArchive/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 1
|
2018-07-28T20:07:04.000Z
|
2018-07-30T18:28:34.000Z
|
toontown/coghq/LevelSuitPlannerAI.py
|
CrankySupertoonArchive/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 2
|
2019-12-02T01:39:10.000Z
|
2021-02-13T22:41:00.000Z
|
from panda3d.core import *
from direct.showbase import DirectObject
from toontown.suit import SuitDNA
from direct.directnotify import DirectNotifyGlobal
import LevelBattleManagerAI
import types
import random
class LevelSuitPlannerAI(DirectObject.DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('LevelSuitPlannerAI')
def __init__(self, air, level, cogCtor, battleCtor, cogSpecs, reserveCogSpecs, battleCellSpecs, battleExpAggreg = None):
self.air = air
self.level = level
self.cogCtor = cogCtor
self.cogSpecs = cogSpecs
if simbase.config.GetBool('level-reserve-suits', 0):
self.reserveCogSpecs = reserveCogSpecs
else:
self.reserveCogSpecs = []
self.battleCellSpecs = battleCellSpecs
self.__genSuitInfos(self.level.getCogLevel(), self.level.getCogTrack())
self.battleMgr = LevelBattleManagerAI.LevelBattleManagerAI(self.air, self.level, battleCtor, battleExpAggreg)
self.battleCellId2suits = {}
for id in self.battleCellSpecs.keys():
self.battleCellId2suits[id] = []
def destroy(self):
self.battleMgr.destroyBattleMgr()
del self.battleMgr
self.battleCellId2suits = {}
self.ignoreAll()
del self.cogSpecs
del self.cogCtor
del self.level
del self.air
def __genJoinChances(self, num):
joinChances = []
for currChance in xrange(num):
joinChances.append(random.randint(1, 100))
joinChances.sort(cmp)
return joinChances
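# Illustrative example (added): __genJoinChances(3) might return e.g. [12, 47, 95],
# i.e. `num` random percentages sorted ascending; each is later compared against
# the battle's lost-HP percentage to decide when that reserve suit joins
# (see __handleRoundFinished below).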
def __genSuitInfos(self, level, track):
def getSuitDict(spec, cogId, level = level, track = track):
suitDict = {}
suitDict['track'] = track
suitDict.update(spec)
suitDict['zoneId'] = self.level.getEntityZoneId(spec['parentEntId'])
suitDict['level'] += level
suitDict['cogId'] = cogId
return suitDict
self.suitInfos = {}
self.suitInfos['activeSuits'] = []
for i in xrange(len(self.cogSpecs)):
spec = self.cogSpecs[i]
self.suitInfos['activeSuits'].append(getSuitDict(spec, i))
numReserve = len(self.reserveCogSpecs)
joinChances = self.__genJoinChances(numReserve)
self.suitInfos['reserveSuits'] = []
for i in xrange(len(self.reserveCogSpecs)):
spec = self.reserveCogSpecs[i]
suitDict = getSuitDict(spec, i)
suitDict['joinChance'] = joinChances[i]
self.suitInfos['reserveSuits'].append(suitDict)
def __genSuitObject(self, suitDict, reserve):
suit = self.cogCtor(simbase.air, self)
dna = SuitDNA.SuitDNA()
dna.newSuitRandom(level=SuitDNA.getRandomSuitType(suitDict['level']), dept=suitDict['track'])
suit.dna = dna
suit.setLevel(suitDict['level'])
suit.setSkeleRevives(suitDict.get('revives'))
suit.setLevelDoId(self.level.doId)
suit.setCogId(suitDict['cogId'])
suit.setReserve(reserve)
if suitDict['skeleton']:
suit.setSkelecog(1)
suit.generateWithRequired(suitDict['zoneId'])
suit.boss = suitDict['boss']
return suit
def genSuits(self):
suitHandles = {}
activeSuits = []
for activeSuitInfo in self.suitInfos['activeSuits']:
suit = self.__genSuitObject(activeSuitInfo, 0)
suit.setBattleCellIndex(activeSuitInfo['battleCell'])
activeSuits.append(suit)
suitHandles['activeSuits'] = activeSuits
reserveSuits = []
for reserveSuitInfo in self.suitInfos['reserveSuits']:
suit = self.__genSuitObject(reserveSuitInfo, 1)
reserveSuits.append([suit, reserveSuitInfo['joinChance'], reserveSuitInfo['battleCell']])
suitHandles['reserveSuits'] = reserveSuits
return suitHandles
def __suitCanJoinBattle(self, cellId):
battle = self.battleMgr.getBattle(cellId)
if not battle.suitCanJoin():
return 0
return 1
def requestBattle(self, suit, toonId):
cellIndex = suit.getBattleCellIndex()
cellSpec = self.battleCellSpecs[cellIndex]
pos = cellSpec['pos']
zone = self.level.getZoneId(self.level.getEntityZoneEntId(cellSpec['parentEntId']))
maxSuits = 4
self.battleMgr.newBattle(cellIndex, zone, pos, suit, toonId, self.__handleRoundFinished, self.__handleBattleFinished, maxSuits)
for otherSuit in self.battleCellId2suits[cellIndex]:
if otherSuit is not suit:
if self.__suitCanJoinBattle(cellIndex):
self.battleMgr.requestBattleAddSuit(cellIndex, otherSuit)
else:
battle = self.battleMgr.getBattle(cellIndex)
if battle:
self.notify.warning('battle not joinable: numSuits=%s, joinable=%s, fsm=%s, toonId=%s' % (len(battle.suits),
battle.isJoinable(),
battle.fsm.getCurrentState().getName(),
toonId))
else:
self.notify.warning('battle not joinable: no battle for cell %s, toonId=%s' % (cellIndex, toonId))
return 0
return 1
def __handleRoundFinished(self, cellId, toonIds, totalHp, deadSuits):
totalMaxHp = 0
level = self.level
battle = self.battleMgr.cellId2battle[cellId]
for suit in battle.suits:
totalMaxHp += suit.maxHP
for suit in deadSuits:
level.suits.remove(suit)
cellReserves = []
for info in level.reserveSuits:
if info[2] == cellId:
cellReserves.append(info)
numSpotsAvailable = 4 - len(battle.suits)
if len(cellReserves) > 0 and numSpotsAvailable > 0:
self.joinedReserves = []
if len(battle.suits) == 0:
hpPercent = 100
else:
hpPercent = 100 - totalHp / totalMaxHp * 100.0
for info in cellReserves:
if info[1] <= hpPercent and len(self.joinedReserves) < numSpotsAvailable:
level.suits.append(info[0])
self.joinedReserves.append(info)
info[0].setBattleCellIndex(cellId)
for info in self.joinedReserves:
level.reserveSuits.remove(info)
if len(self.joinedReserves) > 0:
self.reservesJoining(battle)
level.d_setSuits()
return
if len(battle.suits) == 0:
if battle:
battle.resume()
else:
battle = self.battleMgr.cellId2battle.get(cellId)
if battle:
battle.resume()
def __handleBattleFinished(self, zoneId):
pass
def reservesJoining(self, battle):
for info in self.joinedReserves:
battle.suitRequestJoin(info[0])
battle.resume()
self.joinedReserves = []
def getDoId(self):
return 0
def removeSuit(self, suit):
suit.requestDelete()
def suitBattleCellChange(self, suit, oldCell, newCell):
if oldCell is not None:
if oldCell in self.battleCellId2suits:
self.battleCellId2suits[oldCell].remove(suit)
else:
self.notify.warning('FIXME crash bandaid suitBattleCellChange suit.doId =%s, oldCell=%s not in battleCellId2Suits.keys %s' % (suit.doId, oldCell, self.battleCellId2suits.keys()))
blocker = self.battleMgr.battleBlockers.get(oldCell)
if blocker:
blocker.removeSuit(suit)
if newCell is not None:
self.battleCellId2suits[newCell].append(suit)
def addSuitToBlocker(self = self):
blocker = self.battleMgr.battleBlockers.get(newCell)
if blocker:
blocker.addSuit(suit)
return 1
return 0
if not addSuitToBlocker():
self.accept(self.getBattleBlockerEvent(newCell), addSuitToBlocker)
return
def getBattleBlockerEvent(self, cellId):
return 'battleBlockerAdded-' + str(self.level.doId) + '-' + str(cellId)
| 38.252294
| 194
| 0.605588
|
22aa1e7bfe730572b77678fa4111664dea276bbe
| 3,674
|
py
|
Python
|
dufi/gui/boxes/custombox/exceptionboxui.py
|
Shura1oplot/dufi
|
c9c25524020e57d3670c298acca305900b6490e7
|
[
"MIT"
] | null | null | null |
dufi/gui/boxes/custombox/exceptionboxui.py
|
Shura1oplot/dufi
|
c9c25524020e57d3670c298acca305900b6490e7
|
[
"MIT"
] | null | null | null |
dufi/gui/boxes/custombox/exceptionboxui.py
|
Shura1oplot/dufi
|
c9c25524020e57d3670c298acca305900b6490e7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
exceptionboxui = '''\
<?xml version='1.0' encoding='utf-8'?>
<interface>
<object class="tk.Toplevel" id="ToplevelMainWindow">
<property name="height">200</property>
<property name="resizable">both</property>
<property name="title" translatable="yes">Application Error</property>
<property name="width">200</property>
<child>
<object class="ttk.Frame" id="FrameMainWindow">
<property name="height">200</property>
<property name="padding">5</property>
<property name="width">200</property>
<bind add="" handler="on_button_close" sequence="<Escape>" />
<layout>
<property name="column">0</property>
<property name="propagate">True</property>
<property name="row">0</property>
<property name="sticky">nsew</property>
<rows>
<row id="1">
<property name="minsize">5</property>
<property name="pad">0</property>
</row>
<row id="2">
<property name="weight">1</property>
</row>
</rows>
<columns>
<column id="0">
<property name="weight">1</property>
</column>
</columns>
</layout>
<child>
<object class="ttk.Label" id="LabelInfo">
<property name="text" translatable="yes">An error (exception) has occurred in the program.</property>
<property name="textvariable">string:caption</property>
<layout>
<property name="column">0</property>
<property name="propagate">True</property>
<property name="row">0</property>
<property name="sticky">nsew</property>
</layout>
</object>
</child>
<child>
<object class="ttk.Button" id="ButtonClose">
<property name="command">on_button_close</property>
<property name="text" translatable="yes">Close</property>
<bind add="" handler="on_button_close" sequence="<Return>" />
<layout>
<property name="column">1</property>
<property name="propagate">True</property>
<property name="row">0</property>
<property name="sticky">e</property>
</layout>
</object>
</child>
<child>
<object class="pygubu.builder.widgets.scrollbarhelper" id="scrollbarhelperLog">
<property name="scrolltype">both</property>
<layout>
<property name="column">0</property>
<property name="columnspan">2</property>
<property name="propagate">True</property>
<property name="row">2</property>
<property name="sticky">nsew</property>
</layout>
<child>
<object class="tk.Text" id="TextException">
<property name="height">20</property>
<property name="text" translatable="yes">01234567890123456789012345678901234567890123456789012345678901234567890123456789</property>
<property name="width">100</property>
<property name="wrap">word</property>
<layout>
<property name="column">0</property>
<property name="propagate">True</property>
<property name="row">0</property>
<property name="sticky">nsew</property>
</layout>
</object>
</child>
</object>
</child>
</object>
</child>
</object>
</interface>
'''
| 39.505376
| 148
| 0.538922
|
306e9558ef6090c1c7db72367273e5ef15c40f84
| 3,825
|
py
|
Python
|
ecs_connect/config.py
|
saurabhjambhule/ecs-connect
|
49fdffb03b1cb811513a5bc0720009540b29833f
|
[
"MIT"
] | null | null | null |
ecs_connect/config.py
|
saurabhjambhule/ecs-connect
|
49fdffb03b1cb811513a5bc0720009540b29833f
|
[
"MIT"
] | null | null | null |
ecs_connect/config.py
|
saurabhjambhule/ecs-connect
|
49fdffb03b1cb811513a5bc0720009540b29833f
|
[
"MIT"
] | null | null | null |
""" Config helper """
import os
from configparser import RawConfigParser
class ECSConfig():
""" Config handler class """
def __init__(self, logger):
self.logger = logger
self.config_path = os.path.expanduser('~') + '/.ecs_connect'
self._value = RawConfigParser()
self._value.read(self.config_path)
def get_cluster(self, profile):
""" Gets ECS cluster from config """
if self._value.has_section(profile):
if self._value.has_option(profile, 'cluster'):
cluster = self._value.get(profile, 'cluster')
self.logger.info("Connecting to: %s cluster" % cluster)
else:
self.logger.error(
"No cluster parameter found"
)
exit(1)
else:
self.logger.error(
"No profile found. Please define a default profile, \
or specify a named profile using `--profile`"
)
exit(1)
return cluster
def get_awsprofile(self, profile):
""" Gets AWS profile from config """
awsprofile = None
if self._value.has_option(profile, 'awsprofile'):
awsprofile = self._value.get(profile, 'awsprofile')
else:
awsprofile = "default"
self.logger.info("%s is selected as awsprofile" % awsprofile)
return awsprofile
def get_service(self, profile):
""" Gets service from config """
service = None
if self._value.has_option(profile, 'service'):
service = self._value.get(profile, 'service')
else:
self.logger.error(
"No service parameter found"
)
exit(1)
self.logger.info("%s is selected for connection" % service)
return service
def get_task(self, profile):
""" Gets service from config """
task = None
if self._value.has_option(profile, 'task'):
task = self._value.get(profile, 'task')
self.logger.info("%s is selected as task" % task)
return task
def get_bastion(self, profile):
""" Gets bastion node id from config """
bastion = None
if self._value.has_option(profile, 'bastion'):
bastion = self._value.get(profile, 'bastion')
self.logger.info("%s is selected as bastion node" % bastion)
return bastion
def get_cmd(self, profile):
""" Gets init command from config """
cmd = None
if self._value.has_option(profile, 'cmd'):
cmd = self._value.get(profile, 'cmd')
else:
self.logger.error(
"No cmd parameter found"
)
exit(1)
self.logger.info("%s is selected as initilization command" % cmd)
return cmd
def get_ssh_user(self, profile):
""" Gets ssh user name from config """
ssh_user = 'root'
if self._value.has_option(profile, 'ssh_user'):
ssh_user = self._value.get(profile, 'ssh_user')
self.logger.info("%s is selected as a ssh user" % ssh_user)
return ssh_user
def get_ssh_key(self, profile):
""" Gets ssh key path from config """
ssh_key = '/home/ssm-user/bastion'
if self._value.has_option(profile, 'ssh_key'):
ssh_key = self._value.get(profile, 'ssh_key')
self.logger.info("%s is selected as a ssh user" % ssh_key)
return ssh_key
def get_ssh_port(self, profile):
""" Gets ssh key path from config """
ssh_port = '22'
if self._value.has_option(profile, 'ssh_port'):
ssh_port = self._value.get(profile, 'ssh_port')
self.logger.info("%s is selected as a ssh port" % ssh_port)
return ssh_port
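# Illustrative ~/.ecs_connect profile (added as documentation; section and option
# names follow the getters above, all values are placeholders):
#
#   [default]
#   cluster = my-cluster
#   awsprofile = default
#   service = my-service
#   cmd = /bin/bash
#   bastion = i-0123456789abcdef0
#   ssh_user = ec2-user
#   ssh_key = /home/ssm-user/bastion
#   ssh_port = 22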
| 34.459459
| 73
| 0.570196
|
754d5e1138b1a491f88b9c5c4f3b8568933f0b54
| 7,648
|
py
|
Python
|
appengine/monorail/project/project_helpers.py
|
xinghun61/infra
|
b5d4783f99461438ca9e6a477535617fadab6ba3
|
[
"BSD-3-Clause"
] | 2
|
2021-04-13T21:22:18.000Z
|
2021-09-07T02:11:57.000Z
|
appengine/monorail/project/project_helpers.py
|
asdfghjjklllllaaa/infra
|
8f63af54e46194cd29291813f2790ff6e986804d
|
[
"BSD-3-Clause"
] | 21
|
2020-09-06T02:41:05.000Z
|
2022-03-02T04:40:01.000Z
|
appengine/monorail/project/project_helpers.py
|
xinghun61/infra
|
b5d4783f99461438ca9e6a477535617fadab6ba3
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Helper functions and classes used by the project pages."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import logging
import re
import settings
from framework import framework_bizobj
from framework import framework_views
from framework import permissions
from project import project_views
from proto import project_pb2
_RE_EMAIL_SEPARATORS = re.compile(r'\s|,|;')
def BuildProjectMembers(cnxn, project, user_service):
"""Gather data for the members section of a project page.
Args:
cnxn: connection to SQL database.
project: Project PB of current project.
user_service: an instance of UserService for user persistence.
Returns:
A dictionary suitable for use with EZT.
"""
# First, get all needed info on all users in one batch of requests.
users_by_id = framework_views.MakeAllUserViews(
cnxn, user_service, framework_bizobj.AllProjectMembers(project))
# Second, group the user proxies by role for display.
owner_proxies = [users_by_id[owner_id]
for owner_id in project.owner_ids]
committer_proxies = [users_by_id[committer_id]
for committer_id in project.committer_ids]
contributor_proxies = [users_by_id[contrib_id]
for contrib_id in project.contributor_ids]
return {
'owners': owner_proxies,
'committers': committer_proxies,
'contributors': contributor_proxies,
'all_members': list(users_by_id.values()),
}
def BuildProjectAccessOptions(project):
"""Return a list of project access values for use in an HTML menu.
Args:
project: current Project PB, or None when creating a new project.
Returns:
A list of ProjectAccessView objects that can be used in EZT.
"""
access_levels = [project_pb2.ProjectAccess.ANYONE,
project_pb2.ProjectAccess.MEMBERS_ONLY]
access_views = []
for access in access_levels:
# Offer the allowed access levels. When editing an existing project,
# its current access level may always be kept, even if it is no longer
# in the list of allowed access levels for new projects.
if (access in settings.allowed_access_levels or
(project and access == project.access)):
access_views.append(project_views.ProjectAccessView(access))
return access_views
def ParseUsernames(cnxn, user_service, usernames_text):
"""Parse all usernames from a text field and return a list of user IDs.
Args:
cnxn: connection to SQL database.
user_service: an instance of UserService for user persistence.
usernames_text: string that the user entered into a form field for a list
of email addresses. Or, None if the browser did not send that value.
Returns:
A set of user IDs for the users named. Or, an empty set if the
usernames_field was not in post_data.
"""
if not usernames_text: # The user did not enter any addresses.
return set()
email_list = _RE_EMAIL_SEPARATORS.split(usernames_text)
# skip empty strings between consecutive separators
email_list = [email for email in email_list if email]
id_dict = user_service.LookupUserIDs(cnxn, email_list, autocreate=True)
return set(id_dict.values())
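# For example (illustrative input), _RE_EMAIL_SEPARATORS splits
# 'a@example.com, b@example.com; c@example.com' on whitespace, commas and
# semicolons, yielding the three addresses once empty strings are dropped.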
def ParseProjectAccess(project, access_num_str):
"""Parse and validate the "access" field out of post_data.
Args:
project: Project PB for the project that was edited, or None if the
user is creating a new project.
    access_num_str: string of digits from the user's POST that identifies
the desired project access level. Or, None if that widget was not
offered to the user.
Returns:
An enum project access level, or None if the user did not specify
any value or if the value specified was invalid.
"""
access = None
if access_num_str:
access_number = int(access_num_str)
available_access_levels = BuildProjectAccessOptions(project)
allowed_access_choices = [access_view.key for access_view
in available_access_levels]
if access_number in allowed_access_choices:
access = project_pb2.ProjectAccess(access_number)
return access
def MembersWithoutGivenIDs(project, exclude_ids):
"""Return three lists of member user IDs, with member_ids not in them."""
owner_ids = [user_id for user_id in project.owner_ids
if user_id not in exclude_ids]
committer_ids = [user_id for user_id in project.committer_ids
if user_id not in exclude_ids]
contributor_ids = [user_id for user_id in project.contributor_ids
if user_id not in exclude_ids]
return owner_ids, committer_ids, contributor_ids
def MembersWithGivenIDs(project, new_member_ids, role):
"""Return three lists of member IDs with the new IDs in the right one.
Args:
project: Project PB for the project to get current members from.
new_member_ids: set of user IDs for members being added.
role: string name of the role that new_member_ids should be granted.
Returns:
Three lists of member IDs with new_member_ids added to the appropriate
list and removed from any other role.
Raises:
ValueError: if the role is not one of owner, committer, or contributor.
"""
owner_ids, committer_ids, contributor_ids = MembersWithoutGivenIDs(
project, new_member_ids)
if role == 'owner':
owner_ids.extend(new_member_ids)
elif role == 'committer':
committer_ids.extend(new_member_ids)
elif role == 'contributor':
contributor_ids.extend(new_member_ids)
else:
raise ValueError()
return owner_ids, committer_ids, contributor_ids
def UsersInvolvedInProject(project):
"""Return a set of all user IDs referenced in the Project."""
result = set()
result.update(project.owner_ids)
result.update(project.committer_ids)
result.update(project.contributor_ids)
result.update([perm.member_id for perm in project.extra_perms])
return result
def UsersWithPermsInProject(project, perms_needed, users_by_id,
effective_ids_by_user):
# Users that have the given permission are stored in direct_users_for_perm,
# users whose effective ids have the given permission are stored in
# indirect_users_for_perm.
direct_users_for_perm = {perm: set() for perm in perms_needed}
indirect_users_for_perm = {perm: set() for perm in perms_needed}
# Iterate only over users that have extra permissions, so we don't
# have to search the extra perms more than once for each user.
for extra_perm_pb in project.extra_perms:
extra_perms = set(perm.lower() for perm in extra_perm_pb.perms)
for perm, users in direct_users_for_perm.items():
if perm.lower() in extra_perms:
users.add(extra_perm_pb.member_id)
# Then, iterate over all users, but don't compute extra permissions.
for user_id, user_view in users_by_id.items():
effective_ids = effective_ids_by_user[user_id].union([user_id])
user_perms = permissions.GetPermissions(
user_view.user, effective_ids, project)
for perm, users in direct_users_for_perm.items():
if not effective_ids.isdisjoint(users):
indirect_users_for_perm[perm].add(user_id)
if user_perms.HasPerm(perm, None, None, []):
users.add(user_id)
for perm, users in direct_users_for_perm.items():
users.update(indirect_users_for_perm[perm])
return direct_users_for_perm
| 36.075472
| 77
| 0.735094
|
c110104e22f3d365fe97834e08d4ead7539f798c
| 2,430
|
py
|
Python
|
data/cirq_new/cirq_program/startCirq_noisy743.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/cirq_new/cirq_program/startCirq_noisy743.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/cirq_new/cirq_program/startCirq_noisy743.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=20
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[1])) # number=7
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=5
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=6
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=8
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=9
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=10
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=11
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=12
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=13
c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=14
c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=15
c.append(cirq.Y.on(input_qubit[3])) # number=16
c.append(cirq.Y.on(input_qubit[3])) # number=17
c.append(cirq.X.on(input_qubit[3])) # number=18
c.append(cirq.X.on(input_qubit[3])) # number=19
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2820
circuit = circuit.with_noise(cirq.depolarize(p=0.01))
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_noisy743.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
| 34.225352
| 77
| 0.6893
|
5a053ddba58ddce51a2126769a7292f702f4eecc
| 1,137
|
py
|
Python
|
puzzle/models/sql/phenotypeterm.py
|
robinandeer/puzzle
|
9476f05b416d3a5135d25492cb31411fdf831c58
|
[
"MIT"
] | 24
|
2015-10-15T16:29:58.000Z
|
2020-12-08T22:14:13.000Z
|
puzzle/models/sql/phenotypeterm.py
|
J35P312/PuzzleWin
|
20f2521306492722fc035b5db18927578f1eae4a
|
[
"MIT"
] | 212
|
2015-10-08T14:28:36.000Z
|
2020-04-29T22:44:10.000Z
|
puzzle/models/sql/phenotypeterm.py
|
J35P312/PuzzleWin
|
20f2521306492722fc035b5db18927578f1eae4a
|
[
"MIT"
] | 11
|
2015-10-08T09:26:46.000Z
|
2018-02-02T16:45:07.000Z
|
# -*- coding: utf-8 -*-
from sqlalchemy import Column, ForeignKey, Integer, String, UniqueConstraint
from sqlalchemy.orm import relationship
from .models import BASE
class PhenotypeTerm(BASE):
"""Represent a HPO phenotype term."""
__tablename__ = "phenotype_term"
__table_args__ = (UniqueConstraint('ind_id', 'phenotype_id',
name='_ind_phenotype_uc'),)
id = Column(Integer, primary_key=True)
phenotype_id = Column(String(32), nullable=False)
description = Column(String(128))
ind_id = Column(Integer, ForeignKey('individual.id'))
individual = relationship('Individual', backref=('phenotypes'))
@property
def hpo_link(self):
"""Return a HPO link."""
return ("http://compbio.charite.de/hpoweb/showterm?id={}"
.format(self.phenotype_id))
@property
def omim_link(self):
"""Return a OMIM phenotype link."""
return "http://www.omim.org/entry/{}".format(self.phenotype_id)
def __repr__(self):
return ("PhenotypeTerm(phenotype_id={this.phenotype_id})"
.format(this=self))
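# For example, a term with phenotype_id 'HP:0001250' (an illustrative HPO id)
# yields hpo_link == 'http://compbio.charite.de/hpoweb/showterm?id=HP:0001250'.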
| 30.72973
| 76
| 0.64292
|
fb15777bd601277880d369bac1b4e55f1ff08d19
| 422
|
py
|
Python
|
src/simfoni/apps/company/migrations/0004_auto_20180510_1628.py
|
django-stars/simfoni-test
|
eaca4adc8177505e7c53e708456fd0dbb6be0b71
|
[
"MIT"
] | null | null | null |
src/simfoni/apps/company/migrations/0004_auto_20180510_1628.py
|
django-stars/simfoni-test
|
eaca4adc8177505e7c53e708456fd0dbb6be0b71
|
[
"MIT"
] | null | null | null |
src/simfoni/apps/company/migrations/0004_auto_20180510_1628.py
|
django-stars/simfoni-test
|
eaca4adc8177505e7c53e708456fd0dbb6be0b71
|
[
"MIT"
] | 4
|
2018-04-26T17:43:24.000Z
|
2018-05-10T14:11:09.000Z
|
# Generated by Django 2.0.4 on 2018-05-10 16:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('company', '0003_auto_20180510_1235'),
]
operations = [
migrations.AlterField(
model_name='rawcompany',
name='name',
field=models.CharField(max_length=255, verbose_name='Raw company name'),
),
]
| 22.210526
| 84
| 0.616114
|
c346e9e2a1338db4cd19d613db72dc1b0e8ad6cc
| 1,705
|
py
|
Python
|
example/reinforcement-learning/ddpg/strategies.py
|
axbaretto/mxnet
|
5f593885356ff6d14f5519fa18e79b944beb51cd
|
[
"Apache-2.0"
] | 36
|
2018-02-10T07:14:27.000Z
|
2021-09-03T09:11:59.000Z
|
example/reinforcement-learning/ddpg/strategies.py
|
yanghaojin/BMXNet
|
102f8d0ed59529bbd162c37bf07ae58ad6c4caa1
|
[
"Apache-2.0"
] | 3
|
2017-07-10T21:49:18.000Z
|
2017-07-12T22:40:06.000Z
|
example/reinforcement-learning/ddpg/strategies.py
|
yanghaojin/BMXNet
|
102f8d0ed59529bbd162c37bf07ae58ad6c4caa1
|
[
"Apache-2.0"
] | 15
|
2017-09-20T15:24:53.000Z
|
2018-01-11T11:14:03.000Z
|
import numpy as np
class BaseStrategy(object):
"""
Base class of exploration strategy.
"""
def get_action(self, obs, policy):
raise NotImplementedError
def reset(self):
pass
class OUStrategy(BaseStrategy):
"""
Ornstein-Uhlenbeck process: dxt = theta * (mu - xt) * dt + sigma * dWt
where Wt denotes the Wiener process.
"""
def __init__(self, env_spec, mu=0, theta=0.15, sigma=0.3):
self.mu = mu
self.theta = theta
self.sigma = sigma
self.action_space = env_spec.action_space
self.state = np.ones(self.action_space.flat_dim) * self.mu
def evolve_state(self):
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(len(x))
self.state = x + dx
return self.state
def reset(self):
self.state = np.ones(self.action_space.flat_dim) * self.mu
def get_action(self, obs, policy):
# get_action accepts a 2D tensor with one row
obs = obs.reshape((1, -1))
action = policy.get_action(obs)
increment = self.evolve_state()
return np.clip(action + increment,
self.action_space.low,
self.action_space.high)
if __name__ == "__main__":
class Env1(object):
def __init__(self):
self.action_space = Env2()
class Env2(object):
def __init__(self):
self.flat_dim = 2
env_spec = Env1()
test = OUStrategy(env_spec)
states = []
for i in range(1000):
states.append(test.evolve_state()[0])
import matplotlib.pyplot as plt
plt.plot(states)
plt.show()
| 20.792683
| 78
| 0.578886
|
a9511dbd1a39c7ce1721c9eddb533497d8f40983
| 930
|
py
|
Python
|
mmdet/core/bbox/samplers/pseudo_sampler.py
|
deepakksingh/mmdetection
|
b0d845f1fecf8064db30ef6b456b6ef5f36fa40f
|
[
"Apache-2.0"
] | 232
|
2021-05-25T12:55:24.000Z
|
2022-03-25T07:58:49.000Z
|
mmdet/core/bbox/samplers/pseudo_sampler.py
|
deepakksingh/mmdetection
|
b0d845f1fecf8064db30ef6b456b6ef5f36fa40f
|
[
"Apache-2.0"
] | 51
|
2021-05-29T06:36:54.000Z
|
2022-03-27T09:24:39.000Z
|
mmdet/core/bbox/samplers/pseudo_sampler.py
|
deepakksingh/mmdetection
|
b0d845f1fecf8064db30ef6b456b6ef5f36fa40f
|
[
"Apache-2.0"
] | 66
|
2021-06-01T03:40:08.000Z
|
2022-03-30T16:51:21.000Z
|
import torch
from ..builder import BBOX_SAMPLERS
from .base_sampler import BaseSampler
from .sampling_result import SamplingResult
@BBOX_SAMPLERS.register_module()
class PseudoSampler(BaseSampler):
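    # A no-op "sampler": sample() keeps every index produced by the assigner,
    # treating boxes with gt_inds > 0 as positives and gt_inds == 0 as negatives.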
def __init__(self, **kwargs):
pass
def _sample_pos(self, **kwargs):
raise NotImplementedError
def _sample_neg(self, **kwargs):
raise NotImplementedError
def sample(self, assign_result, bboxes, gt_bboxes, **kwargs):
pos_inds = torch.nonzero(
assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique()
neg_inds = torch.nonzero(
assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique()
gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8)
sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
assign_result, gt_flags)
return sampling_result
| 32.068966
| 79
| 0.673118
|
d6cc858dbbacacbac522059c83e407b8b4490b1a
| 3,866
|
py
|
Python
|
gensei/intersectors.py
|
rc/gensei
|
8a8be511b545e1618a3140295a564b09001e095e
|
[
"BSD-3-Clause"
] | 1
|
2015-03-05T10:57:30.000Z
|
2015-03-05T10:57:30.000Z
|
gensei/intersectors.py
|
rc/gensei
|
8a8be511b545e1618a3140295a564b09001e095e
|
[
"BSD-3-Clause"
] | null | null | null |
gensei/intersectors.py
|
rc/gensei
|
8a8be511b545e1618a3140295a564b09001e095e
|
[
"BSD-3-Clause"
] | null | null | null |
from scipy.linalg import eig, inv
from gensei.base import np, Object, pause
class Intersector(Object):
"""
Base class defining the intersector interface.
An intersector is a generalization of the concept of bounding box. Its
purpose is to quickly decide that two objects do not intersect. False
positives are, on the other hand possible. The possible results of an
intersector test are thus:
- objects do not intersect (no common inner or surface point)
- objects might intersect
"""
def __init__(self, **kwargs):
self.ibox = [[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[1, 1, 0],
[0, 0, 1],
[1, 0, 1],
[0, 1, 1],
[1, 1, 1]]
self.segments = []
self.set_data(**kwargs)
def set_centre(self, centre):
"""
Set the intersector's centre.
"""
self.centre = np.array(centre, dtype=np.float64)
def intersects_fast(self, other):
"""
Test intersection of axes-aligned bounding boxes.
"""
sbox = self.get_aligned_bounding_box()
obox = other.get_aligned_bounding_box()
flag = False
val = np.empty((3,), dtype=np.float64)
for ii, ib in enumerate(self.ibox):
val[0] = sbox[0, ib[0]]
val[1] = sbox[1, ib[1]]
val[2] = sbox[2, ib[2]]
flag = np.any((obox[:,0] <= val) & (val <= obox[:,1]))
if flag:
break
else:
for ii, ib in enumerate(self.ibox):
val[0] = obox[0, ib[0]]
val[1] = obox[1, ib[1]]
val[2] = obox[2, ib[2]]
flag = np.any((sbox[:,0] <= val) & (val <= sbox[:,1]))
if flag:
break
return flag
def iter_segments(self):
return iter(self.segments)
class EllipsoidIntersector(Intersector):
"""
The intersector corresponding to a bounding ellipsoid.
"""
traits = {
'mtx' : '%s',
'mtx_hc' : '%s',
'centre' : '%s',
}
def __init__(self, mtx=None, mtx_hc=None, centre=None):
Intersector.__init__(self, mtx=mtx, mtx_hc=mtx_hc, centre=centre)
self.segments = [self]
def set_data(self, mtx=None, mtx_hc=None, centre=None):
if mtx is not None:
self.mtx = mtx
if mtx_hc is not None:
self.mtx_hc = mtx_hc
if centre is not None:
self.centre = centre
def get_origin_bounding_box(self):
"""
Get the ellipsoid's axes-aligned bounding box as if centered at the
origin.
Return:
bbox : 3 x 2 array
The bounding box.
"""
aux = np.sqrt(np.diag(inv(self.mtx)))[:,np.newaxis]
return np.c_[-aux, aux]
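    # For an ellipsoid defined by x^T M x = 1, the half-width of its axes-aligned
    # bounding box along coordinate i is sqrt((M^{-1})_ii), which is the quantity
    # computed above from self.mtx.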
def get_aligned_bounding_box(self):
"""
Get the ellipsoid's axes-aligned bounding box.
Return:
bbox : 3 x 2 array
The bounding box.
"""
obb = self.get_origin_bounding_box()
return obb + self.centre[:,np.newaxis]
def intersects(self, other):
"""Test if two ellipsoids self and other intersect.
Returns
-------
flag : int
- 0 -> the ellipsoids are disjoint
- 1 -> touch in a single surface point
- 2 -> have common inner points
"""
A, B = self.mtx_hc, other.mtx_hc
eigs = eig(np.dot(-inv(A), B), left=False, right=False).real
roots = np.sort(eigs)
## print A, B, roots
if roots[2] > 0:
if roots[2] != roots[3]:
return 0
else:
return 1
else:
return 2
| 27.81295
| 75
| 0.503104
|
a26874b3456b16be15e1256393011f9c31f79d5c
| 4,750
|
py
|
Python
|
demo/image_classification/train.py
|
qizheng09/cloud
|
2a6aa526f2eec0d3e3aedca78d15bdc72b85bef9
|
[
"Apache-2.0"
] | null | null | null |
demo/image_classification/train.py
|
qizheng09/cloud
|
2a6aa526f2eec0d3e3aedca78d15bdc72b85bef9
|
[
"Apache-2.0"
] | 1
|
2018-02-13T11:39:03.000Z
|
2018-02-13T11:39:03.000Z
|
demo/image_classification/train.py
|
qizheng09/cloud
|
2a6aa526f2eec0d3e3aedca78d15bdc72b85bef9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import sys, os
import paddle.v2 as paddle
from vgg import vgg_bn_drop
from resnet import resnet_cifar10
with_gpu = os.getenv('WITH_GPU', '0') != '0'
def main():
datadim = 3 * 32 * 32
classdim = 10
# PaddlePaddle init
paddle.init(use_gpu=with_gpu, trainer_count=1)
image = paddle.layer.data(
name="image", type=paddle.data_type.dense_vector(datadim))
# Add neural network config
# option 1. resnet
# net = resnet_cifar10(image, depth=32)
# option 2. vgg
net = vgg_bn_drop(image)
out = paddle.layer.fc(input=net,
size=classdim,
act=paddle.activation.Softmax())
lbl = paddle.layer.data(
name="label", type=paddle.data_type.integer_value(classdim))
cost = paddle.layer.classification_cost(input=out, label=lbl)
# Create parameters
parameters = paddle.parameters.create(cost)
# Create optimizer
momentum_optimizer = paddle.optimizer.Momentum(
momentum=0.9,
regularization=paddle.optimizer.L2Regularization(rate=0.0002 * 128),
learning_rate=0.1 / 128.0,
learning_rate_decay_a=0.1,
learning_rate_decay_b=50000 * 100,
learning_rate_schedule='discexp')
# End batch and end pass event handler
def event_handler(event):
if isinstance(event, paddle.event.EndIteration):
if event.batch_id % 100 == 0:
print "\nPass %d, Batch %d, Cost %f, %s" % (
event.pass_id, event.batch_id, event.cost, event.metrics)
else:
sys.stdout.write('.')
sys.stdout.flush()
if isinstance(event, paddle.event.EndPass):
# save parameters
with open('params_pass_%d.tar' % event.pass_id, 'w') as f:
parameters.to_tar(f)
result = trainer.test(
reader=paddle.batch(
paddle.dataset.cifar.test10(), batch_size=128),
feeding={'image': 0,
'label': 1})
print "\nTest with Pass %d, %s" % (event.pass_id, result.metrics)
# Create trainer
trainer = paddle.trainer.SGD(cost=cost,
parameters=parameters,
update_equation=momentum_optimizer)
# Save the inference topology to protobuf.
inference_topology = paddle.topology.Topology(layers=out)
with open("inference_topology.pkl", 'wb') as f:
inference_topology.serialize_for_inference(f)
trainer.train(
reader=paddle.batch(
paddle.reader.shuffle(
paddle.dataset.cifar.train10(), buf_size=50000),
batch_size=128),
num_passes=200,
event_handler=event_handler,
feeding={'image': 0,
'label': 1})
# inference
from PIL import Image
import numpy as np
import os
def load_image(file):
im = Image.open(file)
im = im.resize((32, 32), Image.ANTIALIAS)
im = np.array(im).astype(np.float32)
        # The storage order of the loaded image is W(width),
# H(height), C(channel). PaddlePaddle requires
# the CHW order, so transpose them.
im = im.transpose((2, 0, 1)) # CHW
# In the training phase, the channel order of CIFAR
# image is B(Blue), G(green), R(Red). But PIL open
# image in RGB mode. It must swap the channel order.
im = im[(2, 1, 0), :, :] # BGR
im = im.flatten()
im = im / 255.0
return im
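    # Sanity note: the flattened vector returned above has length 3 * 32 * 32 =
    # 3072, matching the `datadim` used for the "image" data layer.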
test_data = []
cur_dir = os.path.dirname(os.path.realpath(__file__))
test_data.append((load_image(cur_dir + '/image/dog.png'), ))
# users can remove the comments and change the model name
# with open('params_pass_50.tar', 'r') as f:
# parameters = paddle.parameters.Parameters.from_tar(f)
probs = paddle.infer(
output_layer=out, parameters=parameters, input=test_data)
lab = np.argsort(-probs) # probs and lab are the results of one batch data
print "Label of image/dog.png is: %d" % lab[0][0]
if __name__ == '__main__':
main()
| 34.172662
| 79
| 0.617263
|
dc77a04eb728cad43ab7764d234f9a9be51d840b
| 1,070
|
py
|
Python
|
src/data/transforms.py
|
LucasFidon/TRABIT_BraTS2021
|
5e950f57a8580356b0b4037477c5069113f3cf31
|
[
"BSD-3-Clause"
] | 5
|
2022-01-04T01:27:18.000Z
|
2022-02-10T13:43:01.000Z
|
src/data/transforms.py
|
LucasFidon/TRABIT_BraTS2021
|
5e950f57a8580356b0b4037477c5069113f3cf31
|
[
"BSD-3-Clause"
] | null | null | null |
src/data/transforms.py
|
LucasFidon/TRABIT_BraTS2021
|
5e950f57a8580356b0b4037477c5069113f3cf31
|
[
"BSD-3-Clause"
] | 2
|
2022-01-03T10:20:08.000Z
|
2022-01-19T05:54:19.000Z
|
# Copyright 2021 Lucas Fidon and Suprosanna Shit
from monai.transforms import MapTransform
from dataset_config.loader import load_brats_data_config
class ConvertToMultiChannelBasedOnBratsClassesd(MapTransform):
"""
Convert labels to multi channels based on brats classes:
label 1 is the peritumoral edema
label 2 is the necrotic and non-enhancing tumor
    label 4 is the enhancing tumor.
    Convert label 4 to label 3.
    If label 3 is already present, we assume that the conversion
    has already been performed and this transform does nothing.
"""
def __init__(self, **kwargs):
# super(ConvertToMultiChannelBasedOnBratsClassesd, self).__init__(**kwargs)
config = load_brats_data_config()
self.ET_label = config['info']['labels']['ET']
super().__init__(**kwargs)
def __call__(self, data):
d = dict(data)
label3_present = (d[self.keys[0]] == 3).sum() > 0
if not label3_present:
d[self.keys[0]][d[self.keys[0]] == self.ET_label] = 3
return d
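# A minimal usage sketch (the 'label' key and array are illustrative; the
# transform follows the usual MONAI dict-transform convention):
#   transform = ConvertToMultiChannelBasedOnBratsClassesd(keys=['label'])
#   sample = transform({'label': label_array})  # voxels equal to ET_label become 3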
| 35.666667
| 83
| 0.688785
|
40939ab589c1df91f7f1ef487b5dc964260d91bf
| 29
|
py
|
Python
|
dagster/repository/dags/dags/__init__.py
|
nullhack/docker-airflow
|
c0e31ef6bcfcf5e213d6e8776aa12d544fe00921
|
[
"MIT"
] | 5
|
2020-01-05T07:25:45.000Z
|
2022-03-17T05:38:52.000Z
|
dagster/repository/dags/dags/__init__.py
|
nullhack/docker-airflow
|
c0e31ef6bcfcf5e213d6e8776aa12d544fe00921
|
[
"MIT"
] | null | null | null |
dagster/repository/dags/dags/__init__.py
|
nullhack/docker-airflow
|
c0e31ef6bcfcf5e213d6e8776aa12d544fe00921
|
[
"MIT"
] | 1
|
2019-10-23T09:48:33.000Z
|
2019-10-23T09:48:33.000Z
|
from .repository import dags
| 14.5
| 28
| 0.827586
|
39b998c6a6e41625e65bf8c9ee3c05dcd91fc625
| 78
|
py
|
Python
|
models.py
|
zhangruochi/Mol-HGT
|
81c1662cdfcf9796651c761c4c64715cf7be64ce
|
[
"MIT"
] | 3
|
2022-01-25T08:36:20.000Z
|
2022-02-23T09:16:49.000Z
|
models.py
|
zhangruochi/Mol-HGT
|
81c1662cdfcf9796651c761c4c64715cf7be64ce
|
[
"MIT"
] | 1
|
2022-02-15T10:19:26.000Z
|
2022-02-24T14:25:37.000Z
|
models.py
|
zhangruochi/Mol-HGT
|
81c1662cdfcf9796651c761c4c64715cf7be64ce
|
[
"MIT"
] | null | null | null |
from core.molPred_tfg.model import molHgt
models = {
'molHgt': molHgt,
}
| 13
| 41
| 0.692308
|
e6b0ccff6d4193568fa73b984c4dccbcf2bad902
| 1,183
|
py
|
Python
|
executing_updates.py
|
o-netzer/operations1
|
729fda93ac1c754fd4a5db0857f539637849e8bb
|
[
"MIT"
] | null | null | null |
executing_updates.py
|
o-netzer/operations1
|
729fda93ac1c754fd4a5db0857f539637849e8bb
|
[
"MIT"
] | null | null | null |
executing_updates.py
|
o-netzer/operations1
|
729fda93ac1c754fd4a5db0857f539637849e8bb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 28 09:25:12 2017
@author: netzer
This is a start script for a series of update scripts, each of which fetches the
latest data from either an Oracle or a MySQL database and saves it to an
output file (for details see the paths given in the input section below).
The script can be started at any time, as often as you like: it processes
new data whenever any is available and otherwise does nothing.
"""
##################### Input ##################################################
paths = [r'C:\Users\netzer\Meins\PythonScripts\operations\CHE_Auswertung.py',
r'C:\Users\netzer\Meins\PythonScripts\operations\AUT_Auswertung.py',
r'C:\Users\netzer\Meins\PythonScripts\operations\Deliveries.py',
r'C:\Users\netzer\Meins\PythonScripts\operations\bereits_aktiviert.py']
##################### Input ##################################################
for update in paths:
try:
exec(open(update).read(), globals())
except SystemExit:
print('No new data')
print('-----------------------------------------------------')
print('')
continue
| 39.433333
| 81
| 0.565511
|
a29e3e521f44919c211730a2de0f34d9bf11f4b3
| 2,461
|
py
|
Python
|
HARK/ConsumptionSaving/ConsIndShockModelDemos/TryAlternativeParameterValues.py
|
jvasile/HARK
|
6543432fe4105b28881d0fcf9e8c48c88caaac1d
|
[
"Apache-2.0"
] | null | null | null |
HARK/ConsumptionSaving/ConsIndShockModelDemos/TryAlternativeParameterValues.py
|
jvasile/HARK
|
6543432fe4105b28881d0fcf9e8c48c88caaac1d
|
[
"Apache-2.0"
] | 1
|
2019-03-22T10:14:44.000Z
|
2019-03-22T10:14:44.000Z
|
HARK/ConsumptionSaving/ConsIndShockModelDemos/TryAlternativeParameterValues.py
|
jvasile/HARK
|
6543432fe4105b28881d0fcf9e8c48c88caaac1d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 9 09:40:49 2017
@author: ccarroll@llorracc.org
"""
from __future__ import division, print_function
from builtins import str
from builtins import range
import pylab # the plotting tools
xPoints=100 # number of points at which to sample a function when plotting it using pylab
mMinVal = 0. # minimum value of the consumer's cash-on-hand to show in plots
mMaxVal = 5. # maximum value of the consumer's cash-on-hand to show in plots
import HARK.ConsumptionSaving.ConsumerParameters as Params # Read in the database of parameters
my_dictionary = Params.init_idiosyncratic_shocks # Create a dictionary containing the default values of the parameters
import numpy as np # Get the suite of tools for doing numerical computation in python
from HARK.ConsumptionSaving.ConsIndShockModel import IndShockConsumerType # Set up the tools for solving a consumer's problem
# define a function that generates the plot
def perturbParameterToGetcPlotList(base_dictionary,param_name,param_min,param_max,N=20,time_vary=False):
param_vec = np.linspace(param_min,param_max,num=N,endpoint=True) # vector of alternative values of the parameter to examine
thisConsumer = IndShockConsumerType(**my_dictionary) # create an instance of the consumer type
thisConsumer.cycles = 0 # Make this type have an infinite horizon
x = np.linspace(mMinVal,mMaxVal,xPoints,endpoint=True) # Define a vector of x values that span the range from the minimum to the maximum values of m
for j in range(N): # loop from the first to the last values of the parameter
if time_vary: # Some parameters are time-varying; others are not
setattr(thisConsumer,param_name,[param_vec[j]])
else:
setattr(thisConsumer,param_name,param_vec[j])
thisConsumer.update() # set up the preliminaries required to solve the problem
thisConsumer.solve() # solve the problem
y = thisConsumer.solution[0].cFunc(x) # Get the values of the consumption function at the points in the vector of x points
pylab.plot(x,y,label=str(round(param_vec[j],3))) # plot it and generate a label indicating the rounded value of the parameter
pylab.legend(loc='upper right') # put the legend in the upper right
return pylab # return the figure
cPlot_by_DiscFac = perturbParameterToGetcPlotList(my_dictionary,'DiscFac',0.899,0.999,5,False) # create the figure
cPlot_by_DiscFac.show() # show it
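# Other parameters can be explored the same way; for example (the parameter name
# and range below are illustrative and assume 'CRRA' exists in the dictionary):
# cPlot_by_CRRA = perturbParameterToGetcPlotList(my_dictionary, 'CRRA', 2.0, 5.0, 5, False)
# cPlot_by_CRRA.show()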
| 57.232558
| 152
| 0.761073
|
7c8ecf70752a4888dd1d31163cc67511df95bde6
| 5,639
|
py
|
Python
|
test/unit/proxy/controllers/test_container.py
|
ucsc-hp-group/swift
|
d6f1cb851d256ceffdff6d61513f42005e7ddcec
|
[
"Apache-2.0"
] | 2
|
2016-01-26T14:31:04.000Z
|
2016-01-26T14:31:08.000Z
|
test/unit/proxy/controllers/test_container.py
|
ucsc-hp-group/swift
|
d6f1cb851d256ceffdff6d61513f42005e7ddcec
|
[
"Apache-2.0"
] | null | null | null |
test/unit/proxy/controllers/test_container.py
|
ucsc-hp-group/swift
|
d6f1cb851d256ceffdff6d61513f42005e7ddcec
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from swift.common.swob import Request
from swift.proxy import server as proxy_server
from swift.proxy.controllers.base import headers_to_container_info
from test.unit import fake_http_connect, FakeRing, FakeMemcache
from swift.common.request_helpers import get_sys_meta_prefix
class TestContainerController(unittest.TestCase):
def setUp(self):
self.app = proxy_server.Application(None, FakeMemcache(),
account_ring=FakeRing(),
container_ring=FakeRing(),
object_ring=FakeRing())
def test_container_info_in_response_env(self):
controller = proxy_server.ContainerController(self.app, 'a', 'c')
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200, 200, body='')):
req = Request.blank('/v1/a/c', {'PATH_INFO': '/v1/a/c'})
resp = controller.HEAD(req)
self.assertEqual(2, resp.status_int // 100)
self.assertTrue("swift.container/a/c" in resp.environ)
self.assertEqual(headers_to_container_info(resp.headers),
resp.environ['swift.container/a/c'])
def test_swift_owner(self):
owner_headers = {
'x-container-read': 'value', 'x-container-write': 'value',
'x-container-sync-key': 'value', 'x-container-sync-to': 'value'}
controller = proxy_server.ContainerController(self.app, 'a', 'c')
req = Request.blank('/v1/a/c')
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200, 200, headers=owner_headers)):
resp = controller.HEAD(req)
self.assertEquals(2, resp.status_int // 100)
for key in owner_headers:
self.assertTrue(key not in resp.headers)
req = Request.blank('/v1/a/c', environ={'swift_owner': True})
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200, 200, headers=owner_headers)):
resp = controller.HEAD(req)
self.assertEquals(2, resp.status_int // 100)
for key in owner_headers:
self.assertTrue(key in resp.headers)
def _make_callback_func(self, context):
def callback(ipaddr, port, device, partition, method, path,
headers=None, query_string=None, ssl=False):
context['method'] = method
context['path'] = path
context['headers'] = headers or {}
return callback
def test_sys_meta_headers_PUT(self):
# check that headers in sys meta namespace make it through
# the container controller
sys_meta_key = '%stest' % get_sys_meta_prefix('container')
sys_meta_key = sys_meta_key.title()
user_meta_key = 'X-Container-Meta-Test'
controller = proxy_server.ContainerController(self.app, 'a', 'c')
context = {}
callback = self._make_callback_func(context)
hdrs_in = {sys_meta_key: 'foo',
user_meta_key: 'bar',
'x-timestamp': '1.0'}
req = Request.blank('/v1/a/c', headers=hdrs_in)
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200, 200, give_connect=callback)):
controller.PUT(req)
self.assertEqual(context['method'], 'PUT')
self.assertTrue(sys_meta_key in context['headers'])
self.assertEqual(context['headers'][sys_meta_key], 'foo')
self.assertTrue(user_meta_key in context['headers'])
self.assertEqual(context['headers'][user_meta_key], 'bar')
self.assertNotEqual(context['headers']['x-timestamp'], '1.0')
def test_sys_meta_headers_POST(self):
# check that headers in sys meta namespace make it through
# the container controller
sys_meta_key = '%stest' % get_sys_meta_prefix('container')
sys_meta_key = sys_meta_key.title()
user_meta_key = 'X-Container-Meta-Test'
controller = proxy_server.ContainerController(self.app, 'a', 'c')
context = {}
callback = self._make_callback_func(context)
hdrs_in = {sys_meta_key: 'foo',
user_meta_key: 'bar',
'x-timestamp': '1.0'}
req = Request.blank('/v1/a/c', headers=hdrs_in)
with mock.patch('swift.proxy.controllers.base.http_connect',
fake_http_connect(200, 200, give_connect=callback)):
controller.POST(req)
self.assertEqual(context['method'], 'POST')
self.assertTrue(sys_meta_key in context['headers'])
self.assertEqual(context['headers'][sys_meta_key], 'foo')
self.assertTrue(user_meta_key in context['headers'])
self.assertEqual(context['headers'][user_meta_key], 'bar')
self.assertNotEqual(context['headers']['x-timestamp'], '1.0')
if __name__ == '__main__':
unittest.main()
| 45.475806
| 76
| 0.63646
|
ac40aa0fe5e517605f875a851d9f21017604b1be
| 3,505
|
py
|
Python
|
robokassa/views.py
|
movermeyer/django-robokassa
|
e6b2c42415db145540abf541e7b17c60fc4e1f09
|
[
"MIT"
] | 12
|
2015-11-25T06:56:16.000Z
|
2021-07-01T14:37:12.000Z
|
robokassa/views.py
|
movermeyer/django-robokassa
|
e6b2c42415db145540abf541e7b17c60fc4e1f09
|
[
"MIT"
] | 1
|
2015-12-01T05:03:00.000Z
|
2015-12-03T10:30:06.000Z
|
robokassa/views.py
|
movermeyer/django-robokassa
|
e6b2c42415db145540abf541e7b17c60fc4e1f09
|
[
"MIT"
] | 9
|
2015-11-30T23:43:37.000Z
|
2021-05-31T11:44:56.000Z
|
# coding: utf-8
from django.http import HttpResponse
from django.template.response import TemplateResponse
from django.views.decorators.csrf import csrf_exempt
from robokassa.conf import USE_POST
from robokassa.forms import (
ResultURLForm, SuccessRedirectForm, FailRedirectForm)
from robokassa.models import SuccessNotification
from robokassa.signals import (
result_received, success_page_visited, fail_page_visited)
@csrf_exempt
def receive_result(request, **credentials):
"""
    Handler for ResultURL.
"""
data = request.POST if USE_POST else request.GET
form = ResultURLForm(data, **credentials)
if form.is_valid():
inv_id = form.cleaned_data['InvId']
out_sum = form.cleaned_data['OutSum']
        # Save the successful notification in the database so that an
        # additional check can be performed on the order success page
notification = SuccessNotification.objects.create(
InvId=inv_id, OutSum=out_sum
)
        # Additional actions on the order (e.g. changing its status) can be
        # performed in the robokassa.signals.result_received signal handler
result_received.send(
sender=notification, InvId=inv_id, OutSum=out_sum,
extra=form.extra_params())
return HttpResponse('OK%s' % inv_id)
return HttpResponse('error: bad signature')
@csrf_exempt
def success(request, template_name='robokassa/success.html',
extra_context=None,
error_template_name='robokassa/error.html', **credentials):
"""
    Handler for SuccessURL.
"""
data = request.POST if USE_POST else request.GET
form = SuccessRedirectForm(data, **credentials)
if form.is_valid():
inv_id = form.cleaned_data['InvId']
out_sum = form.cleaned_data['OutSum']
        # When strict checking is not used, actions on the order can be
        # performed in the robokassa.signals.success_page_visited
        # signal handler
success_page_visited.send(
sender=form, InvId=inv_id, OutSum=out_sum,
extra=form.extra_params())
context = {'InvId': inv_id, 'OutSum': out_sum, 'form': form}
context.update(form.extra_params())
context.update(extra_context or {})
return TemplateResponse(request, template_name, context)
return TemplateResponse(request, error_template_name, {'form': form})
@csrf_exempt
def fail(request, template_name='robokassa/fail.html', extra_context=None,
error_template_name='robokassa/error.html', **credentials):
"""
    Handler for FailURL.
"""
data = request.POST if USE_POST else request.GET
form = FailRedirectForm(data, **credentials)
if form.is_valid():
inv_id = form.cleaned_data['InvId']
out_sum = form.cleaned_data['OutSum']
        # Additional actions on the order (e.g. changing its status to release
        # the reserved goods in stock) can be performed in the
        # robokassa.signals.fail_page_visited signal handler
fail_page_visited.send(
sender=form, InvId=inv_id, OutSum=out_sum,
extra=form.extra_params())
context = {'InvId': inv_id, 'OutSum': out_sum, 'form': form}
context.update(form.extra_params())
context.update(extra_context or {})
return TemplateResponse(request, template_name, context)
return TemplateResponse(request, error_template_name, {'form': form})
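# A minimal URL wiring sketch (paths and names are illustrative, not part of
# this module; credentials, if any, can be passed as extra view kwargs):
#   from django.urls import path
#   from robokassa import views
#   urlpatterns = [
#       path('robokassa/result/', views.receive_result, name='robokassa-result'),
#       path('robokassa/success/', views.success, name='robokassa-success'),
#       path('robokassa/fail/', views.fail, name='robokassa-fail'),
#   ]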
| 35.765306
| 79
| 0.69301
|
dd88a4481fdbf69934595ba8eabdca69ec623012
| 9,626
|
py
|
Python
|
boml/upper_iter/forward.py
|
vis-opt-group/BDA
|
0350187b12cb1f36d87ac4f6bc2f15a282e8fac4
|
[
"MIT"
] | 2
|
2021-12-20T03:24:27.000Z
|
2022-01-10T14:16:21.000Z
|
boml/upper_iter/forward.py
|
vis-opt-group/BDA
|
0350187b12cb1f36d87ac4f6bc2f15a282e8fac4
|
[
"MIT"
] | null | null | null |
boml/upper_iter/forward.py
|
vis-opt-group/BDA
|
0350187b12cb1f36d87ac4f6bc2f15a282e8fac4
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, print_function, division
from collections import OrderedDict
import tensorflow as tf
from tensorflow.python.training import slot_creator
import sys
import boml.extension
from boml import utils
from boml.upper_iter.outer_grad import BOMLOuterGrad
from boml.utils import maybe_add,reduce_all_sums,dot
RAISE_ERROR_ON_DETACHED = False
class BOMLOuterGradForward(BOMLOuterGrad):
def __init__(
self, inner_method="Trad", name="BOMLOuterGradForward"
):
"""
        Utility method to initialize forward hypergradient computation.
:param name: a name for the operations and variables that will be created
:return: Forward object
"""
super(BOMLOuterGradForward, self).__init__(name)
self._forward_initializer = tf.no_op()
self._zs = {} # hyperparameter - zs dictionary
self._z_iter = tf.no_op()
self._iteration = None
self.A_dot_zs = {}
_HYPER_RANK_ERROR_MESSAGE = """
ForwardHG: Only scalar hyperparameters accepted.\n
Hyperparameter tensor {} has rank {}.\n
Use keyword argument far_ho.get_hyperparameter(..., scalar=True) on hyperparameter creation.
"""
# noinspection SpellCheckingInspection
def compute_gradients(
self, outer_objective, inner_grad, meta_param=None, param_dict=OrderedDict()
):
"""
        Function that adds to the computational graph all the operations needed for computing
        the hypergradients in a "dynamic" way, without unrolling the entire optimization graph.
        The resulting computation, while being roughly 2x more expensive than unrolling the
        optimization dynamics, requires much less (GPU) memory and is more flexible, allowing
        one to set a termination condition on the parameter optimization routine.
        :param inner_grad: OptimizerDict object resulting from the inner objective optimization.
:param outer_objective: A loss function for the outer parameters (scalar tensor)
:param meta_param: Optional list of outer parameters to consider. If not provided will get all variables in the
hyperparameter collection in the current scope.
:return: list of outer parameters involved in the computation
"""
meta_param = super(BOMLOuterGradForward, self).compute_gradients(outer_objective, inner_grad, meta_param)
# scalar_hyper_list
with tf.variable_scope(outer_objective.op.name):
# dynamics_vec = vectorize_all(optimizer_dict.dynamics) # in the new implementation there's no need of
# vectorizing... it might be more efficient since it's better to avoid too many reshaping operations...
d_oo_d_state = tf.gradients(outer_objective, list(inner_grad.state))
with tf.name_scope('DUMMY'): # variables to compute forward propagation
# TODO avoid this computation if optimizer_dict has already been seen.
aux_vs = [tf.zeros_like(v) for v in inner_grad.state]
dynamics_dot_aux_v = reduce_all_sums(list(inner_grad.dynamics), aux_vs)
der_dynamics_dot_aux_v = tf.gradients(dynamics_dot_aux_v, list(inner_grad.state))
# this is a list of jacobians times aux_vs that have the same dimension of states variables.
init_dynamics_dot_aux_v = None
if inner_grad.init_dynamics:
# init_dynamics_dot_aux_v = dot(vectorize_all(optimizer_dict.init_dynamics), aux_v_vec) # old impl
init_dynamics_dot_aux_v = reduce_all_sums(
inner_grad.init_dynamics, aux_vs)
for meta_par in meta_param:
assert meta_par.shape.ndims >= 0, BOMLOuterGradForward._HYPER_RANK_ERROR_MESSAGE.format(meta_par, meta_par.shape.ndims)
d_init_dyn_d_hyp = None if init_dynamics_dot_aux_v is None else \
tf.gradients(init_dynamics_dot_aux_v, meta_par)[0]
d_dyn_d_hyp = tf.gradients(dynamics_dot_aux_v, meta_par)[0]
d_oo_d_hyp = tf.gradients(outer_objective, meta_par)[0]
# ------------------------------------------------------------
# check detached hyperparameters (for which hypergradient would be always null)
hyper_ok = d_init_dyn_d_hyp is not None or d_dyn_d_hyp is not None or d_oo_d_hyp is not None
if RAISE_ERROR_ON_DETACHED:
# try:
assert hyper_ok, BOMLOuterGrad._ERROR_HYPER_DETACHED.format(meta_par)
# ex
else:
if not hyper_ok:
print(BOMLOuterGrad._ERROR_HYPER_DETACHED.format(meta_par), file=sys.stderr)
meta_param.remove(meta_par)
# -------------------------------------------------------------
# UPDATE OF TOTAL DERIVATIVE OF STATE W.R.T. HYPERPARAMETER
zs = BOMLOuterGradForward._create_zs(
inner_grad, meta_par, None if d_init_dyn_d_hyp is None else tf.gradients(d_init_dyn_d_hyp, aux_vs)
) # this is one z for each variable
self._zs[meta_par] = zs # store a reference for the total derivatives for easy access
Bs = tf.gradients(d_dyn_d_hyp, aux_vs)
A_dot_zs = tf.gradients(reduce_all_sums(der_dynamics_dot_aux_v, zs), aux_vs)
self.A_dot_zs[meta_par] = A_dot_zs
_z_iter = tf.group(*[
z.assign(maybe_add(A_dot_z, B)) for z, A_dot_z, B
in zip(zs, A_dot_zs, Bs)
])
self._z_iter = tf.group(self._z_iter, _z_iter)
# -- HYPERGRADIENT -----
d_E_T = [dot(d_oo_d_s, z) for d_oo_d_s, z in zip(d_oo_d_state, zs)
if d_oo_d_s is not None and z is not None] # list of dot products
hg = maybe_add(tf.reduce_sum(d_E_T), d_oo_d_hyp) # sum the partial dot products and possibly ->
# adds the ''direct derivative'' term d(E( . , \lambda))/d \lambda
self._hypergrad_dictionary[meta_par].append(hg)
self._forward_initializer = tf.group(self._forward_initializer,
tf.variables_initializer(zs))
return meta_param
@staticmethod
def _create_zs(optimizer_dict, hyper, d_init_dynamics_d_hyper):
if d_init_dynamics_d_hyper is None: d_init_dynamics_d_hyper = [None] * len(optimizer_dict)
with tf.variable_scope('Z'):
z = [slot_creator.create_slot(v, utils.val_or_zero(der, v), hyper.op.name) for v, der
in zip(optimizer_dict.state, d_init_dynamics_d_hyper)]
[tf.add_to_collection(boml.extension.GraphKeys.ZS, lm) for lm in z]
# in this case it is completely fine to keep zs into the global variable...
return z
@staticmethod
def _create_outergradient_from_dodh(hyper, doo_dhypers):
"""
Creates one hyper-gradient as a variable. doo_dhypers: initialization, that is the derivative of
the outer objective w.r.t this hyper
"""
hgs = slot_creator.create_slot(
hyper, utils.val_or_zero(doo_dhypers, hyper), "outergradient"
)
boml.extension.remove_from_collection(
boml.extension.GraphKeys.GLOBAL_VARIABLES, hgs
)
return hgs
@staticmethod
def _create_outergradient(outer_obj, hyper):
return BOMLOuterGradForward._create_outergradient_from_dodh(
hyper, tf.gradients(outer_obj, hyper)[0]
)
def _state_feed_dict_generator(self, history, T_or_generator):
for t, his in zip(utils.solve_int_or_generator(T_or_generator), history):
yield t, utils.merge_dicts(
*[
od.state_feed_dict(h)
for od, h in zip(sorted(self._optimizer_dicts), his)
]
)
def apply_gradients(
self,
inner_objective_feed_dicts=None,
outer_objective_feed_dicts=None,
initializer_feed_dict=None,
param_dict=OrderedDict(),
train_batches=None,
experiments=[],
global_step=None,
session=None,
):
ss = session or tf.get_default_session()
self._run_batch_initialization(ss, utils.maybe_call(
initializer_feed_dict, utils.maybe_eval(global_step, ss)))
for t in utils.solve_int_or_generator(param_dict['T']):
_fd = utils.maybe_call(inner_objective_feed_dicts, t)
self._forward_step(ss, _fd)
def _forward_step(self, ss, _fd):
ss.run(self._z_iter, _fd)
ss.run(self.iteration, _fd)
def _run_batch_initialization(self, ss, fd):
ss.run(self.initialization, feed_dict=fd)
ss.run(self._forward_initializer, feed_dict=fd)
@property
def w_dots(self):
# if hyper: return self._zs[hyper]
return [{h: self._zs[h][k] for h in self._zs} for k, _ in enumerate(self.state)]
def z_callback(self, hyperparameter=None, flatten=True):
zs_values = []
zs = list(self._zs.values()) if hyperparameter is None else self._zs[hyperparameter]
if flatten: zs = utils.vectorize_all(zs)
# noinspection PyUnusedLocal
def _callback(_, __, ss):
zs_values.append(ss.run(zs)) # these should not depend from any feed dictionary
return zs_values, _callback
| 45.838095
| 135
| 0.636921
|
5233c4daf1896d6ab5285a32614b975a9ba05762
| 12,524
|
py
|
Python
|
slowfast/visualization/demo_loader.py
|
Morgan-Gan/Slowfast-fb
|
f7387d5f30d609f6d0e022a81c51729b647dae97
|
[
"Apache-2.0"
] | null | null | null |
slowfast/visualization/demo_loader.py
|
Morgan-Gan/Slowfast-fb
|
f7387d5f30d609f6d0e022a81c51729b647dae97
|
[
"Apache-2.0"
] | null | null | null |
slowfast/visualization/demo_loader.py
|
Morgan-Gan/Slowfast-fb
|
f7387d5f30d609f6d0e022a81c51729b647dae97
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import atexit
import copy
import queue
import threading
import time
import cv2
import slowfast.utils.loggings as logging
from slowfast.visualization.utils import TaskInfo
logger = logging.get_logger(__name__)
class VideoManager:
"""
VideoManager object for getting frames from video source for inference.
"""
def __init__(self, cfg):
"""
Args:
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
"""
assert (
cfg.DEMO.WEBCAM > -1 or cfg.DEMO.INPUT_VIDEO != ""
), "Must specify a data source as input."
self.source = (
cfg.DEMO.WEBCAM if cfg.DEMO.WEBCAM > -1 else cfg.DEMO.INPUT_VIDEO
)
self.display_width = cfg.DEMO.DISPLAY_WIDTH
self.display_height = cfg.DEMO.DISPLAY_HEIGHT
self.cap = cv2.VideoCapture(self.source)
if self.display_width > 0 and self.display_height > 0:
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.display_width)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.display_height)
else:
self.display_width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
self.display_height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
if not self.cap.isOpened():
raise IOError("Video {} cannot be opened".format(self.source))
self.output_file = None
if cfg.DEMO.OUTPUT_FPS == -1:
self.output_fps = self.cap.get(cv2.CAP_PROP_FPS)
else:
self.output_fps = cfg.DEMO.OUTPUT_FPS
if cfg.DEMO.OUTPUT_FILE != "":
self.output_file = self.get_output_file(
cfg.DEMO.OUTPUT_FILE, fps=self.output_fps
)
self.id = -1
self.buffer = []
self.buffer_size = cfg.DEMO.BUFFER_SIZE
self.seq_length = cfg.DATA.NUM_FRAMES * cfg.DATA.SAMPLING_RATE
self.test_crop_size = cfg.DATA.TEST_CROP_SIZE
self.clip_vis_size = cfg.DEMO.CLIP_VIS_SIZE
    def __iter__(self):  # __iter__ makes this object an iterable collection
return self
def __next__(self):
"""
Read and return the required number of frames for 1 clip.
Returns:
was_read (bool): False if not enough frames to return.
task (TaskInfo object): object contains metadata for the current clips.
"""
self.id += 1
task = TaskInfo()
task.img_height = self.display_height
task.img_width = self.display_width
task.crop_size = self.test_crop_size
task.clip_vis_size = self.clip_vis_size
frames = []
if len(self.buffer) != 0:
frames = self.buffer
was_read = True
while was_read and len(frames) < self.seq_length:
was_read, frame = self.cap.read()
frames.append(frame)
if was_read and self.buffer_size != 0:
self.buffer = frames[-self.buffer_size :]
task.add_frames(self.id, frames) # Add the clip and corresponding id
task.num_buffer_frames = 0 if self.id == 0 else self.buffer_size
return was_read, task
def get_output_file(self, path, fps=30):
"""
Return a video writer object.
Args:
path (str): path to the output video file.
fps (int or float): frames per second.
"""
return cv2.VideoWriter(
filename=path,
fourcc=cv2.VideoWriter_fourcc(*"mp4v"),
fps=float(fps),
frameSize=(self.display_width, self.display_height),
isColor=True,
)
def display(self, task):
"""
Either display a single frame (BGR image) to a window or write to
an output file if output path is provided.
Args:
            task (TaskInfo object): task object that contains
                the necessary information for prediction visualization (e.g. visualized frames).
"""
for frame in task.frames[task.num_buffer_frames :]:
if self.output_file is None:
cv2.imshow("SlowFast", frame)
time.sleep(1 / self.output_fps)
else:
self.output_file.write(frame)
def clean(self):
"""
Clean up open video files and windows.
"""
self.cap.release()
if self.output_file is None:
cv2.destroyAllWindows()
else:
self.output_file.release()
def start(self):
return self
def join(self):
pass
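# A minimal usage sketch for VideoManager (cfg is assumed to be a fully
# populated CfgNode; inference itself is elided):
#   manager = VideoManager(cfg).start()
#   for was_read, task in manager:
#       if not was_read:
#           break
#       # ...run the model on task.frames and draw predictions...
#       manager.display(task)
#   manager.join()
#   manager.clean()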
class ThreadVideoManager:
"""
VideoManager object for getting frames from video source for inference
using multithreading for read and write frames.
"""
def __init__(self, cfg):
"""
Args:
cfg (CfgNode): configs. Details can be found in
slowfast/config/defaults.py
"""
assert (
cfg.DEMO.WEBCAM > -1 or cfg.DEMO.INPUT_VIDEO != ""
), "Must specify a data source as input."
self.source = (
cfg.DEMO.WEBCAM if cfg.DEMO.WEBCAM > -1 else cfg.DEMO.INPUT_VIDEO
)
self.display_width = cfg.DEMO.DISPLAY_WIDTH
self.display_height = cfg.DEMO.DISPLAY_HEIGHT
self.cap = cv2.VideoCapture(self.source)
if self.display_width > 0 and self.display_height > 0:
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.display_width)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.display_height)
else:
self.display_width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
self.display_height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
if not self.cap.isOpened():
raise IOError("Video {} cannot be opened".format(self.source))
self.output_file = None
if cfg.DEMO.OUTPUT_FPS == -1:
self.output_fps = self.cap.get(cv2.CAP_PROP_FPS)
else:
self.output_fps = cfg.DEMO.OUTPUT_FPS
if cfg.DEMO.OUTPUT_FILE != "":
self.output_file = self.get_output_file(
cfg.DEMO.OUTPUT_FILE, fps=self.output_fps
)
self.num_skip = cfg.DEMO.NUM_CLIPS_SKIP + 1
self.get_id = -1
self.put_id = -1
self.buffer = []
self.buffer_size = cfg.DEMO.BUFFER_SIZE
self.seq_length = cfg.DATA.NUM_FRAMES * cfg.DATA.SAMPLING_RATE
self.test_crop_size = cfg.DATA.TEST_CROP_SIZE
self.clip_vis_size = cfg.DEMO.CLIP_VIS_SIZE
self.read_queue = queue.Queue()
self.write_queue = {}
self.not_end = True
self.write_lock = threading.Lock()
self.put_id_lock = threading.Lock()
self.input_lock = threading.Lock()
self.output_lock = threading.Lock()
self.stopped = False
atexit.register(self.clean)
def get_output_file(self, path, fps=30):
"""
Return a video writer object.
Args:
path (str): path to the output video file.
fps (int or float): frames per second.
"""
return cv2.VideoWriter(
filename=path,
fourcc=cv2.VideoWriter_fourcc(*"mp4v"),
fps=float(fps),
frameSize=(self.display_width, self.display_height),
isColor=True,
)
def __iter__(self):
return self
def put_fn(self):
"""
Grabbing frames from VideoCapture.
"""
was_read = True
while was_read and not self.stopped:
task = TaskInfo()
task.img_height = self.display_height
task.img_width = self.display_width
task.crop_size = self.test_crop_size
task.clip_vis_size = self.clip_vis_size
frames = []
if len(self.buffer) != 0:
frames = self.buffer
self.input_lock.acquire()
while was_read and len(frames) < self.seq_length:
was_read, frame = self.cap.read()
if was_read:
frames.append(frame)
self.input_lock.release()
if was_read:
self.buffer = frames[-self.buffer_size :]
task.add_frames(self.put_id + 1, frames)
task.num_buffer_frames = (
0 if self.put_id == -1 else self.buffer_size
)
with self.put_id_lock:
self.put_id += 1
self.not_end = was_read
# If mode is to read the most recent clip or we reach task
# index that is not supposed to be skipped.
if self.num_skip == 0 or self.put_id % self.num_skip == 0:
self.read_queue.put((was_read, copy.deepcopy(task)))
else:
with self.write_lock:
self.write_queue[task.id] = (was_read, copy.deepcopy(task))
def __next__(self):
# If there is nothing in the task queue.
if self.read_queue.qsize() == 0:
return self.not_end, None
else:
with self.put_id_lock:
put_id = self.put_id
was_read, task = None, None
# If mode is to predict most recent read clip.
if self.num_skip == 0:
# Write all previous clips to write queue.
with self.write_lock:
while True:
was_read, task = self.read_queue.get()
if task.id == put_id:
break
self.write_queue[task.id] = (was_read, task)
else:
was_read, task = self.read_queue.get()
# If we reach the end of the video.
if not was_read:
# Put to write queue.
with self.write_lock:
self.write_queue[put_id] = was_read, copy.deepcopy(task)
task = None
return was_read, task
def get_fn(self):
while not self.stopped:
with self.put_id_lock:
put_id = self.put_id
not_end = self.not_end
with self.write_lock:
# If video ended and we have display all frames.
if not not_end and self.get_id == put_id:
break
# If the next frames are not available, wait.
if (
len(self.write_queue) == 0
or self.write_queue.get(self.get_id + 1) is None
):
time.sleep(0.02)
continue
else:
self.get_id += 1
was_read, task = self.write_queue[self.get_id]
del self.write_queue[self.get_id]
with self.output_lock:
for frame in task.frames[task.num_buffer_frames :]:
if self.output_file is None:
cv2.imshow("SlowFast", frame)
time.sleep(1 / self.output_fps)
else:
self.output_file.write(frame)
def display(self, task):
"""
        Add the visualized task to the write queue for display/write to the output file.
        Args:
            task (TaskInfo object): task object that contains
                the necessary information for prediction visualization (e.g. visualized frames).
"""
with self.write_lock:
self.write_queue[task.id] = (True, task)
def start(self):
"""
Start threads to read and write frames.
"""
self.put_thread = threading.Thread(
target=self.put_fn, args=(), name="VidRead-Thread", daemon=True
)
self.put_thread.start()
self.get_thread = threading.Thread(
target=self.get_fn, args=(), name="VidDisplay-Thread", daemon=True
)
self.get_thread.start()
return self
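    # (Illustrative only, not part of the original class.) A typical driver
    # loop, assuming `manager` is an instance of this class and `visualize`
    # is a callable that returns the task with predictions drawn onto its
    # frames:
    #
    #   manager.start()
    #   for was_read, task in manager:
    #       if not was_read:
    #           break
    #       if task is not None:
    #           manager.display(visualize(task))
    #   manager.join()
    #   manager.clean()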
def join(self):
self.get_thread.join()
def clean(self):
"""
Clean up open video files and windows.
"""
self.stopped = True
self.input_lock.acquire()
self.cap.release()
self.input_lock.release()
self.output_lock.acquire()
if self.output_file is None:
cv2.destroyAllWindows()
else:
self.output_file.release()
self.output_lock.release()
| 34.125341
| 103
| 0.560604
|
e6b4baf3a4468e37654d2e2e73be30608f51f42a
| 13,099
|
py
|
Python
|
tests/test_color_objects.py
|
ssaw/python-colormath
|
618fadb5c56225d6c171a26be909cb3a12824fa6
|
[
"BSD-3-Clause"
] | 1
|
2019-06-10T20:06:31.000Z
|
2019-06-10T20:06:31.000Z
|
tests/test_color_objects.py
|
ssaw/python-colormath
|
618fadb5c56225d6c171a26be909cb3a12824fa6
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_color_objects.py
|
ssaw/python-colormath
|
618fadb5c56225d6c171a26be909cb3a12824fa6
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Various tests for color objects.
"""
import unittest
from colormath.color_conversions import convert_color
from colormath.color_objects import SpectralColor, XYZColor, xyYColor, \
LabColor, LuvColor, LCHabColor, LCHuvColor, RGBColor, HSLColor, HSVColor, \
CMYColor, CMYKColor
class BaseColorConversionTest(unittest.TestCase):
"""
All color conversion tests should inherit from this class. Has some
convenience methods for re-use.
"""
# noinspection PyPep8Naming
def assertColorMatch(self, conv, std):
"""
Checks a converted color against an expected standard.
:param conv: The converted color object.
:param std: The object to use as a standard for comparison.
"""
self.assertEqual(conv.__class__, std.__class__)
attribs = std.VALUES
for attrib in attribs:
conv_value = getattr(conv, attrib)
std_value = getattr(std, attrib)
self.assertAlmostEqual(
conv_value, std_value, 3,
"%s is %s, expected %s" % (attrib, conv_value, std_value))
class SpectralConversionTestCase(BaseColorConversionTest):
def setUp(self):
"""
While it is possible to specify the entire spectral color using
positional arguments, set this thing up with keywords for the ease of
manipulation.
"""
color = SpectralColor(
spec_380nm=0.0600, spec_390nm=0.0600, spec_400nm=0.0641,
spec_410nm=0.0654, spec_420nm=0.0645, spec_430nm=0.0605,
spec_440nm=0.0562, spec_450nm=0.0543, spec_460nm=0.0537,
spec_470nm=0.0541, spec_480nm=0.0559, spec_490nm=0.0603,
spec_500nm=0.0651, spec_510nm=0.0680, spec_520nm=0.0705,
spec_530nm=0.0736, spec_540nm=0.0772, spec_550nm=0.0809,
spec_560nm=0.0870, spec_570nm=0.0990, spec_580nm=0.1128,
spec_590nm=0.1251, spec_600nm=0.1360, spec_610nm=0.1439,
spec_620nm=0.1511, spec_630nm=0.1590, spec_640nm=0.1688,
spec_650nm=0.1828, spec_660nm=0.1996, spec_670nm=0.2187,
spec_680nm=0.2397, spec_690nm=0.2618, spec_700nm=0.2852,
spec_710nm=0.2500, spec_720nm=0.2400, spec_730nm=0.2300)
self.color = color
def test_conversion_to_xyz(self):
xyz = convert_color(self.color, XYZColor)
self.assertColorMatch(xyz, XYZColor(0.115, 0.099, 0.047))
def test_conversion_to_xyz_with_negatives(self):
"""
        This has negative spectral values, which should never happen. Just
        clamp these to 0.0 instead of running into domain errors. A badly
        calibrated or uncalibrated spectrophotometer can sometimes report
        negative values.
"""
self.color.spec_530nm = -0.0736
# TODO: Convert here.
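        # A minimal sketch of what the TODO above could check (not part of the
        # original test): the conversion should clamp the negative reflectance
        # to 0.0 and complete without raising, e.g.
        #   xyz = convert_color(self.color, XYZColor)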
def test_convert_to_self(self):
same_color = convert_color(self.color, SpectralColor)
self.assertEqual(self.color, same_color)
class XYZConversionTestCase(BaseColorConversionTest):
def setUp(self):
self.color = XYZColor(0.1, 0.2, 0.3)
def test_conversion_to_xyy(self):
xyy = convert_color(self.color, xyYColor)
self.assertColorMatch(xyy, xyYColor(0.167, 0.333, 0.200))
def test_conversion_to_lab(self):
lab = convert_color(self.color, LabColor)
self.assertColorMatch(lab, LabColor(51.837, -57.486, -25.780))
def test_conversion_to_rgb(self):
# Picked a set of XYZ coordinates that would return a good RGB value.
self.color = XYZColor(0.300, 0.200, 0.300)
rgb = convert_color(self.color, RGBColor)
self.assertColorMatch(rgb, RGBColor(0.715, 0.349, 0.663))
def test_conversion_to_luv(self):
luv = convert_color(self.color, LuvColor)
self.assertColorMatch(luv, LuvColor(51.837, -73.561, -25.657))
def test_convert_to_self(self):
same_color = convert_color(self.color, XYZColor)
self.assertEqual(self.color, same_color)
# noinspection PyPep8Naming
class xyYConversionTestCase(BaseColorConversionTest):
def setUp(self):
self.color = xyYColor(0.167, 0.333, 0.200)
def test_conversion_to_xyz(self):
xyz = convert_color(self.color, XYZColor)
self.assertColorMatch(xyz, XYZColor(0.100, 0.200, 0.300))
def test_convert_to_self(self):
same_color = convert_color(self.color, xyYColor)
self.assertEqual(self.color, same_color)
class LabConversionTestCase(BaseColorConversionTest):
def setUp(self):
self.color = LabColor(1.807, -3.749, -2.547)
def test_conversion_to_xyz(self):
xyz = convert_color(self.color, XYZColor)
self.assertColorMatch(xyz, XYZColor(0.001, 0.002, 0.003))
def test_conversion_to_lchab(self):
lch = convert_color(self.color, LCHabColor)
self.assertColorMatch(lch, LCHabColor(1.807, 4.532, 214.191))
def test_convert_to_self(self):
same_color = convert_color(self.color, LabColor)
self.assertEqual(self.color, same_color)
class LuvConversionTestCase(BaseColorConversionTest):
def setUp(self):
self.color = LuvColor(1.807, -2.564, -0.894)
def test_conversion_to_xyz(self):
xyz = convert_color(self.color, XYZColor)
self.assertColorMatch(xyz, XYZColor(0.001, 0.002, 0.003))
def test_conversion_to_lchuv(self):
lch = convert_color(self.color, LCHuvColor)
self.assertColorMatch(lch, LCHuvColor(1.807, 2.715, 199.222))
def test_convert_to_self(self):
same_color = convert_color(self.color, LuvColor)
self.assertEqual(self.color, same_color)
class LCHabConversionTestCase(BaseColorConversionTest):
def setUp(self):
self.color = LCHabColor(1.807, 4.532, 214.191)
def test_conversion_to_lab(self):
lab = convert_color(self.color, LabColor)
self.assertColorMatch(lab, LabColor(1.807, -3.749, -2.547))
def test_conversion_to_rgb_zero_div(self):
"""
The formula I grabbed for LCHuv to XYZ had a zero division error in it
        if the L coord was 0. Also check against LCHab, just in case.
Issue #13 in the Google Code tracker.
"""
lchab = LCHabColor(0.0, 0.0, 0.0)
rgb = convert_color(lchab, RGBColor)
self.assertColorMatch(rgb, RGBColor(0.0, 0.0, 0.0))
def test_convert_to_self(self):
same_color = convert_color(self.color, LCHabColor)
self.assertEqual(self.color, same_color)
class LCHuvConversionTestCase(BaseColorConversionTest):
def setUp(self):
self.color = LCHuvColor(1.807, 2.715, 199.228)
def test_conversion_to_luv(self):
luv = convert_color(self.color, LuvColor)
self.assertColorMatch(luv, LuvColor(1.807, -2.564, -0.894))
def test_conversion_to_rgb_zero_div(self):
"""
The formula I grabbed for LCHuv to XYZ had a zero division error in it
if the L coord was 0. Check against that here.
Issue #13 in the Google Code tracker.
"""
lchuv = LCHuvColor(0.0, 0.0, 0.0)
rgb = convert_color(lchuv, RGBColor)
self.assertColorMatch(rgb, RGBColor(0.0, 0.0, 0.0))
def test_convert_to_self(self):
same_color = convert_color(self.color, LCHuvColor)
self.assertEqual(self.color, same_color)
class RGBConversionTestCase(BaseColorConversionTest):
def setUp(self):
self.color = RGBColor(0.482, 0.784, 0.196, rgb_type='sRGB')
def test_to_xyz_and_back(self):
xyz = convert_color(self.color, XYZColor)
rgb = convert_color(xyz, RGBColor)
self.assertColorMatch(rgb, self.color)
def test_conversion_to_hsl_max_r(self):
color = RGBColor(255, 123, 50, rgb_type='sRGB', is_upscaled=True)
hsl = convert_color(color, HSLColor)
self.assertColorMatch(hsl, HSLColor(21.366, 1.000, 0.598))
def test_conversion_to_hsl_max_g(self):
color = RGBColor(123, 255, 50, rgb_type='sRGB', is_upscaled=True)
hsl = convert_color(color, HSLColor)
self.assertColorMatch(hsl, HSLColor(98.634, 1.000, 0.598))
def test_conversion_to_hsl_max_b(self):
color = RGBColor(0.482, 0.482, 1.0, rgb_type='sRGB')
hsl = convert_color(color, HSLColor)
self.assertColorMatch(hsl, HSLColor(240.000, 1.000, 0.741))
def test_conversion_to_hsl_gray(self):
color = RGBColor(0.482, 0.482, 0.482, rgb_type='sRGB')
hsl = convert_color(color, HSLColor)
self.assertColorMatch(hsl, HSLColor(0.000, 0.000, 0.482))
def test_conversion_to_hsv(self):
hsv = convert_color(self.color, HSVColor)
self.assertColorMatch(hsv, HSVColor(90.816, 0.750, 0.784))
def test_conversion_to_cmy(self):
cmy = convert_color(self.color, CMYColor)
self.assertColorMatch(cmy, CMYColor(0.518, 0.216, 0.804))
def test_srgb_conversion_to_xyz_d50(self):
"""
sRGB's native illuminant is D65. Test the XYZ adaptations by setting
a target illuminant to something other than D65.
"""
xyz = convert_color(self.color, XYZColor, target_illuminant='D50')
self.assertColorMatch(xyz, XYZColor(0.313, 0.460, 0.082))
def test_srgb_conversion_to_xyz_d65(self):
"""
sRGB's native illuminant is D65. This is a straightforward conversion.
"""
xyz = convert_color(self.color, XYZColor)
self.assertColorMatch(xyz, XYZColor(0.294, 0.457, 0.103))
def test_adobe_conversion_to_xyz_d65(self):
"""
Adobe RGB's native illuminant is D65, like sRGB's. However, sRGB uses
different conversion math that uses gamma, so test the alternate logic
route for non-sRGB RGB colors.
"""
adobe = RGBColor(0.482, 0.784, 0.196, rgb_type='adobe_rgb')
xyz = convert_color(adobe, XYZColor)
self.assertColorMatch(xyz, XYZColor(0.230, 0.429, 0.074))
def test_adobe_conversion_to_xyz_d50(self):
"""
Adobe RGB's native illuminant is D65, so an adaptation matrix is
involved here. However, the math for sRGB and all other RGB types is
different, so test all of the other types with an adaptation matrix
here.
"""
adobe = RGBColor(0.482, 0.784, 0.196, rgb_type='adobe_rgb')
xyz = convert_color(adobe, XYZColor, target_illuminant='D50')
self.assertColorMatch(xyz, XYZColor(0.247, 0.431, 0.060))
def test_convert_to_self(self):
same_color = convert_color(self.color, RGBColor)
self.assertEqual(self.color, same_color)
def test_get_rgb_hex(self):
hex_str = self.color.get_rgb_hex()
self.assertEqual(hex_str, "#7bc832", "sRGB to hex conversion failed")
def test_set_from_rgb_hex(self):
rgb = RGBColor.new_from_rgb_hex('#7bc832')
self.assertColorMatch(rgb, RGBColor(0.482, 0.784, 0.196))
class HSLConversionTestCase(BaseColorConversionTest):
def setUp(self):
self.color = HSLColor(200.0, 0.400, 0.500)
def test_conversion_to_rgb(self):
rgb = convert_color(self.color, RGBColor)
self.assertColorMatch(rgb, RGBColor(0.300, 0.567, 0.700))
def test_convert_to_self(self):
same_color = convert_color(self.color, HSLColor)
self.assertEqual(self.color, same_color)
class HSVConversionTestCase(BaseColorConversionTest):
def setUp(self):
self.color = HSVColor(91.0, 0.750, 0.784)
def test_conversion_to_rgb(self):
rgb = convert_color(self.color, RGBColor)
self.assertColorMatch(rgb, RGBColor(0.480, 0.784, 0.196))
def test_convert_to_self(self):
same_color = convert_color(self.color, HSVColor)
self.assertEqual(self.color, same_color)
class CMYConversionTestCase(BaseColorConversionTest):
def setUp(self):
self.color = CMYColor(0.518, 0.216, 0.804)
def test_conversion_to_cmyk(self):
cmyk = convert_color(self.color, CMYKColor)
self.assertColorMatch(cmyk, CMYKColor(0.385, 0.000, 0.750, 0.216))
def test_conversion_to_rgb(self):
rgb = convert_color(self.color, RGBColor)
self.assertColorMatch(rgb, RGBColor(0.482, 0.784, 0.196))
def test_convert_to_self(self):
same_color = convert_color(self.color, CMYColor)
self.assertEqual(self.color, same_color)
class CMYKConversionTestCase(BaseColorConversionTest):
def setUp(self):
self.color = CMYKColor(0.385, 0.000, 0.750, 0.216)
def test_conversion_to_cmy(self):
cmy = convert_color(self.color, CMYColor)
self.assertColorMatch(cmy, CMYColor(0.518, 0.216, 0.804))
def test_convert_to_self(self):
same_color = convert_color(self.color, CMYKColor)
self.assertEqual(self.color, same_color)
| 37.74928
| 80
| 0.65379
|
b654cfd85a6f6972298b42083c2a477475c2bd5a
| 1,334
|
py
|
Python
|
events.py
|
shurain/archbold
|
887524c05737da9975bed712f5267af3cc4c3581
|
[
"MIT"
] | 2
|
2015-10-24T09:24:51.000Z
|
2015-10-26T18:38:29.000Z
|
events.py
|
shurain/archbold
|
887524c05737da9975bed712f5267af3cc4c3581
|
[
"MIT"
] | null | null | null |
events.py
|
shurain/archbold
|
887524c05737da9975bed712f5267af3cc4c3581
|
[
"MIT"
] | null | null | null |
import logging
import subprocess
from functools import wraps
from settings import ALLOWED_USERS, DEFAULT_RESPONSE, NOT_ALLOWED_RESPONSE
logger = logging.getLogger('archbold_basic')
def auth_required(func):
@wraps(func)
def wrapper(update, match):
user_id = update.message.from_user.id
if user_id not in ALLOWED_USERS:
return NOT_ALLOWED_RESPONSE
return func(update, match)
return wrapper
# List of events
def start(update, match):
user_id = update.message.from_user.id
answer = "You have to be included in the ALLOWED_USERS. Your ID is {}".format(user_id)
return answer
@auth_required
def exec_command(update, match):
commands = match.group('exec_command').split(' ')
    # if no command text follows /exec, command ends up as an empty string
command = commands[0]
params = commands[1:]
try:
        answer = subprocess.check_output([command] + params,
stderr=subprocess.STDOUT,
)
except subprocess.CalledProcessError as e:
answer = e.output
logger.debug(answer)
if not answer.strip():
answer = DEFAULT_RESPONSE
return answer
# (event pattern, callback)
events = [
("/start", start),
("^/exec\s(?P<exec_command>[^$]+)", exec_command),
]
| 24.254545
| 90
| 0.64018
|
526ff2cd2859091155bf386fee14a300d5e4e265
| 2,088
|
py
|
Python
|
microrganist/__init__.py
|
tgbugs/microrangist
|
451e992ee4366ece1c03281ffb43486f688293f8
|
[
"MIT"
] | null | null | null |
microrganist/__init__.py
|
tgbugs/microrangist
|
451e992ee4366ece1c03281ffb43486f688293f8
|
[
"MIT"
] | null | null | null |
microrganist/__init__.py
|
tgbugs/microrangist
|
451e992ee4366ece1c03281ffb43486f688293f8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
""" Kernel needs CONFIG_INPUT_UINPUT set and/or load the uinput
module. Install pyusb and python-evdev. Run as root. """
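# (Illustrative setup, not part of the original script.) One way to satisfy
# the requirements above, assuming a Linux system with pip available:
#   sudo modprobe uinput
#   pip install pyusb evdev
#   sudo python3 __init__.py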
import usb.core
import usb.util
import evdev
from evdev import ecodes
dev = usb.core.find(idVendor=0x05f3, idProduct=0x00ff)
if dev is None:
raise ValueError('dev not found')
# get an endpoint instance
cfg = dev.get_active_configuration()
intf = cfg[(0,0)]
end_in = usb.util.find_descriptor(
intf,
# match the first IN endpoint
custom_match = \
lambda e: \
usb.util.endpoint_direction(e.bEndpointAddress) == \
usb.util.ENDPOINT_IN)
# lol nice bitmask
left = 0b001
mid = 0b010
right = 0b100
# set your keybindings here
key_map = {
left: ecodes.KEY_LEFTALT,
mid: ecodes.KEY_LEFTCTRL,
right: ecodes.KEY_LEFTSHIFT,
}
ui = evdev.uinput.UInput(
name='VEC Footpedal Keyboard',
)
try:
if dev.is_kernel_driver_active(intf.index):
dev.detach_kernel_driver(intf.index)
usb.util.claim_interface(dev, intf.index)
previous_state = 0
while True:
try:
data = dev.read(end_in.bEndpointAddress, end_in.wMaxPacketSize, 5000)
            # use a 5 second read timeout: long enough that the loop mostly
            # blocks on the read, short enough to keep shutdown time bounded
state, _ = data
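            # XOR against the previous state isolates the bits that changed on
            # this read; masking with each pedal's bit (and shifting down to
            # 0/1) yields the press/release value written to evdev below.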
diff = state ^ previous_state
l = diff & left
dl = l & state
lop = (l, dl)
m = diff & mid
dm = (m & state) >> 1
mop = (m, dm)
r = diff & right
dr = (r & state) >> 2
dop = (r, dr)
previous_state = state
for k, dk in [lop, mop, dop]:
if k:
key = key_map[k]
ui.write(evdev.ecodes.EV_KEY, key, dk)
ui.syn()
except usb.core.USBTimeoutError as e:
pass
except usb.core.USBError as e:
print(e, type(e))
finally:
usb.util.release_interface(dev, intf.index)
dev.attach_kernel_driver(intf.index)
| 24
| 81
| 0.588123
|
0e2b1e0932a1e36d8ff5f038e7d31ff803df6266
| 1,794
|
py
|
Python
|
pythonAlgorithm/datastrcture/Kth Smallest Number in Sorted Matrix.py
|
Sky-zzt/lintcodePractice
|
d6436b296c5865d85e55c8ad4fcdbb0165d4ebb1
|
[
"MIT"
] | 1
|
2020-09-15T07:58:55.000Z
|
2020-09-15T07:58:55.000Z
|
pythonAlgorithm/datastrcture/Kth Smallest Number in Sorted Matrix.py
|
Sky-zzt/lintcodePractice
|
d6436b296c5865d85e55c8ad4fcdbb0165d4ebb1
|
[
"MIT"
] | null | null | null |
pythonAlgorithm/datastrcture/Kth Smallest Number in Sorted Matrix.py
|
Sky-zzt/lintcodePractice
|
d6436b296c5865d85e55c8ad4fcdbb0165d4ebb1
|
[
"MIT"
] | null | null | null |
import heapq
class Solution:
"""
@param matrix: a matrix of integers
@param k: An integer
@return: the kth smallest number in the matrix
    Find the kth smallest integer (in ascending order) in a sorted matrix.
    A sorted matrix is defined as one in which every row is increasing and
    every column is increasing.
Example
    Example 1:
    Input:
[
[1 ,5 ,7],
[3 ,7 ,8],
[4 ,8 ,9],
]
k = 4
    Output: 5
    Example 2:
    Input:
[
[1, 2],
[3, 4]
]
k = 3
    Output: 3
Challenge
    Time complexity O(k log n), where n is the maximum of the matrix's width and height
"""
    # todo: in Java, a TreeSet keeps elements sorted and removal is O(log n)
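    # A minimal sketch (not part of the original file) of the heap-based
    # O(k log n) approach the docstring describes: seed a min-heap with the
    # first element of each row, then pop k - 1 times, pushing each popped
    # cell's right-hand neighbour. The methods below instead maintain a
    # sliding-window running median with two heaps.
    def kth_smallest_heap(self, matrix, k):
        if not matrix or not matrix[0]:
            return None
        heap = [(matrix[i][0], i, 0) for i in range(min(len(matrix), k))]
        heapq.heapify(heap)
        for _ in range(k - 1):
            _, row, col = heapq.heappop(heap)
            if col + 1 < len(matrix[row]):
                heapq.heappush(heap, (matrix[row][col + 1], row, col + 1))
        return heap[0][0]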
def kthSmallest(self, nums, k):
# write your code here
self.minheap, self.maxheap = [], []
medians = []
for i in range(len(nums)):
self.add(nums[i], i, k)
medians.append(self.median)
return medians
@property
def median(self):
if len(self.minheap) > len(self.maxheap):
return self.minheap[0]
return -self.maxheap[0]
def add(self, value, index, winsize):
if len(self.maxheap) + len(self.minheap) > winsize: # todo
self.remove(index - winsize)
if len(self.maxheap) == 0:
heapq.heappush(self.maxheap, -value)
return
if -self.maxheap[0] < value:
heapq.heappush(self.minheap, value)
else:
heapq.heappush(self.maxheap, -value)
self.modifyTwoHeapsSize()
def remove(self, idx):
if idx in self.minheap:
self.minheap.remove(idx)
else:
self.maxheap.remove(idx)
def modifyTwoHeapsSize(self):
if len(self.maxheap) + 2 == len(self.minheap):
heapq.heappush(self.maxheap, -heapq.heappop(self.minheap))
if len(self.minheap) + 2 == len(self.maxheap):
heapq.heappush(self.minheap, -heapq.heappop(self.maxheap))
| 23.92
| 70
| 0.544593
|
8ea3e02076c3d81b767404365a3769e117429275
| 13,881
|
py
|
Python
|
memberportal/profile/xerohelpers.py
|
snoopen/MemberMatters
|
218d215aec3a32a34d7ff3c32df5b5525c461bd9
|
[
"MIT"
] | null | null | null |
memberportal/profile/xerohelpers.py
|
snoopen/MemberMatters
|
218d215aec3a32a34d7ff3c32df5b5525c461bd9
|
[
"MIT"
] | null | null | null |
memberportal/profile/xerohelpers.py
|
snoopen/MemberMatters
|
218d215aec3a32a34d7ff3c32df5b5525c461bd9
|
[
"MIT"
] | null | null | null |
from xero import Xero
from xero.auth import PrivateCredentials
from xero.exceptions import XeroBadRequest
from constance import config
import datetime
import os
xero_rsa = os.environ.get("PORTAL_XERO_RSA_FILE", "/usr/src/data/xerkey.pem")
def get_xero_contact(user):
"""
Returns an object with the xero contact details or None if it
doesn't exist.
:return:
"""
if "PORTAL_XERO_CONSUMER_KEY" in os.environ:
with open(xero_rsa) as keyfile:
rsa_key = keyfile.read()
credentials = PrivateCredentials(
os.environ.get("PORTAL_XERO_CONSUMER_KEY"), rsa_key
)
xero = Xero(credentials)
email = xero.contacts.filter(EmailAddress=user.profile.email)
name = xero.contacts.filter(Name=user.profile.get_full_name())
if email:
return email
elif name:
return name
return None
else:
return "Invalid Xero API details."
def generate_account_number(profile):
if "PORTAL_XERO_CONSUMER_KEY" in os.environ:
with open(xero_rsa) as keyfile:
rsa_key = keyfile.read()
credentials = PrivateCredentials(
os.environ.get("PORTAL_XERO_CONSUMER_KEY", "/usr/src/data/xerkey.pem"),
rsa_key,
)
xero = Xero(credentials)
contacts = xero.contacts.filter(includeArchived=True)
for x in range(100, 999):
account_number = profile.first_name[0] + profile.last_name[:2] + str(x)
account_number = account_number.upper()
if not any(
d.get("AccountNumber", None) == account_number for d in contacts
):
profile.xero_account_number = account_number
profile.save()
print("Generated Xero Account: " + account_number)
return profile.xero_account_number
else:
return False
def sync_xero_accounts(users):
if "PORTAL_XERO_CONSUMER_KEY" in os.environ:
with open(xero_rsa) as keyfile:
rsa_key = keyfile.read()
credentials = PrivateCredentials(
os.environ.get("PORTAL_XERO_CONSUMER_KEY", "/usr/src/data/xerkey.pem"),
rsa_key,
)
xero = Xero(credentials)
contacts = xero.contacts.filter(includeArchived=True)
matches = []
non_matches = []
for user in users:
profile = user.profile
if profile.state == "noob":
print(
"Not syncing new member ({} {}).".format(
user.profile.get_full_name(), user.email
)
)
continue
if profile.xero_account_id or profile.xero_account_number:
print(
"{} already has xero details ({} {})".format(
user.profile.get_full_name(),
profile.xero_account_number,
profile.xero_account_id,
)
)
continue
else:
contact = next(
(
item
for item in contacts
if str(item["EmailAddress"]).lower() == str(user.email).lower()
),
None,
)
if contact:
print(
"Found match for {} ({})".format(
profile.get_full_name(), user.email
)
+ str(contact)
)
if "AccountNumber" not in contact:
if contact["ContactStatus"] == "ARCHIVED":
continue
else:
raise FileNotFoundError(
"No account number exists for "
+ user.profile.get_full_name()
)
user.profile.xero_account_number = contact["AccountNumber"]
user.profile.xero_account_id = contact["ContactID"]
user.profile.save()
matches.append(user)
else:
print(
"No match found for {} ({})".format(
profile.get_full_name(), user.email
)
)
non_matches.append(user)
message = "\nDone syncing {} users. Found {} matches and {} non-matches. {} users untouched.".format(
len(users),
len(matches),
len(non_matches),
str(len(users) - (len(matches) + len(non_matches))),
)
print(message)
print("\nMatched Users:")
for match in matches:
print(match.profile.get_full_name())
print("\nNon-matched Users:")
for non_match in non_matches:
print(non_match.profile.get_full_name() + " " + non_match.email)
non_matches_string = ""
for non_match in non_matches:
non_matches_string += "{} ({}), ".format(
non_match.profile.get_full_name(), non_match.email
)
return message + "Non matches: " + non_matches_string
else:
return False
def add_to_xero(profile):
if "PORTAL_XERO_CONSUMER_KEY" in os.environ:
with open(xero_rsa) as keyfile:
rsa_key = keyfile.read()
credentials = PrivateCredentials(
os.environ.get("PORTAL_XERO_CONSUMER_KEY", "/usr/src/data/xerkey.pem"),
rsa_key,
)
xero = Xero(credentials)
contact = [
{
"AccountNumber": generate_account_number(profile),
"ContactStatus": "ACTIVE",
"Name": profile.get_full_name(),
"FirstName": profile.first_name,
"LastName": profile.last_name,
"EmailAddress": profile.user.email,
"Addresses": [
{
"AddressType": "STREET",
"City": "",
"Region": "",
"PostalCode": "",
"Country": "",
"AttentionTo": "",
}
],
"Phones": [
{
"PhoneType": "DEFAULT",
"PhoneNumber": profile.phone,
"PhoneAreaCode": "",
"PhoneCountryCode": "",
}
],
"IsSupplier": False,
"IsCustomer": True,
"DefaultCurrency": "AU",
}
]
try:
result = xero.contacts.put(contact)
except XeroBadRequest as e:
error = str(e)
if "is already assigned to another contact" in error:
error = "That contact name is already in Xero."
return "Error: " + error
if result:
print(result)
profile.xero_account_id = result[0]["ContactID"]
profile.save()
return "Successfully added to Xero."
else:
return "Error adding to Xero."
else:
return "Error adding to Xero. No Xero API details."
def _create_xero_invoice(payload):
if "PORTAL_XERO_CONSUMER_KEY" in os.environ:
with open(xero_rsa) as keyfile:
rsa_key = keyfile.read()
credentials = PrivateCredentials(
os.environ.get("PORTAL_XERO_CONSUMER_KEY", "/usr/src/data/xerkey.pem"),
rsa_key,
)
xero = Xero(credentials)
# Monkey patch the library to support online invoices.
def get_onlineinvoice(id, headers=None, summarize_errors=None):
uri = "/".join(
[xero.invoices.base_url, xero.invoices.name, id, "OnlineInvoice"]
)
params = {}
if not summarize_errors:
params["summarizeErrors"] = "false"
return uri, params, "get", None, headers, False
xero.invoices.get_onlineinvoice = xero.invoices._get_data(get_onlineinvoice)
# try to create the invoice
result = xero.invoices.put(payload)
invoice_url = xero.invoices.get_onlineinvoice(result[0]["InvoiceID"])[
"OnlineInvoices"
][0]["OnlineInvoiceUrl"]
return {
"invoice_id": result[0]["InvoiceID"],
"invoice_number": result[0]["InvoiceNumber"],
"invoice_reference": result[0]["Reference"],
"invoice_link": invoice_url,
}
else:
return None
def create_membership_invoice(user, email_invoice=False):
next_month = datetime.date.today().month + 1
this_year = datetime.date.today().year
if next_month == 13:
next_month = 1
this_year += 1
next_month_date = datetime.datetime(this_year, next_month, 1)
line_items = [
{
"Description": f"{config.SITE_OWNER}: " + user.profile.member_type.name,
"Quantity": "1",
"ItemCode": user.profile.member_type.name,
"UnitAmount": round(user.profile.member_type.cost * 0.7, 2),
"TaxType": config.XERO_TAX_TYPE,
"AccountCode": config.XERO_MEMBERSHIP_ACCOUNT_CODE,
}
]
payload = {
"Type": "ACCREC",
"Contact": {"ContactID": user.profile.xero_account_id},
"DueDate": next_month_date,
"LineAmountTypes": "Inclusive",
"LineItems": line_items,
"Status": "AUTHORISED",
"Reference": user.profile.xero_account_number,
"Url": config.SITE_URL,
}
try:
invoice = _create_xero_invoice(payload)
    except Exception:
return "Error creating Xero Invoice"
# if we're successful and email == True send it
if email_invoice:
user.email_invoice(
user.profile.first_name,
user.profile.member_type.cost,
invoice["invoice_number"],
next_month_date.strftime("%d-%m-%Y"),
invoice["invoice_reference"],
invoice["invoice_link"],
)
# TODO fix the import of this helper
# log_user_event(
# user,
# "Created invoice for $"
# + str(user.profile.member_type.cost)
# + "("
# + invoice["invoice_id"]
# + ")",
# "xero",
# )
if invoice:
return "Successfully created invoice {} in Xero.".format(
invoice["invoice_number"]
)
else:
return "Error creating invoice in Xero."
def create_stripe_membership_invoice(user, amount, fee_amount):
line_items = [
{
"Description": f"{config.SITE_OWNER} ",
"Quantity": "1",
"ItemCode": config.XERO_MEMBERSHIP_ITEM_CODE,
"UnitAmount": amount,
"TaxType": config.XERO_TAX_TYPE,
"AccountCode": config.XERO_MEMBERSHIP_ACCOUNT_CODE,
},
{
"Description": f"Stripe fee",
"Quantity": "1",
"ItemCode": config.XERO_STRIPE_FEE_ITEM_CODE,
"UnitAmount": fee_amount,
"TaxType": config.XERO_TAX_TYPE,
"AccountCode": config.XERO_STRIPE_FEE_ACCOUNT_CODE,
},
]
payload = {
"Type": "ACCREC",
"Contact": {"ContactID": user.profile.xero_account_id},
"DueDate": datetime.datetime.now(),
"LineAmountTypes": "Inclusive",
"LineItems": line_items,
"Status": "AUTHORISED",
"Reference": user.profile.xero_account_number,
"Url": config.SITE_URL,
}
invoice = _create_xero_invoice(payload)
# TODO fix the import of this helper
# log_user_event(
# user,
# "Created invoice for $" + str(amount) + "(" + invoice["invoice_id"] + ")",
# "xero",
# )
if invoice:
return invoice
else:
return None
def create_stripe_memberbucks_invoice(user, amount, fee_amount):
line_items = [
{
"Description": f"{config.SITE_OWNER} ",
"Quantity": "1",
"ItemCode": config.XERO_MEMBERBUCKS_ITEM_CODE,
"UnitAmount": amount,
"TaxType": config.XERO_TAX_TYPE,
"AccountCode": config.XERO_MEMBERBUCKS_ACCOUNT_CODE,
},
{
"Description": f"Stripe fee",
"Quantity": "1",
"ItemCode": config.XERO_STRIPE_FEE_ITEM_CODE,
"UnitAmount": fee_amount,
"TaxType": config.XERO_TAX_TYPE,
"AccountCode": config.XERO_STRIPE_FEE_ACCOUNT_CODE,
},
]
payload = {
"Type": "ACCREC",
"Contact": {"ContactID": user.profile.xero_account_id},
"DueDate": datetime.datetime.now(),
"LineAmountTypes": "Inclusive",
"LineItems": line_items,
"Status": "AUTHORISED",
"Reference": user.profile.xero_account_number,
"Url": config.SITE_URL,
}
invoice = _create_xero_invoice(payload)
# TODO fix the import of this helper
# log_user_event(
# user,
# "Created invoice for $" + str(amount) + "(" + invoice["invoice_id"] + ")",
# "xero",
# )
if invoice:
return invoice
else:
return None
| 32.661176
| 110
| 0.507744
|
93a5db8edcd7fb5dca1e45a79dcf65d90fe57e22
| 250
|
py
|
Python
|
src/py/devops_tools/contribute.py
|
StatisKit/license
|
e93fc2fd81d82eff3f6c336d5f29e5938062cc32
|
[
"Apache-2.0"
] | null | null | null |
src/py/devops_tools/contribute.py
|
StatisKit/license
|
e93fc2fd81d82eff3f6c336d5f29e5938062cc32
|
[
"Apache-2.0"
] | 1
|
2018-02-28T14:15:09.000Z
|
2018-02-28T14:15:09.000Z
|
src/py/devops_tools/contribute.py
|
StatisKit/license
|
e93fc2fd81d82eff3f6c336d5f29e5938062cc32
|
[
"Apache-2.0"
] | 51
|
2018-02-28T15:21:01.000Z
|
2021-03-15T18:26:26.000Z
|
def fork_repository(organization, repository, username):
pass
def clone_repository(username, repository, directory):
pass
def add_upstream(organization, repository, username, directory):
pass
def install_recipes(directory):
pass
| 17.857143
| 64
| 0.764
|
130b813bc44b6abfaf1e754c821bb39018d39f51
| 1,656
|
py
|
Python
|
config/urls.py
|
juftin/golfr
|
d94b79eca6a50bc645cf9f0814ad2bc1517c1344
|
[
"MIT"
] | null | null | null |
config/urls.py
|
juftin/golfr
|
d94b79eca6a50bc645cf9f0814ad2bc1517c1344
|
[
"MIT"
] | 17
|
2021-11-22T18:27:32.000Z
|
2022-03-29T18:30:49.000Z
|
config/urls.py
|
juftin/golfr
|
d94b79eca6a50bc645cf9f0814ad2bc1517c1344
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
from django.views import defaults as default_views
from django.views.generic import TemplateView
urlpatterns = [
path("", TemplateView.as_view(template_name="pages/home.html"), name="home"),
path(
"about/", TemplateView.as_view(template_name="pages/about.html"), name="about"
),
# Django Admin, use {% url 'admin:index' %}
path(settings.ADMIN_URL, admin.site.urls),
# User management
path("users/", include("golfr.users.urls", namespace="users")),
path("accounts/", include("allauth.urls")),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development; just visit
    # these urls in a browser to see what the error pages look like.
urlpatterns += [
path(
"400/",
default_views.bad_request,
kwargs={"exception": Exception("Bad Request!")},
),
path(
"403/",
default_views.permission_denied,
kwargs={"exception": Exception("Permission Denied")},
),
path(
"404/",
default_views.page_not_found,
kwargs={"exception": Exception("Page not Found")},
),
path("500/", default_views.server_error),
]
if "debug_toolbar" in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
| 35.234043
| 86
| 0.646739
|
bb7bd28999f0abfc89295450bdf779c41f5f575a
| 22,905
|
py
|
Python
|
purity_fb/purity_fb_1dot9/apis/object_store_remote_credentials_api.py
|
bsamz-ps/purity_fb_python_client
|
11f27ef0c72d8aac1fc4e1ed036cca038b85dfa4
|
[
"Apache-2.0"
] | null | null | null |
purity_fb/purity_fb_1dot9/apis/object_store_remote_credentials_api.py
|
bsamz-ps/purity_fb_python_client
|
11f27ef0c72d8aac1fc4e1ed036cca038b85dfa4
|
[
"Apache-2.0"
] | null | null | null |
purity_fb/purity_fb_1dot9/apis/object_store_remote_credentials_api.py
|
bsamz-ps/purity_fb_python_client
|
11f27ef0c72d8aac1fc4e1ed036cca038b85dfa4
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Pure Storage FlashBlade REST 1.9 Python SDK
Pure Storage FlashBlade REST 1.9 Python SDK. Compatible with REST API versions 1.0 - 1.9. Developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.9
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class ObjectStoreRemoteCredentialsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_object_store_remote_credentials(self, remote_credentials, **kwargs):
"""
Create a new object store remote credentials object.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_object_store_remote_credentials(remote_credentials, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param ObjectStoreRemoteCredentials remote_credentials: The attribute map used to create the object store remote credentials object. (required)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:return: ObjectStoreRemoteCredentialsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_object_store_remote_credentials_with_http_info(remote_credentials, **kwargs)
else:
(data) = self.create_object_store_remote_credentials_with_http_info(remote_credentials, **kwargs)
return data
def create_object_store_remote_credentials_with_http_info(self, remote_credentials, **kwargs):
"""
Create a new object store remote credentials object.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_object_store_remote_credentials_with_http_info(remote_credentials, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param ObjectStoreRemoteCredentials remote_credentials: The attribute map used to create the object store remote credentials object. (required)
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:return: ObjectStoreRemoteCredentialsResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['remote_credentials', 'names']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_object_store_remote_credentials" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'remote_credentials' is set
if ('remote_credentials' not in params) or (params['remote_credentials'] is None):
raise ValueError("Missing the required parameter `remote_credentials` when calling `create_object_store_remote_credentials`")
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'remote_credentials' in params:
body_params = params['remote_credentials']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.9/object-store-remote-credentials', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ObjectStoreRemoteCredentialsResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_object_store_remote_credentials(self, **kwargs):
"""
Delete an object store remote credentials object.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_object_store_remote_credentials(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] ids: A comma-separated list of resource IDs. This cannot be provided together with the name or names query parameters.
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_object_store_remote_credentials_with_http_info(**kwargs)
else:
(data) = self.delete_object_store_remote_credentials_with_http_info(**kwargs)
return data
def delete_object_store_remote_credentials_with_http_info(self, **kwargs):
"""
Delete an object store remote credentials object.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_object_store_remote_credentials_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] ids: A comma-separated list of resource IDs. This cannot be provided together with the name or names query parameters.
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['ids', 'names']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_object_store_remote_credentials" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.9/object-store-remote-credentials', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_object_store_remote_credentials(self, **kwargs):
"""
List object store remote credentials.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_object_store_remote_credentials(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str filter: The filter to be used for query.
:param list[str] ids: A comma-separated list of resource IDs. This cannot be provided together with the name or names query parameters.
:param int limit: limit, should be >= 0
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param str sort: The way to order the results.
:param int start: start
:param str token: token
:return: ObjectStoreRemoteCredentialsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_object_store_remote_credentials_with_http_info(**kwargs)
else:
(data) = self.list_object_store_remote_credentials_with_http_info(**kwargs)
return data
def list_object_store_remote_credentials_with_http_info(self, **kwargs):
"""
List object store remote credentials.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_object_store_remote_credentials_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str filter: The filter to be used for query.
:param list[str] ids: A comma-separated list of resource IDs. This cannot be provided together with the name or names query parameters.
:param int limit: limit, should be >= 0
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:param str sort: The way to order the results.
:param int start: start
:param str token: token
:return: ObjectStoreRemoteCredentialsResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['filter', 'ids', 'limit', 'names', 'sort', 'start', 'token']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_object_store_remote_credentials" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'sort' in params:
query_params.append(('sort', params['sort']))
if 'start' in params:
query_params.append(('start', params['start']))
if 'token' in params:
query_params.append(('token', params['token']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.9/object-store-remote-credentials', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ObjectStoreRemoteCredentialsResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_object_store_remote_credentials(self, remote_credentials, **kwargs):
"""
Update an existing object store remote credentials object.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_object_store_remote_credentials(remote_credentials, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param ObjectStoreRemoteCredentials remote_credentials: The attribute map used to update the object store remote credentials object. (required)
:param list[str] ids: A comma-separated list of resource IDs. This cannot be provided together with the name or names query parameters.
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:return: ObjectStoreRemoteCredentialsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_object_store_remote_credentials_with_http_info(remote_credentials, **kwargs)
else:
(data) = self.update_object_store_remote_credentials_with_http_info(remote_credentials, **kwargs)
return data
def update_object_store_remote_credentials_with_http_info(self, remote_credentials, **kwargs):
"""
Update an existing object store remote credentials object.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_object_store_remote_credentials_with_http_info(remote_credentials, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param ObjectStoreRemoteCredentials remote_credentials: The attribute map used to update the object store remote credentials object. (required)
:param list[str] ids: A comma-separated list of resource IDs. This cannot be provided together with the name or names query parameters.
:param list[str] names: A comma-separated list of resource names. This cannot be provided together with the ids query parameters.
:return: ObjectStoreRemoteCredentialsResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['remote_credentials', 'ids', 'names']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_object_store_remote_credentials" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'remote_credentials' is set
if ('remote_credentials' not in params) or (params['remote_credentials'] is None):
raise ValueError("Missing the required parameter `remote_credentials` when calling `update_object_store_remote_credentials`")
collection_formats = {}
path_params = {}
query_params = []
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'remote_credentials' in params:
body_params = params['remote_credentials']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.9/object-store-remote-credentials', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ObjectStoreRemoteCredentialsResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 46.272727
| 249
| 0.60668
|
02e637442518bd86e8ab8ea0378a64b35b6ecf02
| 3,036
|
py
|
Python
|
BeaconCharityServer/app-server/app.py
|
hanks/Second_Hackson_Demo
|
a0c76da6bfbff847be056d21481ad7e37826f617
|
[
"MIT"
] | null | null | null |
BeaconCharityServer/app-server/app.py
|
hanks/Second_Hackson_Demo
|
a0c76da6bfbff847be056d21481ad7e37826f617
|
[
"MIT"
] | null | null | null |
BeaconCharityServer/app-server/app.py
|
hanks/Second_Hackson_Demo
|
a0c76da6bfbff847be056d21481ad7e37826f617
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from flask import Flask, jsonify, url_for, render_template, send_from_directory, request
from redis import Redis
import json
import os
from models import CharityItem
from managers import RedisManager
app = Flask(__name__)
redis_manager = RedisManager()
@app.route("/charityitem/<major>/<minor>", methods=["GET"])
def charity_item(major, minor):
data_dict = redis_manager.get_dict(major, minor)
return jsonify(data_dict)
@app.route("/charityitems/<major>", methods=["GET"])
def charity_items(major):
data_dicts = redis_manager.get_dicts(major)
result = {"charityItems": data_dicts}
return jsonify(result)
@app.route("/charityitem/<major>/<minor>", methods=["POST"])
def update_charity_item(major, minor):
donation = int(request.form.get("donation"))
original_data_dict = redis_manager.get_dict(major, minor)
new_donation = int(original_data_dict["actual_money"]) + donation
new_data_dict = original_data_dict
new_data_dict["actual_money"] = str(new_donation)
result = {}
try:
redis_manager.set_dict(new_data_dict, major, minor)
    except Exception:
pass
else:
result = {'status': 'ok'}
return jsonify(result)
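# (Illustrative only, not part of the original file.) The POST endpoint above
# expects a form-encoded "donation" amount; with hypothetical beacon values
# major=12, minor=3 and Flask's default port:
#   curl -X POST -d "donation=500" http://localhost:5000/charityitem/12/3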
@app.route("/image/<name>")
def image(name):
filename="images/{}".format(name)
return send_from_directory("static", filename)
def init_test_data():
# add item1
item = CharityItem(u"富士山寄付", u"富士山の登山道を改善する", u"富士山の登山道を改善する富士山の登山道を改善する富士山の登山道を改善する富士山の登山道を改善する富士山の登山道を改善する富士山の登山道を改善する富士山の登山道を改善する富士山の登山道を改善する", "1.png", "11.png", 3, 12, 1, 2000, 1000)
dictionary = item.to_dict()
redis_manager.set_dict(dictionary, item.major, item.minor)
# add item2
item = CharityItem(u"水族館存続のために!", u"水族館存続のために!", u"水族館存続のために!水族館存続のために!水族館存続のために!水族館存続のために!水族館存続のために!水族館存続のために!水族館存続のために!水族館存続のために!", "2.png", "12.png", 2, 12, 2, 2000, 1100)
dictionary = item.to_dict()
redis_manager.set_dict(dictionary, item.major, item.minor)
# add item3
item = CharityItem(u"古民家の再活用のために!", u"古民家の再活用のために!", u"古民家の再活用のために!古民家の再活用のために!古民家の再活用のために!古民家の再活用のために!古民家の再活用のために!古民家の再活用のために!古民家の再活用のために!古民家の再活用のために!", "3.png", "13.png", 1, 12, 3, 2000, 900)
dictionary = item.to_dict()
redis_manager.set_dict(dictionary, item.major, item.minor)
# add item4
item = CharityItem(u"美しい山を守る!", u"ゴミを無くして!", u"ゴミを無くして美しい山を取り戻したい!ゴミを無くして美しい山を取り戻したい!ゴミを無くして美しい山を取り戻したい!ゴミを無くして美しい山を取り戻したい!ゴミを無くして美しい山を取り戻したい!ゴミを無くして美しい山を取り戻したい!ゴミを無くして美しい山を取り戻したい!", "4.png", "14.png", 4, 12, 4, 2000, 800)
dictionary = item.to_dict()
redis_manager.set_dict(dictionary, item.major, item.minor)
# add item5
item = CharityItem(u"お酒作りの美味しい水を守りたい!", u"お酒作りの美味しい水を守りたい!", u"お酒作りの美味しい水を守りたい!お酒作りの美味しい水を守りたい!お酒作りの美味しい水を守りたい!お酒作りの美味しい水を守りたい!お酒作りの美味しい水を守りたい!お酒作りの美味しい水を守りたい!お酒作りの美味しい水を守りたい!", "5.png", "15.png", 2, 12, 5, 2000, 700)
dictionary = item.to_dict()
redis_manager.set_dict(dictionary, item.major, item.minor)
if __name__ == "__main__":
init_test_data()
app.run(host="0.0.0.0", debug=True)
| 38.923077
| 234
| 0.724308
|
3fecfa14b8d886fbe9962ec8c683860d6f9822bd
| 4,916
|
py
|
Python
|
src/vc_visual_verifier/settings.py
|
MonolithicMonk/vc-visual-verifier
|
2199a4b0f93993bccf00b5d8504b0d7c126ae3fd
|
[
"Apache-2.0"
] | null | null | null |
src/vc_visual_verifier/settings.py
|
MonolithicMonk/vc-visual-verifier
|
2199a4b0f93993bccf00b5d8504b0d7c126ae3fd
|
[
"Apache-2.0"
] | null | null | null |
src/vc_visual_verifier/settings.py
|
MonolithicMonk/vc-visual-verifier
|
2199a4b0f93993bccf00b5d8504b0d7c126ae3fd
|
[
"Apache-2.0"
] | null | null | null |
"""
Django settings for vc_visual_verifier project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
def parse_bool(val):
return val and val != "0" and str(val).lower() != "false"
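# e.g. parse_bool(None), parse_bool(""), parse_bool("0") and parse_bool("false")
# are all falsy, while parse_bool("1") and parse_bool("True") are truthy.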
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv(
"DJANGO_SECRET_KEY",
# safe value used for development when DJANGO_SECRET_KEY might not be set
"@ml^(k%**i84a2#m6em1^)rt-%chwas3z#w0sz=q3w0ng8zm77",
)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = parse_bool(os.getenv("DJANGO_DEBUG", "False"))
ALLOWED_HOSTS = ["*"]
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"oidc_rp",
"vc_visual_verifier",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"oidc_rp.middleware.OIDCRefreshIDTokenMiddleware",
]
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"oidc_rp.backends.OIDCAuthBackend",
)
AUTH_USER_MODEL = "vc_visual_verifier.User"
ROOT_URLCONF = "vc_visual_verifier.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "vc_visual_verifier/templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"oidc_rp.context_processors.oidc",
],
},
},
]
WSGI_APPLICATION = "vc_visual_verifier.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = "/static/"
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# Settings for django-oidc-rp
OIDC_RP_PROVIDER_ENDPOINT = os.getenv("OIDC_RP_PROVIDER_ENDPOINT")
OIDC_RP_PROVIDER_AUTHORIZATION_ENDPOINT = (
f"{OIDC_RP_PROVIDER_ENDPOINT}/vc/connect/authorize"
)
OIDC_RP_PROVIDER_TOKEN_ENDPOINT = f"{OIDC_RP_PROVIDER_ENDPOINT}/vc/connect/token"
OIDC_RP_PROVIDER_JWKS_ENDPOINT = (
f"{OIDC_RP_PROVIDER_ENDPOINT}/.well-known/openid-configuration/jwks"
)
OIDC_RP_PROVIDER_USERINFO_ENDPOINT = f"{OIDC_RP_PROVIDER_ENDPOINT}/vc/connect/userinfo"
OIDC_RP_CLIENT_ID = os.getenv("OIDC_RP_CLIENT_ID")
OIDC_RP_CLIENT_SECRET = os.getenv("OIDC_RP_CLIENT_SECRET")
OIDC_RP_PROVIDER_SIGNATURE_ALG = "RS256"
OIDC_RP_SCOPES = os.getenv("OIDC_RP_SCOPES", "openid profile vc_authn")
OIDC_RP_ID_TOKEN_INCLUDE_USERINFO = True
# vc-authn proof-configuration
VC_AUTHN_PRES_REQ_CONF_ID = os.getenv("VC_AUTHN_PRES_REQ_CONF_ID")
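# (Illustrative, not part of the original settings.) A minimal environment for
# a local run might look like the following; all values are placeholders:
#   OIDC_RP_PROVIDER_ENDPOINT=https://vc-authn.example.com
#   OIDC_RP_CLIENT_ID=vc-visual-verifier
#   OIDC_RP_CLIENT_SECRET=<client-secret>
#   VC_AUTHN_PRES_REQ_CONF_ID=verified-email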
# Claims to be checked in the UI
OIDC_CLAIMS_REQUIRED = os.getenv("OIDC_CLAIMS_REQUIRED")
# VC verifier name
VERIFIER_NAME = os.getenv("VERIFIER_NAME", "VC Visual Verifier")
| 29.614458
| 91
| 0.737388
|
9368abe4eb7900cc5bc94f13b1152855c1c4a136
| 9,899
|
py
|
Python
|
test/models/test_approximate_gp.py
|
saitcakmak/botorch
|
fca1fa6ec27814e320564c2041f6c7dba8847ecc
|
[
"MIT"
] | 2,344
|
2019-05-01T04:57:26.000Z
|
2022-03-29T17:00:41.000Z
|
test/models/test_approximate_gp.py
|
saitcakmak/botorch
|
fca1fa6ec27814e320564c2041f6c7dba8847ecc
|
[
"MIT"
] | 942
|
2019-05-01T05:11:30.000Z
|
2022-03-31T21:58:24.000Z
|
test/models/test_approximate_gp.py
|
saitcakmak/botorch
|
fca1fa6ec27814e320564c2041f6c7dba8847ecc
|
[
"MIT"
] | 280
|
2019-05-01T05:14:53.000Z
|
2022-03-29T16:00:33.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import torch
from botorch.models.approximate_gp import (
_SingleTaskVariationalGP,
ApproximateGPyTorchModel,
SingleTaskVariationalGP,
)
from botorch.models.transforms.input import Normalize
from botorch.models.transforms.outcome import Log
from botorch.posteriors import GPyTorchPosterior, TransformedPosterior
from botorch.sampling import IIDNormalSampler
from botorch.utils.testing import BotorchTestCase
from gpytorch.likelihoods import GaussianLikelihood, MultitaskGaussianLikelihood
from gpytorch.mlls import VariationalELBO
from gpytorch.variational import (
IndependentMultitaskVariationalStrategy,
VariationalStrategy,
)
class TestApproximateGP(BotorchTestCase):
def setUp(self):
super().setUp()
self.train_X = torch.rand(10, 1, device=self.device)
self.train_Y = torch.sin(self.train_X) + torch.randn_like(self.train_X) * 0.2
def test_initialization(self):
# test non batch case
model = ApproximateGPyTorchModel(train_X=self.train_X, train_Y=self.train_Y)
self.assertIsInstance(model.model, _SingleTaskVariationalGP)
self.assertIsInstance(model.likelihood, GaussianLikelihood)
self.assertIsInstance(model.model.variational_strategy, VariationalStrategy)
self.assertEqual(model.num_outputs, 1)
# test batch case
stacked_y = torch.cat((self.train_Y, self.train_Y), dim=-1)
model = ApproximateGPyTorchModel(
train_X=self.train_X, train_Y=stacked_y, num_outputs=2
)
self.assertIsInstance(model.model, _SingleTaskVariationalGP)
self.assertIsInstance(model.likelihood, MultitaskGaussianLikelihood)
self.assertIsInstance(
model.model.variational_strategy, IndependentMultitaskVariationalStrategy
)
self.assertEqual(model.num_outputs, 2)
class TestSingleTaskVariationalGP(BotorchTestCase):
def setUp(self):
super().setUp()
train_X = torch.rand(10, 1, device=self.device)
train_y = torch.sin(train_X) + torch.randn_like(train_X) * 0.2
self.model = SingleTaskVariationalGP(
train_X=train_X, likelihood=GaussianLikelihood()
).to(self.device)
mll = VariationalELBO(self.model.likelihood, self.model.model, num_data=10)
loss = -mll(self.model.likelihood(self.model(train_X)), train_y).sum()
loss.backward()
def test_posterior(self):
# basic test of checking that the posterior works as intended
test_x = torch.rand(30, 1, device=self.device)
posterior = self.model.posterior(test_x)
self.assertIsInstance(posterior, GPyTorchPosterior)
posterior = self.model.posterior(test_x, observation_noise=True)
self.assertIsInstance(posterior, GPyTorchPosterior)
# now loop through all possibilities
train_X = torch.rand(3, 10, 1, device=self.device)
train_Y = torch.randn(3, 10, 2, device=self.device)
test_X = torch.rand(3, 5, 1, device=self.device)
non_batched = [train_X[0], train_Y[0, :, 0].unsqueeze(-1), test_X[0]]
non_batched_mo = [train_X[0], train_Y[0], test_X[0]]
batched = [train_X, train_Y[..., 0].unsqueeze(-1), test_X]
# batched multi-output is not supported at this time
# batched_mo = [train_X, train_Y, test_X]
non_batched_to_batched = [train_X[0], train_Y[0], test_X]
all_test_lists = [non_batched, non_batched_mo, batched, non_batched_to_batched]
for [tx, ty, test] in all_test_lists:
print(tx.shape, ty.shape, test.shape)
model = SingleTaskVariationalGP(tx, ty, inducing_points=tx)
posterior = model.posterior(test)
self.assertIsInstance(posterior, GPyTorchPosterior)
def test_variational_setUp(self):
for dtype in [torch.float, torch.double]:
train_X = torch.rand(10, 1, device=self.device, dtype=dtype)
train_y = torch.randn(10, 3, device=self.device, dtype=dtype)
for ty, num_out in [[train_y, 3], [train_y, 1], [None, 3]]:
batched_model = SingleTaskVariationalGP(
train_X,
train_Y=ty,
num_outputs=num_out,
learn_inducing_points=False,
).to(self.device)
mll = VariationalELBO(
batched_model.likelihood, batched_model.model, num_data=10
)
with torch.enable_grad():
loss = -mll(
batched_model.likelihood(batched_model(train_X)), train_y
).sum()
loss.backward()
# ensure that inducing points do not require grad
model_var_strat = batched_model.model.variational_strategy
self.assertEqual(
model_var_strat.base_variational_strategy.inducing_points.grad,
None,
)
# but that the covariance does have a gradient
self.assertIsNotNone(
batched_model.model.covar_module.raw_outputscale.grad
)
# check that we always have three outputs
self.assertEqual(batched_model._num_outputs, 3)
self.assertIsInstance(
batched_model.likelihood, MultitaskGaussianLikelihood
)
def test_likelihood_and_fantasize(self):
self.assertIsInstance(self.model.likelihood, GaussianLikelihood)
        self.assertTrue(self.model._is_custom_likelihood)
test_X = torch.randn(5, 1, device=self.device)
with self.assertRaises(NotImplementedError):
self.model.fantasize(test_X, sampler=IIDNormalSampler(num_samples=32))
def test_initializations(self):
train_X = torch.rand(15, 1, device=self.device)
train_Y = torch.rand(15, 1, device=self.device)
stacked_train_X = torch.cat((train_X, train_X), dim=0)
for X, num_ind in [[train_X, 5], [stacked_train_X, 20], [stacked_train_X, 5]]:
model = SingleTaskVariationalGP(train_X=X, inducing_points=num_ind)
if num_ind == 5:
self.assertLessEqual(
model.model.variational_strategy.inducing_points.shape,
torch.Size((5, 1)),
)
else:
                # should not have 20 inducing points when only 15 distinct
                # training inputs are passed
self.assertLess(
model.model.variational_strategy.inducing_points.shape[-2], num_ind
)
test_X = torch.rand(5, 1, device=self.device)
# test transforms
for inp_trans, out_trans in itertools.product(
[None, Normalize(d=1)], [None, Log()]
):
model = SingleTaskVariationalGP(
train_X=train_X,
train_Y=train_Y,
outcome_transform=out_trans,
input_transform=inp_trans,
)
if inp_trans is not None:
self.assertIsInstance(model.input_transform, Normalize)
else:
self.assertFalse(hasattr(model, "input_transform"))
if out_trans is not None:
self.assertIsInstance(model.outcome_transform, Log)
posterior = model.posterior(test_X)
self.assertIsInstance(posterior, TransformedPosterior)
else:
self.assertFalse(hasattr(model, "outcome_transform"))
def test_inducing_point_init(self):
train_X_1 = torch.rand(15, 1, device=self.device)
train_X_2 = torch.rand(15, 1, device=self.device)
# single-task
model_1 = SingleTaskVariationalGP(train_X=train_X_1, inducing_points=5)
model_1.init_inducing_points(train_X_2)
model_1_inducing = model_1.model.variational_strategy.inducing_points
model_2 = SingleTaskVariationalGP(train_X=train_X_2, inducing_points=5)
model_2_inducing = model_2.model.variational_strategy.inducing_points
self.assertTrue(torch.allclose(model_1_inducing, model_2_inducing))
# multi-task
model_1 = SingleTaskVariationalGP(
train_X=train_X_1, inducing_points=5, num_outputs=2
)
model_1.init_inducing_points(train_X_2)
model_1_inducing = (
model_1.model.variational_strategy.base_variational_strategy.inducing_points
)
model_2 = SingleTaskVariationalGP(
train_X=train_X_2, inducing_points=5, num_outputs=2
)
model_2_inducing = (
model_2.model.variational_strategy.base_variational_strategy.inducing_points
)
self.assertTrue(torch.allclose(model_1_inducing, model_2_inducing))
# batched inputs
train_X_1 = torch.rand(2, 15, 1, device=self.device)
train_X_2 = torch.rand(2, 15, 1, device=self.device)
train_Y = torch.rand(2, 15, 1, device=self.device)
model_1 = SingleTaskVariationalGP(
train_X=train_X_1, train_Y=train_Y, inducing_points=5
)
model_1.init_inducing_points(train_X_2)
model_1_inducing = model_1.model.variational_strategy.inducing_points
model_2 = SingleTaskVariationalGP(
train_X=train_X_2, train_Y=train_Y, inducing_points=5
)
model_2_inducing = model_2.model.variational_strategy.inducing_points
self.assertTrue(model_1_inducing.shape == (2, 5, 1))
self.assertTrue(model_2_inducing.shape == (2, 5, 1))
self.assertTrue(torch.allclose(model_1_inducing, model_2_inducing))
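# Illustrative sketch (not part of the original test module): minimal end-to-end
# usage of SingleTaskVariationalGP mirroring the setUp above: build the model,
# take a few ELBO steps, then query the posterior. The shapes, learning rate and
# number of steps are arbitrary assumptions chosen only for demonstration.
def _variational_gp_demo():
    train_X = torch.rand(10, 1)
    train_Y = torch.sin(train_X) + 0.2 * torch.randn_like(train_X)
    model = SingleTaskVariationalGP(train_X=train_X, train_Y=train_Y)
    mll = VariationalELBO(model.likelihood, model.model, num_data=train_X.shape[-2])
    optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
    for _ in range(10):  # a handful of ELBO steps is enough for a smoke test
        optimizer.zero_grad()
        loss = -mll(model.likelihood(model(train_X)), train_Y.squeeze(-1)).sum()
        loss.backward()
        optimizer.step()
    # posterior mean is expected to have shape (5, 1): 5 test points, 1 output
    return model.posterior(torch.rand(5, 1)).mean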
| 41.074689
| 88
| 0.649763
|
fb666934b32d4196e2f3214ecce3f9ed8c18fb4c
| 5,451
|
py
|
Python
|
colour/temperature/hernandez1999.py
|
BPearlstine/colour
|
40f0281295496774d2a19eee017d50fd0c265bd8
|
[
"Cube",
"BSD-3-Clause"
] | 2
|
2020-05-03T20:15:42.000Z
|
2021-04-09T18:19:06.000Z
|
colour/temperature/hernandez1999.py
|
BPearlstine/colour
|
40f0281295496774d2a19eee017d50fd0c265bd8
|
[
"Cube",
"BSD-3-Clause"
] | null | null | null |
colour/temperature/hernandez1999.py
|
BPearlstine/colour
|
40f0281295496774d2a19eee017d50fd0c265bd8
|
[
"Cube",
"BSD-3-Clause"
] | 1
|
2019-12-11T19:48:27.000Z
|
2019-12-11T19:48:27.000Z
|
# -*- coding: utf-8 -*-
"""
Hernandez-Andres, Lee and Romero (1999) Correlated Colour Temperature
=====================================================================
Defines *Hernandez-Andres et al. (1999)* correlated colour temperature
:math:`T_{cp}` computations objects:
- :func:`colour.temperature.xy_to_CCT_Hernandez1999`: Correlated colour
temperature :math:`T_{cp}` computation of given *CIE xy* chromaticity
coordinates using *Hernandez-Andres, Lee and Romero (1999)* method.
- :func:`colour.temperature.CCT_to_xy_Hernandez1999`: *CIE xy* chromaticity
coordinates computation of given correlated colour temperature
:math:`T_{cp}` using *Hernandez-Andres, Lee and Romero (1999)* method.
See Also
--------
`Colour Temperature & Correlated Colour Temperature Jupyter Notebook
<http://nbviewer.jupyter.org/github/colour-science/colour-notebooks/\
blob/master/notebooks/temperature/cct.ipynb>`_
References
----------
- :cite:`Hernandez-Andres1999a` : Hernandez-Andres, J., Lee, R. L., &
Romero, J. (1999). Calculating correlated color temperatures across the
entire gamut of daylight and skylight chromaticities. Applied Optics,
38(27), 5703. doi:10.1364/AO.38.005703
"""
from __future__ import division, unicode_literals
import numpy as np
from scipy.optimize import minimize
from colour.colorimetry import ILLUMINANTS
from colour.utilities import as_float_array, as_numeric, tsplit, usage_warning
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-science@googlegroups.com'
__status__ = 'Production'
__all__ = ['xy_to_CCT_Hernandez1999', 'CCT_to_xy_Hernandez1999']
def xy_to_CCT_Hernandez1999(xy):
"""
Returns the correlated colour temperature :math:`T_{cp}` from given
*CIE xy* chromaticity coordinates using *Hernandez-Andres et al. (1999)*
method.
Parameters
----------
xy : array_like
*CIE xy* chromaticity coordinates.
Returns
-------
numeric
Correlated colour temperature :math:`T_{cp}`.
References
----------
:cite:`Hernandez-Andres1999a`
Examples
--------
>>> xy = np.array([0.31270, 0.32900])
>>> xy_to_CCT_Hernandez1999(xy) # doctest: +ELLIPSIS
6500.7420431...
"""
x, y = tsplit(xy)
n = (x - 0.3366) / (y - 0.1735)
CCT = (-949.86315 + 6253.80338 * np.exp(-n / 0.92159) +
28.70599 * np.exp(-n / 0.20039) + 0.00004 * np.exp(-n / 0.07125))
n = np.where(CCT > 50000, (x - 0.3356) / (y - 0.1691), n)
CCT = np.where(
CCT > 50000,
36284.48953 + 0.00228 * np.exp(-n / 0.07861) +
5.4535e-36 * np.exp(-n / 0.01543),
CCT,
)
return as_numeric(CCT)
def CCT_to_xy_Hernandez1999(CCT, optimisation_parameters=None):
"""
Returns the *CIE xy* chromaticity coordinates from given correlated colour
temperature :math:`T_{cp}` using *Hernandez-Andres et al. (1999)* method.
Parameters
----------
CCT : numeric or array_like
Correlated colour temperature :math:`T_{cp}`.
optimisation_parameters : dict_like, optional
Parameters for :func:`scipy.optimize.minimize` definition.
Returns
-------
ndarray
*CIE xy* chromaticity coordinates.
Warnings
--------
*Hernandez-Andres et al. (1999)* method for computing *CIE xy* chromaticity
coordinates from given correlated colour temperature is not a bijective
function and might produce unexpected results. It is given for consistency
with other correlated colour temperature computation methods but should be
avoided for practical applications. The current implementation relies on
optimization using :func:`scipy.optimize.minimize` definition and thus has
reduced precision and poor performance.
References
----------
:cite:`Hernandez-Andres1999a`
Examples
--------
>>> CCT_to_xy_Hernandez1999(6500.7420431786531) # doctest: +ELLIPSIS
array([ 0.3127..., 0.329...])
"""
usage_warning('"Hernandez-Andres et al. (1999)" method for computing '
'"CIE xy" chromaticity coordinates from given correlated '
                  'colour temperature is not a bijective function and '
'might produce unexpected results. It is given for '
'consistency with other correlated colour temperature '
'computation methods but should be avoided for practical '
'applications.')
CCT = as_float_array(CCT)
shape = list(CCT.shape)
CCT = np.atleast_1d(CCT.reshape([-1, 1]))
def objective_function(xy, CCT):
"""
Objective function.
"""
objective = np.linalg.norm(xy_to_CCT_Hernandez1999(xy) - CCT)
return objective
optimisation_settings = {
'method': 'Nelder-Mead',
'options': {
'fatol': 1e-10,
},
}
if optimisation_parameters is not None:
optimisation_settings.update(optimisation_parameters)
CCT = as_float_array([
minimize(
objective_function,
x0=ILLUMINANTS['CIE 1931 2 Degree Standard Observer']['D65'],
args=(CCT_i, ),
**optimisation_settings).x for CCT_i in CCT
])
return as_numeric(CCT.reshape(shape + [2]))
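# Illustrative sketch (not part of the original module): round-tripping the two
# definitions above, mirroring the values in their docstring examples.
if __name__ == '__main__':  # pragma: no cover
    _xy = np.array([0.31270, 0.32900])
    _CCT = xy_to_CCT_Hernandez1999(_xy)  # ~6500.742 K
    print(_CCT, CCT_to_xy_Hernandez1999(_CCT))  # ~[0.3127, 0.329]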
| 32.254438
| 79
| 0.650156
|
d377322de85a41ebf041f43c79803db2f6a6ecc6
| 766
|
py
|
Python
|
generated-libraries/python/netapp/qos/qos_workload_get_iter_key_td.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | 2
|
2017-03-28T15:31:26.000Z
|
2018-08-16T22:15:18.000Z
|
generated-libraries/python/netapp/qos/qos_workload_get_iter_key_td.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
generated-libraries/python/netapp/qos/qos_workload_get_iter_key_td.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
from netapp.netapp_object import NetAppObject
class QosWorkloadGetIterKeyTd(NetAppObject):
"""
Key typedef for table qos_workload_ui
"""
_key_0 = None
@property
def key_0(self):
"""
Field workload
"""
return self._key_0
@key_0.setter
def key_0(self, val):
if val != None:
self.validate('key_0', val)
self._key_0 = val
@staticmethod
def get_api_name():
return "qos-workload-get-iter-key-td"
@staticmethod
def get_desired_attrs():
return [
'key-0',
]
def describe_properties(self):
return {
'key_0': { 'class': basestring, 'is_list': False, 'required': 'optional' },
}
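# Illustrative sketch (not part of the generated module): inspecting the API
# metadata exposed by this key typedef; the printed values come straight from
# the static methods defined above.
if __name__ == "__main__":  # pragma: no cover
    print(QosWorkloadGetIterKeyTd.get_api_name())  # "qos-workload-get-iter-key-td"
    print(QosWorkloadGetIterKeyTd.get_desired_attrs())  # ['key-0']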
| 21.885714
| 87
| 0.546997
|
752b0d65509d780276afdf545bcc049f019de90b
| 2,631
|
py
|
Python
|
src/primaires/scripting/variable.py
|
stormi/tsunami
|
bdc853229834b52b2ee8ed54a3161a1a3133d926
|
[
"BSD-3-Clause"
] | null | null | null |
src/primaires/scripting/variable.py
|
stormi/tsunami
|
bdc853229834b52b2ee8ed54a3161a1a3133d926
|
[
"BSD-3-Clause"
] | null | null | null |
src/primaires/scripting/variable.py
|
stormi/tsunami
|
bdc853229834b52b2ee8ed54a3161a1a3133d926
|
[
"BSD-3-Clause"
] | null | null | null |
# -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la classe Variable détaillée plus bas."""
from abstraits.obase import *
class Variable(BaseObj):
"""Classe contenant une variable d'évènement.
Une variable d'évènement contient un nom, un certain type bien entendu,
ainsi qu'une aide.
"""
_nom = "variable_scripting"
_version = 1
def __init__(self, evenement, nom, str_type=None):
"""Constructeur d'une variable"""
BaseObj.__init__(self)
self.evenement = evenement
self.nom = nom
self.nom_type = str_type
self.aide = "non précisée"
self._construire()
def __getnewargs__(self):
return (None, "")
def _get_type(self):
types = __import__("primaires.scripting.types").scripting.types
builtins = __builtins__.copy()
try:
type = builtins[self.nom_type]
except KeyError:
type = getattr(types, self.nom_type)
return type
def _set_type(self, type):
self.nom_type = type.__name__
type = property(_get_type, _set_type)
| 36.541667
| 79
| 0.720258
|
694b55cedd3e7f9248138175e42e802fc0b83c0c
| 39,855
|
py
|
Python
|
src/transformers/tokenization_utils.py
|
dctelus/transformers
|
6786cbc4b14ebff0ac59c768cadd109391db9a08
|
[
"Apache-2.0"
] | 8,028
|
2018-11-05T15:19:44.000Z
|
2019-07-16T09:14:59.000Z
|
src/transformers/tokenization_utils.py
|
arron1227/transformers
|
b18dfd95e1f60ae65a959a7b255fc06522170d1b
|
[
"Apache-2.0"
] | 731
|
2018-11-05T21:35:52.000Z
|
2019-07-16T09:51:26.000Z
|
src/transformers/tokenization_utils.py
|
arron1227/transformers
|
b18dfd95e1f60ae65a959a7b255fc06522170d1b
|
[
"Apache-2.0"
] | 2,106
|
2018-11-05T15:29:15.000Z
|
2019-07-16T08:51:57.000Z
|
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tokenization classes for python tokenizers. For fast tokenizers (provided by HuggingFace's tokenizers library) see
tokenization_utils_fast.py
"""
import bisect
import itertools
import re
import unicodedata
from collections import OrderedDict
from typing import Any, Dict, List, Optional, Tuple, Union, overload
from .tokenization_utils_base import (
ENCODE_KWARGS_DOCSTRING,
ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING,
INIT_TOKENIZER_DOCSTRING,
AddedToken,
BatchEncoding,
EncodedInput,
EncodedInputPair,
PreTokenizedInput,
PreTokenizedInputPair,
PreTrainedTokenizerBase,
TextInput,
TextInputPair,
TruncationStrategy,
)
from .utils import PaddingStrategy, TensorType, add_end_docstrings, logging
logger = logging.get_logger(__name__)
# Slow tokenizers are saved in a vocabulary plus three separated files
SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json"
ADDED_TOKENS_FILE = "added_tokens.json"
TOKENIZER_CONFIG_FILE = "tokenizer_config.json"
class Trie:
"""
Trie in Python. Creates a Trie out of a list of words. The trie is used to split on `added_tokens` in one pass
Loose reference https://en.wikipedia.org/wiki/Trie
"""
def __init__(self):
self.data = {}
def add(self, word: str):
"""
        Passes over every char (utf-8 char) in word and recursively adds it to the internal `data` trie representation.
        The special key `""` is used to represent termination.
        This function is idempotent: adding the same word twice leaves the trie unchanged.
Example:
```python
>>> trie = Trie()
>>> trie.add("Hello 友達")
>>> trie.data
{"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}}
>>> trie.add("Hello")
>>> trie.data
{"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}}
```
"""
if not word:
# Prevent empty string
return
ref = self.data
for char in word:
ref[char] = char in ref and ref[char] or {}
ref = ref[char]
ref[""] = 1
def split(self, text: str) -> List[str]:
"""
        Will look for the words added to the trie within `text`. Output is the original string split along the
        boundaries of the words found.
        This trie will match the longest possible word first!
Example:
```python
>>> trie = Trie()
>>> trie.split("[CLS] This is a extra_id_100")
["[CLS] This is a extra_id_100"]
>>> trie.add("[CLS]")
>>> trie.add("extra_id_1")
>>> trie.add("extra_id_100")
>>> trie.split("[CLS] This is a extra_id_100")
["[CLS]", " This is a ", "extra_id_100"]
```
"""
# indexes are counted left of the chars index.
# "hello", index 0, is left of h, index 1 is between h and e.
# index 5 is right of the "o".
# States are going to capture every possible start (indexes as above)
# as keys, and have as values, a pointer to the position in the trie
# where we're at. This is a partial match for now.
# This enables to keep track of multiple matches while we're iterating
# the string
# If the trie contains, "blowing", and "lower" and we encounter the
# string "blower", we need to split into ["b", "lower"].
# This is where we need to keep track of multiple possible starts.
states = OrderedDict()
        # This will contain every index where we need
        # to cut.
# We force to cut at offset 0 and len(text) (added later)
offsets = [0]
# This is used by the lookahead which needs to skip over
# some text where the full match exceeded the place in the initial
# for loop
skip = 0
        # Main loop, giving this algorithm O(n) complexity
for current, current_char in enumerate(text):
if skip and current < skip:
# Prevents the lookahead for matching twice
# like extra_id_100 and id_100
continue
# This will track every state
            # that stops matching; we need to stop tracking them.
# If we look at "lowball", we're going to match "l" (add it to states), "o", "w", then
# fail on "b", we need to remove 0 from the valid states.
to_remove = set()
# Whenever we found a match, we need to drop everything
# this is a greedy algorithm, it will match on the first found token
reset = False
# In this case, we already have partial matches (But unfinished)
for start, trie_pointer in states.items():
if "" in trie_pointer:
# This is a final match, we need to reset and
# store the results in `offsets`.
# Lookahead to match longest first
# Important in case of extra_id_1 vs extra_id_100
# Here we are also actively looking for other earlier partial
# matches
# "[CLS]", "L", we need to match CLS even if L is special
for lookstart, looktrie_pointer in states.items():
if lookstart > start:
# This partial match is later, we can stop looking
break
elif lookstart < start:
# This partial match is earlier, the trie pointer
# was already updated, so index is + 1
lookahead_index = current + 1
end = current + 1
else:
# Here lookstart == start and
# looktrie_pointer == trie_pointer
# It wasn't updated yet so indices are current ones
lookahead_index = current
end = current
next_char = text[lookahead_index] if lookahead_index < len(text) else None
if "" in looktrie_pointer:
start = lookstart
end = lookahead_index
skip = lookahead_index
while next_char in looktrie_pointer:
looktrie_pointer = looktrie_pointer[next_char]
lookahead_index += 1
if "" in looktrie_pointer:
start = lookstart
end = lookahead_index
skip = lookahead_index
if lookahead_index == len(text):
# End of string
break
next_char = text[lookahead_index]
# End lookahead
# Storing and resetting
offsets.append(start)
offsets.append(end)
reset = True
break
elif current_char in trie_pointer:
# The current character being looked at has a match within the trie
# update the pointer (it will be stored back into states later).
trie_pointer = trie_pointer[current_char]
# Storing back the new pointer into the states.
# Partial matches got longer by one.
states[start] = trie_pointer
else:
                    # The new character has no match in the trie, we need
# to stop keeping track of this partial match.
# We can't do it directly within the loop because of how
# python iteration works
to_remove.add(start)
# Either clearing the full start (we found a real match)
# Or clearing only the partial matches that didn't work.
if reset:
states = {}
else:
for start in to_remove:
del states[start]
# If this character is a starting character within the trie
# start keeping track of this partial match.
if current >= skip and current_char in self.data:
states[current] = self.data[current_char]
# We have a cut at the end with states.
for start, trie_pointer in states.items():
if "" in trie_pointer:
# This is a final match, we need to reset and
# store the results in `offsets`.
end = len(text)
offsets.append(start)
offsets.append(end)
                # Longest cut is always the one with the lowest start, i.e. the
                # first item, so we need to break.
break
return self.cut_text(text, offsets)
def cut_text(self, text, offsets):
# We have all the offsets now, we just need to do the actual splitting.
        # We may still need to add the first part of the string and the final
        # part after the last match.
offsets.append(len(text))
tokens = []
start = 0
for end in offsets:
if start > end:
logger.error(
"There was a bug in Trie algorithm in tokenization. Attempting to recover. Please report it anyway."
)
continue
elif start == end:
# This might happen if there's a match at index 0
# we're also preventing zero-width cuts in case of two
# consecutive matches
continue
tokens.append(text[start:end])
start = end
return tokens
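# Illustrative sketch (not part of the original module): the greedy,
# longest-match-first behaviour of Trie.split described in the comments above,
# using the example words mentioned there.
def _trie_demo():
    trie = Trie()
    trie.add("blowing")
    trie.add("lower")
    assert trie.split("blower") == ["b", "lower"]
    trie.add("extra_id_1")
    trie.add("extra_id_100")
    # Longest match wins: the text is not cut into "extra_id_1" + "00".
    assert trie.split("[CLS] extra_id_100") == ["[CLS] ", "extra_id_100"]
    return trie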
def _is_whitespace(char):
"""Checks whether `char` is a whitespace character."""
# \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `char` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `char` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
def _is_end_of_word(text):
"""Checks whether the last character in text is one of a punctuation, control or whitespace character."""
last_char = text[-1]
return bool(_is_control(last_char) | _is_punctuation(last_char) | _is_whitespace(last_char))
def _is_start_of_word(text):
"""Checks whether the first character in text is one of a punctuation, control or whitespace character."""
first_char = text[0]
return bool(_is_control(first_char) | _is_punctuation(first_char) | _is_whitespace(first_char))
def _insert_one_token_to_ordered_list(token_list: List[str], new_token: str):
"""
    Inserts one token into an ordered list if it does not already exist. Note: token_list must be sorted.
"""
insertion_idx = bisect.bisect_left(token_list, new_token)
# Checks if new_token is already in the ordered token_list
if insertion_idx < len(token_list) and token_list[insertion_idx] == new_token:
# new_token is in token_list, don't add
return
else:
token_list.insert(insertion_idx, new_token)
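# Illustrative sketch (not part of the original module): the character
# classification helpers and the sorted-insert helper defined above.
def _helpers_demo():
    assert _is_whitespace(" ") and not _is_whitespace("a")
    assert _is_punctuation(",") and _is_punctuation("$")
    assert _is_control("\x00") and not _is_control("\t")
    assert _is_end_of_word("hello.") and _is_start_of_word(" world")
    tokens = ["alpha", "gamma"]
    _insert_one_token_to_ordered_list(tokens, "beta")
    _insert_one_token_to_ordered_list(tokens, "alpha")  # duplicates are ignored
    assert tokens == ["alpha", "beta", "gamma"]
    return tokens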
@add_end_docstrings(INIT_TOKENIZER_DOCSTRING)
class PreTrainedTokenizer(PreTrainedTokenizerBase):
"""
Base class for all slow tokenizers.
Inherits from [`~tokenization_utils_base.PreTrainedTokenizerBase`].
    Handles all the shared methods for tokenization and special tokens, as well as methods for
    downloading/caching/loading pretrained tokenizers and adding tokens to the vocabulary.
    This class also contains the added tokens in a unified way on top of all tokenizers so we don't have to handle the
specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...).
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
# Added tokens - We store this for both slow and fast tokenizers
# until the serialization of Fast tokenizers is updated
self.added_tokens_encoder: Dict[str, int] = {}
self.added_tokens_decoder: Dict[int, str] = {}
self.unique_no_split_tokens: List[str] = []
self.tokens_trie = Trie()
self._decode_use_source_tokenizer = False
@property
def is_fast(self) -> bool:
return False
@property
def vocab_size(self) -> int:
"""
`int`: Size of the base vocabulary (without the added tokens).
"""
raise NotImplementedError
def get_added_vocab(self) -> Dict[str, int]:
"""
Returns the added tokens in the vocabulary as a dictionary of token to index.
Returns:
`Dict[str, int]`: The added tokens.
"""
return self.added_tokens_encoder
def __len__(self):
"""
Size of the full vocabulary with the added tokens.
"""
return self.vocab_size + len(self.added_tokens_encoder)
def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
"""
        Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to
        it with indices starting from the length of the current vocabulary.
        Args:
            new_tokens (`List[str]` or `List[tokenizers.AddedToken]`):
                Token(s) to add to the vocabulary. A token is only added if it's not already in the vocabulary (tested by
                checking whether the tokenizer assigns the index of the `unk_token` to it).
special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the tokens should be added as special tokens.
Returns:
`int`: The number of tokens actually added to the vocabulary.
Examples:
```python
# Let's see how to increase the vocabulary of Bert model and tokenizer
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertModel.from_pretrained("bert-base-uncased")
num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"])
print("We have added", num_added_toks, "tokens")
# Note: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
model.resize_token_embeddings(len(tokenizer))
```"""
new_tokens = [str(tok) for tok in new_tokens]
tokens_to_add = []
for token in new_tokens:
if not isinstance(token, str):
raise TypeError(f"Token {token} is not a string but a {type(token)}.")
if not special_tokens and hasattr(self, "do_lower_case") and self.do_lower_case:
token = token.lower()
if (
token != self.unk_token
and self.convert_tokens_to_ids(token) == self.convert_tokens_to_ids(self.unk_token)
and token not in tokens_to_add
):
tokens_to_add.append(token)
if self.verbose:
logger.info(f"Adding {token} to the vocabulary")
added_tok_encoder = dict((tok, len(self) + i) for i, tok in enumerate(tokens_to_add))
added_tok_decoder = {v: k for k, v in added_tok_encoder.items()}
self.added_tokens_encoder.update(added_tok_encoder)
self.added_tokens_decoder.update(added_tok_decoder)
        # Make sure we don't split on any special tokens (even if they were already in the vocab before, e.g. for Albert)
if special_tokens:
if len(new_tokens) == 1:
_insert_one_token_to_ordered_list(self.unique_no_split_tokens, new_tokens[0])
else:
self.unique_no_split_tokens = sorted(set(self.unique_no_split_tokens).union(set(new_tokens)))
else:
# Or on the newly added tokens
if len(tokens_to_add) == 1:
_insert_one_token_to_ordered_list(self.unique_no_split_tokens, tokens_to_add[0])
else:
self.unique_no_split_tokens = sorted(set(self.unique_no_split_tokens).union(set(tokens_to_add)))
self._create_trie(self.unique_no_split_tokens)
return len(tokens_to_add)
def _create_trie(self, unique_no_split_tokens):
trie = Trie()
for token in unique_no_split_tokens:
if hasattr(self, "do_lower_case") and self.do_lower_case and token not in self.all_special_tokens:
trie.add(token.lower())
else:
trie.add(token)
self.tokens_trie = trie
def num_special_tokens_to_add(self, pair: bool = False) -> int:
"""
Returns the number of added tokens when encoding a sequence with special tokens.
<Tip>
This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not put
this inside your training loop.
</Tip>
Args:
pair (`bool`, *optional*, defaults to `False`):
Whether the number of added tokens should be computed in the case of a sequence pair or a single
sequence.
Returns:
`int`: Number of special tokens added to sequences.
"""
token_ids_0 = []
token_ids_1 = []
return len(self.build_inputs_with_special_tokens(token_ids_0, token_ids_1 if pair else None))
def tokenize(self, text: TextInput, **kwargs) -> List[str]:
"""
        Converts a string into a sequence of tokens, using the tokenizer.
        Splits into words for word-based vocabularies or into sub-words for sub-word-based vocabularies
(BPE/SentencePieces/WordPieces). Takes care of added tokens.
Args:
text (`str`):
The sequence to be encoded.
**kwargs (additional keyword arguments):
Passed along to the model-specific `prepare_for_tokenization` preprocessing method.
Returns:
`List[str]`: The list of tokens.
"""
# Simple mapping string => AddedToken for special tokens with specific tokenization behaviors
all_special_tokens_extended = dict(
(str(t), t) for t in self.all_special_tokens_extended if isinstance(t, AddedToken)
)
text, kwargs = self.prepare_for_tokenization(text, **kwargs)
if kwargs:
logger.warning(f"Keyword arguments {kwargs} not recognized.")
# TODO: should this be in the base class?
if hasattr(self, "do_lower_case") and self.do_lower_case:
# convert non-special tokens to lowercase
escaped_special_toks = [
re.escape(s_tok) for s_tok in (self.unique_no_split_tokens + self.all_special_tokens)
]
pattern = r"(" + r"|".join(escaped_special_toks) + r")|" + r"(.+?)"
text = re.sub(pattern, lambda m: m.groups()[0] or m.groups()[1].lower(), text)
no_split_token = set(self.unique_no_split_tokens)
tokens = self.tokens_trie.split(text)
# ["This is something", "<special_token_1>", " else"]
for i, token in enumerate(tokens):
if token in no_split_token:
tok_extended = all_special_tokens_extended.get(token, None)
left = tokens[i - 1] if i > 0 else None
right = tokens[i + 1] if i < len(tokens) - 1 else None
if isinstance(tok_extended, AddedToken):
if tok_extended.rstrip and right:
# A bit counter-intuitive but we strip the left of the string
# since tok_extended.rstrip means the special token is eating all white spaces on its right
tokens[i + 1] = right.lstrip()
# Strip white spaces on the left
if tok_extended.lstrip and left:
tokens[i - 1] = left.rstrip() # Opposite here
else:
# We strip left and right by default
if right:
tokens[i + 1] = right.lstrip()
if left:
tokens[i - 1] = left.rstrip()
# ["This is something", "<special_token_1>", "else"]
tokenized_text = []
for token in tokens:
# Need to skip eventual empty (fully stripped) tokens
if not token:
continue
if token in no_split_token:
tokenized_text.append(token)
else:
tokenized_text.extend(self._tokenize(token))
# ["This", " is", " something", "<special_token_1>", "else"]
return tokenized_text
def _tokenize(self, text, **kwargs):
"""
        Converts a string into a sequence of tokens (string), using the tokenizer. Splits into words for word-based
        vocabularies or into sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces).
        Does NOT take care of added tokens.
"""
raise NotImplementedError
def convert_tokens_to_ids(self, tokens: Union[str, List[str]]) -> Union[int, List[int]]:
"""
        Converts a token string (or a sequence of tokens) into a single integer id (or a sequence of ids), using the
vocabulary.
Args:
tokens (`str` or `List[str]`): One or several token(s) to convert to token id(s).
Returns:
`int` or `List[int]`: The token id or list of token ids.
"""
if tokens is None:
return None
if isinstance(tokens, str):
return self._convert_token_to_id_with_added_voc(tokens)
ids = []
for token in tokens:
ids.append(self._convert_token_to_id_with_added_voc(token))
return ids
def _convert_token_to_id_with_added_voc(self, token):
if token is None:
return None
if token in self.added_tokens_encoder:
return self.added_tokens_encoder[token]
return self._convert_token_to_id(token)
def _convert_token_to_id(self, token):
raise NotImplementedError
def _encode_plus(
self,
text: Union[TextInput, PreTokenizedInput, EncodedInput],
text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
def get_input_ids(text):
if isinstance(text, str):
tokens = self.tokenize(text, **kwargs)
return self.convert_tokens_to_ids(tokens)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str):
if is_split_into_words:
tokens = list(
itertools.chain(*(self.tokenize(t, is_split_into_words=True, **kwargs) for t in text))
)
return self.convert_tokens_to_ids(tokens)
else:
return self.convert_tokens_to_ids(text)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
return text
else:
if is_split_into_words:
raise ValueError(
f"Input {text} is not valid. Should be a string or a list/tuple of strings when `is_split_into_words=True`."
)
else:
raise ValueError(
f"Input {text} is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
)
if return_offsets_mapping:
raise NotImplementedError(
"return_offset_mapping is not available when using Python tokenizers. "
"To use this feature, change your tokenizer to one deriving from "
"transformers.PreTrainedTokenizerFast. "
"More information on available tokenizers at "
"https://github.com/huggingface/transformers/pull/2674"
)
first_ids = get_input_ids(text)
second_ids = get_input_ids(text_pair) if text_pair is not None else None
return self.prepare_for_model(
first_ids,
pair_ids=second_ids,
add_special_tokens=add_special_tokens,
padding=padding_strategy.value,
truncation=truncation_strategy.value,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
prepend_batch_axis=True,
return_attention_mask=return_attention_mask,
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
verbose=verbose,
)
def _batch_encode_plus(
self,
batch_text_or_text_pairs: Union[
List[TextInput],
List[TextInputPair],
List[PreTokenizedInput],
List[PreTokenizedInputPair],
List[EncodedInput],
List[EncodedInputPair],
],
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
def get_input_ids(text):
if isinstance(text, str):
tokens = self.tokenize(text, **kwargs)
return self.convert_tokens_to_ids(tokens)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], str):
if is_split_into_words:
tokens = list(
itertools.chain(*(self.tokenize(t, is_split_into_words=True, **kwargs) for t in text))
)
return self.convert_tokens_to_ids(tokens)
else:
return self.convert_tokens_to_ids(text)
elif isinstance(text, (list, tuple)) and len(text) > 0 and isinstance(text[0], int):
return text
else:
raise ValueError(
"Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
)
if return_offsets_mapping:
raise NotImplementedError(
"return_offset_mapping is not available when using Python tokenizers. "
"To use this feature, change your tokenizer to one deriving from "
"transformers.PreTrainedTokenizerFast."
)
input_ids = []
for ids_or_pair_ids in batch_text_or_text_pairs:
if not isinstance(ids_or_pair_ids, (list, tuple)):
ids, pair_ids = ids_or_pair_ids, None
elif is_split_into_words and not isinstance(ids_or_pair_ids[0], (list, tuple)):
ids, pair_ids = ids_or_pair_ids, None
else:
ids, pair_ids = ids_or_pair_ids
first_ids = get_input_ids(ids)
second_ids = get_input_ids(pair_ids) if pair_ids is not None else None
input_ids.append((first_ids, second_ids))
batch_outputs = self._batch_prepare_for_model(
input_ids,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
return_tensors=return_tensors,
verbose=verbose,
)
return BatchEncoding(batch_outputs)
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def _batch_prepare_for_model(
self,
batch_ids_pairs: List[Union[PreTokenizedInputPair, Tuple[List[int], None]]],
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[str] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_length: bool = False,
verbose: bool = True,
) -> BatchEncoding:
"""
        Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
        adds special tokens, truncates sequences if overflowing while taking into account the special tokens, and
        manages a moving window (with user-defined stride) for overflowing tokens.
Args:
batch_ids_pairs: list of tokenized input ids or input ids pairs
"""
batch_outputs = {}
for first_ids, second_ids in batch_ids_pairs:
outputs = self.prepare_for_model(
first_ids,
second_ids,
add_special_tokens=add_special_tokens,
padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward
truncation=truncation_strategy.value,
max_length=max_length,
stride=stride,
pad_to_multiple_of=None, # we pad in batch afterward
return_attention_mask=False, # we pad in batch afterward
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
return_tensors=None, # We convert the whole batch to tensors at the end
prepend_batch_axis=False,
verbose=verbose,
)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
batch_outputs = self.pad(
batch_outputs,
padding=padding_strategy.value,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
return batch_outputs
def prepare_for_tokenization(
self, text: str, is_split_into_words: bool = False, **kwargs
) -> Tuple[str, Dict[str, Any]]:
"""
Performs any necessary transformations before tokenization.
This method should pop the arguments from kwargs and return the remaining `kwargs` as well. We test the
`kwargs` at the end of the encoding process to be sure all the arguments have been used.
Args:
text (`str`):
The text to prepare.
is_split_into_words (`bool`, *optional*, defaults to `False`):
Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the
tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace)
which it will tokenize. This is useful for NER or token classification.
kwargs:
Keyword arguments to use for the tokenization.
Returns:
`Tuple[str, Dict[str, Any]]`: The prepared text and the unused kwargs.
"""
return (text, kwargs)
def get_special_tokens_mask(
self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
Args:
token_ids_0 (`List[int]`):
List of ids of the first sequence.
token_ids_1 (`List[int]`, *optional*):
List of ids of the second sequence.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model."
)
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
return [0] * ((len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0))
@overload
def convert_ids_to_tokens(self, ids: int, skip_special_tokens: bool = False) -> str:
...
@overload
def convert_ids_to_tokens(self, ids: List[int], skip_special_tokens: bool = False) -> List[str]:
...
def convert_ids_to_tokens(
self, ids: Union[int, List[int]], skip_special_tokens: bool = False
) -> Union[str, List[str]]:
"""
        Converts a single index or a sequence of indices into a token or a sequence of tokens, using the vocabulary and
added tokens.
Args:
ids (`int` or `List[int]`):
The token id (or token ids) to convert to tokens.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens in the decoding.
Returns:
`str` or `List[str]`: The decoded token(s).
"""
if isinstance(ids, int):
if ids in self.added_tokens_decoder:
return self.added_tokens_decoder[ids]
else:
return self._convert_id_to_token(ids)
tokens = []
for index in ids:
index = int(index)
if skip_special_tokens and index in self.all_special_ids:
continue
if index in self.added_tokens_decoder:
tokens.append(self.added_tokens_decoder[index])
else:
tokens.append(self._convert_id_to_token(index))
return tokens
def _convert_id_to_token(self, index: int) -> str:
raise NotImplementedError
def convert_tokens_to_string(self, tokens: List[str]) -> str:
return " ".join(tokens)
def _decode(
self,
token_ids: List[int],
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: bool = True,
spaces_between_special_tokens: bool = True,
**kwargs
) -> str:
self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
sub_texts = []
current_sub_text = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text))
current_sub_text = []
sub_texts.append(token)
else:
current_sub_text.append(token)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(current_sub_text))
if spaces_between_special_tokens:
text = " ".join(sub_texts)
else:
text = "".join(sub_texts)
if clean_up_tokenization_spaces:
clean_text = self.clean_up_tokenization(text)
return clean_text
else:
return text
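# Illustrative sketch (not part of the original module): a minimal whitespace
# splitting subclass showing the hooks a slow tokenizer is expected to implement
# (`vocab_size`, `get_vocab`, `_tokenize`, `_convert_token_to_id`,
# `_convert_id_to_token`). The toy vocabulary and class name are assumptions
# made only for demonstration; they do not correspond to any real model.
class _ToyWhitespaceTokenizer(PreTrainedTokenizer):
    def __init__(self, **kwargs):
        self._vocab = {"[UNK]": 0, "hello": 1, "world": 2}
        self._ids_to_tokens = {i: t for t, i in self._vocab.items()}
        super().__init__(unk_token="[UNK]", **kwargs)
    @property
    def vocab_size(self) -> int:
        return len(self._vocab)
    def get_vocab(self) -> Dict[str, int]:
        return dict(self._vocab, **self.added_tokens_encoder)
    def _tokenize(self, text, **kwargs):
        return text.split()
    def _convert_token_to_id(self, token):
        return self._vocab.get(token, self._vocab["[UNK]"])
    def _convert_id_to_token(self, index: int) -> str:
        return self._ids_to_tokens.get(index, "[UNK]")
# Example usage (not executed at import time):
#     toy = _ToyWhitespaceTokenizer()
#     toy.tokenize("hello world")                   # ['hello', 'world']
#     toy.convert_tokens_to_ids(["hello", "moon"])  # [1, 0]  ("moon" -> unk id)
#     toy.add_tokens(["moon"])                      # assigns id 3; "moon" is no longer split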
| 41.602296
| 132
| 0.601129
|
59026e9d131f02ea73acdce89e779f228781b46f
| 22,444
|
py
|
Python
|
tensorboardX/proto/summary_pb2.py
|
cnzeki/tensorboardX
|
eddd644a0876fa24cdc036cf94495e7cd59c125c
|
[
"MIT"
] | 6
|
2019-04-30T17:32:28.000Z
|
2019-11-14T03:19:16.000Z
|
tensorboardX/proto/summary_pb2.py
|
cnzeki/tensorboardX
|
eddd644a0876fa24cdc036cf94495e7cd59c125c
|
[
"MIT"
] | null | null | null |
tensorboardX/proto/summary_pb2.py
|
cnzeki/tensorboardX
|
eddd644a0876fa24cdc036cf94495e7cd59c125c
|
[
"MIT"
] | 3
|
2019-04-29T16:21:38.000Z
|
2019-11-14T03:19:31.000Z
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorboardX/proto/summary.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorboardX.proto import tensor_pb2 as tensorboardX_dot_proto_dot_tensor__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorboardX/proto/summary.proto',
package='tensorboardX',
syntax='proto3',
serialized_options=_b('\n\030org.tensorflow.frameworkB\rSummaryProtosP\001\370\001\001'),
serialized_pb=_b('\n tensorboardX/proto/summary.proto\x12\x0ctensorboardX\x1a\x1ftensorboardX/proto/tensor.proto\"\'\n\x12SummaryDescription\x12\x11\n\ttype_hint\x18\x01 \x01(\t\"\x87\x01\n\x0eHistogramProto\x12\x0b\n\x03min\x18\x01 \x01(\x01\x12\x0b\n\x03max\x18\x02 \x01(\x01\x12\x0b\n\x03num\x18\x03 \x01(\x01\x12\x0b\n\x03sum\x18\x04 \x01(\x01\x12\x13\n\x0bsum_squares\x18\x05 \x01(\x01\x12\x18\n\x0c\x62ucket_limit\x18\x06 \x03(\x01\x42\x02\x10\x01\x12\x12\n\x06\x62ucket\x18\x07 \x03(\x01\x42\x02\x10\x01\"\x84\x01\n\x0fSummaryMetadata\x12=\n\x0bplugin_data\x18\x01 \x03(\x0b\x32(.tensorboardX.SummaryMetadata.PluginData\x1a\x32\n\nPluginData\x12\x13\n\x0bplugin_name\x18\x01 \x01(\t\x12\x0f\n\x07\x63ontent\x18\x02 \x01(\t\"\xea\x04\n\x07Summary\x12*\n\x05value\x18\x01 \x03(\x0b\x32\x1b.tensorboardX.Summary.Value\x1aX\n\x05Image\x12\x0e\n\x06height\x18\x01 \x01(\x05\x12\r\n\x05width\x18\x02 \x01(\x05\x12\x12\n\ncolorspace\x18\x03 \x01(\x05\x12\x1c\n\x14\x65ncoded_image_string\x18\x04 \x01(\x0c\x1a}\n\x05\x41udio\x12\x13\n\x0bsample_rate\x18\x01 \x01(\x02\x12\x14\n\x0cnum_channels\x18\x02 \x01(\x03\x12\x15\n\rlength_frames\x18\x03 \x01(\x03\x12\x1c\n\x14\x65ncoded_audio_string\x18\x04 \x01(\x0c\x12\x14\n\x0c\x63ontent_type\x18\x05 \x01(\t\x1a\xd9\x02\n\x05Value\x12\x11\n\tnode_name\x18\x07 \x01(\t\x12\x0b\n\x03tag\x18\x01 \x01(\t\x12/\n\x08metadata\x18\t \x01(\x0b\x32\x1d.tensorboardX.SummaryMetadata\x12\x16\n\x0csimple_value\x18\x02 \x01(\x02H\x00\x12&\n\x1cobsolete_old_style_histogram\x18\x03 \x01(\x0cH\x00\x12,\n\x05image\x18\x04 \x01(\x0b\x32\x1b.tensorboardX.Summary.ImageH\x00\x12-\n\x05histo\x18\x05 \x01(\x0b\x32\x1c.tensorboardX.HistogramProtoH\x00\x12,\n\x05\x61udio\x18\x06 \x01(\x0b\x32\x1b.tensorboardX.Summary.AudioH\x00\x12+\n\x06tensor\x18\x08 \x01(\x0b\x32\x19.tensorboardX.TensorProtoH\x00\x42\x07\n\x05valueB.\n\x18org.tensorflow.frameworkB\rSummaryProtosP\x01\xf8\x01\x01\x62\x06proto3')
,
dependencies=[tensorboardX_dot_proto_dot_tensor__pb2.DESCRIPTOR,])
_SUMMARYDESCRIPTION = _descriptor.Descriptor(
name='SummaryDescription',
full_name='tensorboardX.SummaryDescription',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type_hint', full_name='tensorboardX.SummaryDescription.type_hint', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=83,
serialized_end=122,
)
_HISTOGRAMPROTO = _descriptor.Descriptor(
name='HistogramProto',
full_name='tensorboardX.HistogramProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='min', full_name='tensorboardX.HistogramProto.min', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max', full_name='tensorboardX.HistogramProto.max', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num', full_name='tensorboardX.HistogramProto.num', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sum', full_name='tensorboardX.HistogramProto.sum', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sum_squares', full_name='tensorboardX.HistogramProto.sum_squares', index=4,
number=5, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bucket_limit', full_name='tensorboardX.HistogramProto.bucket_limit', index=5,
number=6, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\020\001'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bucket', full_name='tensorboardX.HistogramProto.bucket', index=6,
number=7, type=1, cpp_type=5, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\020\001'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=125,
serialized_end=260,
)
_SUMMARYMETADATA_PLUGINDATA = _descriptor.Descriptor(
name='PluginData',
full_name='tensorboardX.SummaryMetadata.PluginData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='plugin_name', full_name='tensorboardX.SummaryMetadata.PluginData.plugin_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='content', full_name='tensorboardX.SummaryMetadata.PluginData.content', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=345,
serialized_end=395,
)
_SUMMARYMETADATA = _descriptor.Descriptor(
name='SummaryMetadata',
full_name='tensorboardX.SummaryMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='plugin_data', full_name='tensorboardX.SummaryMetadata.plugin_data', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_SUMMARYMETADATA_PLUGINDATA, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=263,
serialized_end=395,
)
_SUMMARY_IMAGE = _descriptor.Descriptor(
name='Image',
full_name='tensorboardX.Summary.Image',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='height', full_name='tensorboardX.Summary.Image.height', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='width', full_name='tensorboardX.Summary.Image.width', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='colorspace', full_name='tensorboardX.Summary.Image.colorspace', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='encoded_image_string', full_name='tensorboardX.Summary.Image.encoded_image_string', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=453,
serialized_end=541,
)
_SUMMARY_AUDIO = _descriptor.Descriptor(
name='Audio',
full_name='tensorboardX.Summary.Audio',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='sample_rate', full_name='tensorboardX.Summary.Audio.sample_rate', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_channels', full_name='tensorboardX.Summary.Audio.num_channels', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='length_frames', full_name='tensorboardX.Summary.Audio.length_frames', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='encoded_audio_string', full_name='tensorboardX.Summary.Audio.encoded_audio_string', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='content_type', full_name='tensorboardX.Summary.Audio.content_type', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=543,
serialized_end=668,
)
_SUMMARY_VALUE = _descriptor.Descriptor(
name='Value',
full_name='tensorboardX.Summary.Value',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='node_name', full_name='tensorboardX.Summary.Value.node_name', index=0,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tag', full_name='tensorboardX.Summary.Value.tag', index=1,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='tensorboardX.Summary.Value.metadata', index=2,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='simple_value', full_name='tensorboardX.Summary.Value.simple_value', index=3,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='obsolete_old_style_histogram', full_name='tensorboardX.Summary.Value.obsolete_old_style_histogram', index=4,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image', full_name='tensorboardX.Summary.Value.image', index=5,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='histo', full_name='tensorboardX.Summary.Value.histo', index=6,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='audio', full_name='tensorboardX.Summary.Value.audio', index=7,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tensor', full_name='tensorboardX.Summary.Value.tensor', index=8,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='value', full_name='tensorboardX.Summary.Value.value',
index=0, containing_type=None, fields=[]),
],
serialized_start=671,
serialized_end=1016,
)
_SUMMARY = _descriptor.Descriptor(
name='Summary',
full_name='tensorboardX.Summary',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='tensorboardX.Summary.value', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_SUMMARY_IMAGE, _SUMMARY_AUDIO, _SUMMARY_VALUE, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=398,
serialized_end=1016,
)
_SUMMARYMETADATA_PLUGINDATA.containing_type = _SUMMARYMETADATA
_SUMMARYMETADATA.fields_by_name['plugin_data'].message_type = _SUMMARYMETADATA_PLUGINDATA
_SUMMARY_IMAGE.containing_type = _SUMMARY
_SUMMARY_AUDIO.containing_type = _SUMMARY
_SUMMARY_VALUE.fields_by_name['metadata'].message_type = _SUMMARYMETADATA
_SUMMARY_VALUE.fields_by_name['image'].message_type = _SUMMARY_IMAGE
_SUMMARY_VALUE.fields_by_name['histo'].message_type = _HISTOGRAMPROTO
_SUMMARY_VALUE.fields_by_name['audio'].message_type = _SUMMARY_AUDIO
_SUMMARY_VALUE.fields_by_name['tensor'].message_type = tensorboardX_dot_proto_dot_tensor__pb2._TENSORPROTO
_SUMMARY_VALUE.containing_type = _SUMMARY
_SUMMARY_VALUE.oneofs_by_name['value'].fields.append(
_SUMMARY_VALUE.fields_by_name['simple_value'])
_SUMMARY_VALUE.fields_by_name['simple_value'].containing_oneof = _SUMMARY_VALUE.oneofs_by_name['value']
_SUMMARY_VALUE.oneofs_by_name['value'].fields.append(
_SUMMARY_VALUE.fields_by_name['obsolete_old_style_histogram'])
_SUMMARY_VALUE.fields_by_name['obsolete_old_style_histogram'].containing_oneof = _SUMMARY_VALUE.oneofs_by_name['value']
_SUMMARY_VALUE.oneofs_by_name['value'].fields.append(
_SUMMARY_VALUE.fields_by_name['image'])
_SUMMARY_VALUE.fields_by_name['image'].containing_oneof = _SUMMARY_VALUE.oneofs_by_name['value']
_SUMMARY_VALUE.oneofs_by_name['value'].fields.append(
_SUMMARY_VALUE.fields_by_name['histo'])
_SUMMARY_VALUE.fields_by_name['histo'].containing_oneof = _SUMMARY_VALUE.oneofs_by_name['value']
_SUMMARY_VALUE.oneofs_by_name['value'].fields.append(
_SUMMARY_VALUE.fields_by_name['audio'])
_SUMMARY_VALUE.fields_by_name['audio'].containing_oneof = _SUMMARY_VALUE.oneofs_by_name['value']
_SUMMARY_VALUE.oneofs_by_name['value'].fields.append(
_SUMMARY_VALUE.fields_by_name['tensor'])
_SUMMARY_VALUE.fields_by_name['tensor'].containing_oneof = _SUMMARY_VALUE.oneofs_by_name['value']
_SUMMARY.fields_by_name['value'].message_type = _SUMMARY_VALUE
DESCRIPTOR.message_types_by_name['SummaryDescription'] = _SUMMARYDESCRIPTION
DESCRIPTOR.message_types_by_name['HistogramProto'] = _HISTOGRAMPROTO
DESCRIPTOR.message_types_by_name['SummaryMetadata'] = _SUMMARYMETADATA
DESCRIPTOR.message_types_by_name['Summary'] = _SUMMARY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SummaryDescription = _reflection.GeneratedProtocolMessageType('SummaryDescription', (_message.Message,), dict(
DESCRIPTOR = _SUMMARYDESCRIPTION,
__module__ = 'tensorboardX.proto.summary_pb2'
# @@protoc_insertion_point(class_scope:tensorboardX.SummaryDescription)
))
_sym_db.RegisterMessage(SummaryDescription)
HistogramProto = _reflection.GeneratedProtocolMessageType('HistogramProto', (_message.Message,), dict(
DESCRIPTOR = _HISTOGRAMPROTO,
__module__ = 'tensorboardX.proto.summary_pb2'
# @@protoc_insertion_point(class_scope:tensorboardX.HistogramProto)
))
_sym_db.RegisterMessage(HistogramProto)
SummaryMetadata = _reflection.GeneratedProtocolMessageType('SummaryMetadata', (_message.Message,), dict(
PluginData = _reflection.GeneratedProtocolMessageType('PluginData', (_message.Message,), dict(
DESCRIPTOR = _SUMMARYMETADATA_PLUGINDATA,
__module__ = 'tensorboardX.proto.summary_pb2'
# @@protoc_insertion_point(class_scope:tensorboardX.SummaryMetadata.PluginData)
))
,
DESCRIPTOR = _SUMMARYMETADATA,
__module__ = 'tensorboardX.proto.summary_pb2'
# @@protoc_insertion_point(class_scope:tensorboardX.SummaryMetadata)
))
_sym_db.RegisterMessage(SummaryMetadata)
_sym_db.RegisterMessage(SummaryMetadata.PluginData)
Summary = _reflection.GeneratedProtocolMessageType('Summary', (_message.Message,), dict(
Image = _reflection.GeneratedProtocolMessageType('Image', (_message.Message,), dict(
DESCRIPTOR = _SUMMARY_IMAGE,
__module__ = 'tensorboardX.proto.summary_pb2'
# @@protoc_insertion_point(class_scope:tensorboardX.Summary.Image)
))
,
Audio = _reflection.GeneratedProtocolMessageType('Audio', (_message.Message,), dict(
DESCRIPTOR = _SUMMARY_AUDIO,
__module__ = 'tensorboardX.proto.summary_pb2'
# @@protoc_insertion_point(class_scope:tensorboardX.Summary.Audio)
))
,
Value = _reflection.GeneratedProtocolMessageType('Value', (_message.Message,), dict(
DESCRIPTOR = _SUMMARY_VALUE,
__module__ = 'tensorboardX.proto.summary_pb2'
# @@protoc_insertion_point(class_scope:tensorboardX.Summary.Value)
))
,
DESCRIPTOR = _SUMMARY,
__module__ = 'tensorboardX.proto.summary_pb2'
# @@protoc_insertion_point(class_scope:tensorboardX.Summary)
))
_sym_db.RegisterMessage(Summary)
_sym_db.RegisterMessage(Summary.Image)
_sym_db.RegisterMessage(Summary.Audio)
_sym_db.RegisterMessage(Summary.Value)
DESCRIPTOR._options = None
_HISTOGRAMPROTO.fields_by_name['bucket_limit']._options = None
_HISTOGRAMPROTO.fields_by_name['bucket']._options = None
# @@protoc_insertion_point(module_scope)
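# --- Illustrative usage sketch (added commentary; not emitted by protoc) ---
# The message classes registered above (Summary, Summary.Value, HistogramProto)
# can be instantiated directly; the field names below (tag, simple_value, value)
# come from the descriptors defined in this module.
def _example_scalar_summary(tag="loss", value=0.5):
    """Builds a minimal Summary carrying a single simple_value entry (illustrative only)."""
    return Summary(value=[Summary.Value(tag=tag, simple_value=value)])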
| 42.34717
| 1,931
| 0.752762
|
6352f1fcf4ec3521127410b0df131336453f51f7
| 2,845
|
py
|
Python
|
upstream/wav2vec2/expert.py
|
simpleoier/s3prl
|
05d6fdebd8fc4e20ded01de9261028885f7f9181
|
[
"MIT"
] | null | null | null |
upstream/wav2vec2/expert.py
|
simpleoier/s3prl
|
05d6fdebd8fc4e20ded01de9261028885f7f9181
|
[
"MIT"
] | null | null | null |
upstream/wav2vec2/expert.py
|
simpleoier/s3prl
|
05d6fdebd8fc4e20ded01de9261028885f7f9181
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ upstream/wav2vec2/expert.py ]
# Synopsis [ the wav2vec2 wrapper ]
# Author [ S3PRL ]
# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]
"""*********************************************************************************************"""
###############
# IMPORTATION #
###############
import os
import math
import yaml
import random
from packaging import version
#-------------#
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pad_sequence
#-------------#
import fairseq
############
# CONSTANT #
############
SAMPLE_RATE = 16000
EXAMPLE_SEC = 5
###################
# UPSTREAM EXPERT #
###################
class UpstreamExpert(nn.Module):
"""
The wav2vec 2.0 wrapper
"""
def __init__(self, ckpt, **kwargs):
super(UpstreamExpert, self).__init__()
assert version.parse(fairseq.__version__) >= version.parse("0.10.2")
model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([ckpt])
self.model = model[0]
pseudo_input = torch.randn(1, SAMPLE_RATE * EXAMPLE_SEC)
padding_mask = torch.zeros(1, SAMPLE_RATE * EXAMPLE_SEC).long().bool()
result = self.model.extract_features(pseudo_input, padding_mask)
pseudo_feature, padding_mask = result['x'], result['padding_mask']
self.output_dim = pseudo_feature.size(-1)
# Interface
def get_output_dim(self):
return self.output_dim
# Interface
def get_downsample_rate(self):
return 320
# Interface
def forward(self, wavs):
"""
Args:
wavs:
list of unpadded wavs [wav1, wav2, ...]
each wav is in torch.FloatTensor with sample rate 16000
and already put in the device assigned by command-line args
Return:
features:
list of unpadded features [feat1, feat2, ...]
each feat is in torch.FloatTensor and already
put in the device assigned by command-line args
"""
device = wavs[0].device
wav_lengths = torch.LongTensor([len(wav) for wav in wavs]).to(device)
wav_padding_mask = ~torch.lt(
torch.arange(max(wav_lengths)).unsqueeze(0).to(device),
wav_lengths.unsqueeze(1)
)
padded_wav = pad_sequence(wavs, batch_first=True)
result = self.model.extract_features(padded_wav, wav_padding_mask)
features, feat_padding_mask = result['x'], result['padding_mask']
feat_lengths = (features.size(1) - feat_padding_mask.sum(dim=-1)).tolist()
features = [feat[:length] for feat, length in zip(features, feat_lengths)]
return features
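# --- Usage sketch (illustrative; the checkpoint path below is a placeholder, ---
# --- not part of the original wrapper) ---
if __name__ == "__main__":
    ckpt_path = "/path/to/wav2vec2.pt"   # any fairseq wav2vec 2.0 checkpoint
    expert = UpstreamExpert(ckpt_path)
    wavs = [torch.randn(SAMPLE_RATE * 2), torch.randn(SAMPLE_RATE * 3)]
    features = expert(wavs)              # list of [T_i, output_dim] tensors, one per wav
    print(expert.get_output_dim(), [f.shape for f in features])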
| 30.923913
| 99
| 0.560633
|
02732fc756d0307b1dcd1b1c09b05388bd5ab9db
| 221
|
py
|
Python
|
malls/utils/pagination.py
|
wangjianxun1995/mall
|
552b635348cd5778a56c0c1fe630c6f954e99505
|
[
"MIT"
] | null | null | null |
malls/utils/pagination.py
|
wangjianxun1995/mall
|
552b635348cd5778a56c0c1fe630c6f954e99505
|
[
"MIT"
] | null | null | null |
malls/utils/pagination.py
|
wangjianxun1995/mall
|
552b635348cd5778a56c0c1fe630c6f954e99505
|
[
"MIT"
] | null | null | null |
from rest_framework.pagination import LimitOffsetPagination,PageNumberPagination
class StandardResultsSetPagination(PageNumberPagination):
page_size = 2
page_size_query_param = 'page_size'
max_page_size = 20
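# Usage sketch (illustrative; the view, queryset and serializer names below are
# placeholders, not part of this project's code):
#
#   from rest_framework.generics import ListAPIView
#
#   class SKUListView(ListAPIView):
#       queryset = SKU.objects.all()
#       serializer_class = SKUSerializer
#       pagination_class = StandardResultsSetPagination
#
# Clients could then request e.g. /skus/?page=2&page_size=10, with page_size
# capped at max_page_size.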
| 31.571429
| 80
| 0.828054
|
35dac4746735f0a24465bccfaed905f538e1837e
| 2,958
|
py
|
Python
|
code/Iris_pytorch/register_model.py
|
matheesan-CGI/AzureML_MLflow_demo
|
d4791a28b5f131caf94fa2a84998639c25c3afc9
|
[
"MIT"
] | 5
|
2021-05-11T10:31:33.000Z
|
2022-01-16T22:35:44.000Z
|
code/Iris_pytorch/register_model.py
|
matheesan-CGI/AzureML_MLflow_demo
|
d4791a28b5f131caf94fa2a84998639c25c3afc9
|
[
"MIT"
] | null | null | null |
code/Iris_pytorch/register_model.py
|
matheesan-CGI/AzureML_MLflow_demo
|
d4791a28b5f131caf94fa2a84998639c25c3afc9
|
[
"MIT"
] | 3
|
2021-03-19T15:02:36.000Z
|
2021-06-01T13:32:50.000Z
|
"""
Copyright (C) Microsoft Corporation. All rights reserved.
Microsoft Corporation (“Microsoft”) grants you a nonexclusive, perpetual,
royalty-free right to use, copy, and modify the software code provided by us
("Software Code"). You may not sublicense the Software Code or any use of it
(except to your affiliates and to vendors to perform work on your behalf)
through distribution, network access, service agreement, lease, rental, or
otherwise. This license does not purport to express any claim of ownership over
data you may have shared with Microsoft in the creation of the Software Code.
Unless applicable law gives you more rights, Microsoft reserves all other
rights not expressly granted herein, whether by implication, estoppel or
otherwise.
THE SOFTWARE CODE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
MICROSOFT OR ITS LICENSORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THE SOFTWARE CODE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import os, json, sys, azureml.core
from azureml.core import Workspace, Experiment, Run
from azureml.core.model import Model
from azureml.core.authentication import AzureCliAuthentication
# Load the JSON settings file and relevant section
print("Loading settings")
with open(os.path.join("code", "settings.json")) as f:
settings = json.load(f)
deployment_settings = settings["deployment"]
# Get details from Run
print("Loading Run Details")
with open(os.path.join("code", "run_details.json")) as f:
run_details = json.load(f)
# Get workspace
print("Loading Workspace")
cli_auth = AzureCliAuthentication()
config_file_path = os.environ.get("GITHUB_WORKSPACE", default="aml_service")
config_file_name = "aml_arm_config.json"
ws = Workspace.from_config(
path=config_file_path,
auth=cli_auth,
_file_name=config_file_name)
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n')
# Loading Run
print("Loading Run")
experiment = Experiment(workspace=ws, name=run_details["experiment_name"])
run = Run(experiment=experiment, run_id=run_details["run_id"])
# Register model
tags = deployment_settings["model"]["tags"]
model = run.register_model(model_name=deployment_settings["model"]["name"],
model_path=deployment_settings["model"]["path"],
tags=tags,
description=deployment_settings["model"]["description"],
)
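# --- Illustrative layout of the JSON files read above (values are placeholders) ---
# code/settings.json:
#   {"deployment": {"model": {"name": "iris-classifier",
#                             "path": "outputs/model.pth",
#                             "tags": {"framework": "pytorch"},
#                             "description": "Iris classifier example"}}}
# code/run_details.json:
#   {"experiment_name": "iris-train", "run_id": "iris-train_1600000000_0123abcd"}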
| 45.507692
| 87
| 0.751183
|
ea8380861fea4fe461ed94acdd85ceb29c33a3b3
| 40,828
|
py
|
Python
|
packages/QtInventor/QInspectorWidget.py
|
TheHubbit/PyInventor
|
351aa15d6394a43fec5f42e757136e00d007fb5e
|
[
"BSD-3-Clause"
] | 15
|
2015-07-24T06:42:52.000Z
|
2021-09-27T10:09:46.000Z
|
packages/QtInventor/QInspectorWidget.py
|
TheHubbit/PyInventor
|
351aa15d6394a43fec5f42e757136e00d007fb5e
|
[
"BSD-3-Clause"
] | 8
|
2015-04-07T22:49:22.000Z
|
2019-04-11T19:11:47.000Z
|
packages/QtInventor/QInspectorWidget.py
|
TheHubbit/PyInventor
|
351aa15d6394a43fec5f42e757136e00d007fb5e
|
[
"BSD-3-Clause"
] | 9
|
2015-02-25T08:13:17.000Z
|
2022-01-13T18:01:38.000Z
|
# QInspectorWidget class and helper classes implementation
# Author: Thomas Moeller
#
# Copyright (C) the PyInventor contributors. All rights reserved.
# This file is part of PyInventor, distributed under the BSD 3-Clause
# License. For full terms see the included COPYING file.
#
# Acknowledgements:
# For the creation of the PyInventor scene graph editor the tutorials from Yasin Uludag's blog
# on model view programming in Qt have been an indispensable resource. Thank you Yasin!
# See also: http://www.yasinuludag.com/blog/?p=98
#
import inventor as iv
from PySide import QtCore, QtGui
class QSceneObjectProxy(QtCore.QObject):
"""
This class wraps around actual Inventor scene objects and functions as a proxy
for QtCore.QAbstractItemModel classes. While scene objects may have multiple
parents, model nodes may not. Therefore an instance of QSceneObjectProxy is
created for each child regardless of shared instances, thereby creating a one
to one relationship between parents and children.
"""
def __init__(self, sceneObject=None, parent=None, connectedFrom=None, connectedTo=None):
"""Initializes node instance (pass scene object and parent)"""
super(QSceneObjectProxy, self).__init__()
self._sceneObject = sceneObject
self._name = ""
self._children = []
self._parent = parent
self._connectedFrom = connectedFrom
self._connectedTo = connectedTo
if self._sceneObject is not None:
self._name = sceneObject.get_name()
if parent is not None:
parent._children.append(self)
if sceneObject is not None:
self.initializeChildren(sceneObject)
    def createChildProxy(self, sceneObject, parent=None, connectedFrom=None, connectedTo=None):
        """Creates a child proxy for the given scene object unless it already appears on this node's path"""
        if not self.isInPath(sceneObject):
            # the QSceneObjectProxy constructor registers the new proxy with the given parent
            QSceneObjectProxy(sceneObject, parent, connectedFrom, connectedTo)
    def isInPath(self, sceneObject):
        """Returns True if the given scene object already occurs between this node and the root (cycle guard)"""
        if self._sceneObject == sceneObject:
return True
if self.parent() is not None:
return self.parent().isInPath(sceneObject)
return False
    def initializeChildren(self, sceneObject, connectedFrom=None, connectedTo=None):
        """Creates child proxies for group children, public nodekit parts and objects connected via fields"""
if sceneObject is not None:
# add all child nodes
if isinstance(sceneObject, iv.Node):
for child in sceneObject:
self.createChildProxy(child, self)
# add nodekit parts
if isinstance(sceneObject, iv.BaseKit):
publicParts = [p["Name"] for p in sceneObject.get_nodekit_catalog() if p["Public"]]
for part in publicParts:
if (sceneObject.get_field(part).value is not None):
self.createChildProxy(sceneObject.get_field(part).value, self, None, sceneObject.get_field(part))
# add all objects connected through fields
for field in sceneObject.get_field():
for conn in field.get_connections():
if conn.get_container() is not None:
self.createChildProxy(conn.get_container(), self, conn, field)
if field.get_connected_engine() is not None:
output = field.get_connected_engine()
self.createChildProxy(output.get_container(), self, output, field)
def child(self, row):
"""Returns child node at given index"""
return self._children[row]
def childCount(self):
"""Returns number of children"""
return len(self._children)
def parent(self):
"""Returns parent node"""
return self._parent
def row(self):
"""Returns row index of this child relative to the parent node"""
if self._parent is not None:
return self._parent._children.index(self)
return -1
    def isChildNode(self):
        """Returns True if this proxy represents an actual child node rather than a field or engine connection"""
        return self._connectedFrom is None
def title(self):
"""Returns the scene object display title"""
title = ""
if self._sceneObject is not None:
title += self._sceneObject.get_type()
if self._connectedFrom is not None:
title += " " + self._connectedFrom.get_name()
if self._connectedTo is not None:
title += "->" + self._connectedTo.get_name()
return title
def type(self):
"""Returns the scene object type"""
if self._sceneObject is None:
return ""
return self._sceneObject.get_type()
def isGroup(self):
"""Returns True if the scene object is derived from Group"""
if self._sceneObject is None:
return False
return self._sceneObject.check_type("Group")
def name(self):
"""Returns scene object instance name"""
if self._sceneObject is None:
return ""
return self._sceneObject.get_name()
def setName(self, name):
"""Sets new scene object instance name"""
if self._sceneObject is not None:
self._sceneObject.set_name(name)
def sceneObject(self):
"""Returns Inventor scene object represented by this proxy node"""
return self._sceneObject
def setSceneObject(self, node):
"""Sets a new Inventor scene object represented by this proxy node"""
self._sceneObject = node
def changeChildType(self, position, node):
"""Changes the type of a child node"""
if (node is not None) and (position >= 0) and (position < len(self._children)):
if isinstance(self._sceneObject, iv.BaseKit) and self._children[position]._connectedTo is not None:
self._children[position]._connectedTo.value = node
self._children[position].setSceneObject(node)
if isinstance(self._sceneObject, iv.Group):
self._sceneObject[position] = node
self._children[position].initializeChildren(node)
def insertChild(self, position, child):
"""Inserts a new child"""
if position < 0 or position > len(self._children):
return False
self._children.insert(position, child)
child._parent = self
# insert child to scene object too
if self._sceneObject is not None:
if child._sceneObject is not None:
self._sceneObject.insert(position, child._sceneObject)
else:
# insert label node as placeholder
if isinstance(self._sceneObject, iv.Node):
self._sceneObject.insert(position, iv.Label())
return True
def removeChild(self, position):
"""Removes child"""
if position < 0 or position > len(self._children):
return False
child = self._children.pop(position)
child._parent = None
if self._sceneObject is not None:
# delete nodekit part
if isinstance(self._sceneObject, iv.BaseKit):
child._connectedTo.value = None
# remove child from scene object
if isinstance(self._sceneObject, iv.Group) and (position < len(self._sceneObject)):
del self._sceneObject[position]
# disconnect field if it was a field connection
if (child._connectedFrom is not None) and (child._connectedTo is not None):
child._connectedTo.disconnect(child._connectedFrom)
return True
def fields(self):
"""Returns field instances for this scene object"""
if self._sceneObject is None:
return []
allFields = self._sceneObject.get_field()
# remove private parts of node kit from field list
if isinstance(self._sceneObject, iv.BaseKit):
privateParts = [p["Name"] for p in self._sceneObject.get_nodekit_catalog() if not p["Public"]]
for part in privateParts:
field = next((f for f in allFields if f.get_name() == part), None)
if field is not None:
allFields.remove(field)
return allFields
def fieldValue(self, index):
"""Returns field value at given index"""
if self._sceneObject is None:
return None
fields = self.fields()
if len(fields) > index:
fieldName = fields[index].get_name()
# don't serialize value if SFNode or MFNode field
if "FNode" in fields[index].get_type():
return "..."
else:
return self._sceneObject.get(fieldName)
return None
def setFieldValue(self, index, value):
"""Sets new value for field at given index"""
if self._sceneObject is None:
return None
fields = self.fields()
if len(fields) > index:
fieldName = fields[index].get_name()
if fields[index].get_type() == "SFNode":
# this will create the part of a node kit if needed
self._sceneObject.get(fieldName)
if not value.startswith("..."):
fields[index].value = iv.create_object(value) if not value.startswith('"') else iv.create_object(name = value[1:-1])
return True
return self._sceneObject.set(fieldName, value)
return None
class QSceneGraphModel(QtCore.QAbstractItemModel):
"""
Model class whose items represent a scene graph.
"""
sortRole = QtCore.Qt.UserRole + 1
filterRole = QtCore.Qt.UserRole + 2
def __init__(self, root, parent=None):
"""Initializes scene graph model from scene object"""
super(QSceneGraphModel, self).__init__(parent)
self._rootNode = QSceneObjectProxy(root)
self._draggedNodes = None
    def updateNodekit(self, field, parentIdx = QtCore.QModelIndex()):
        """Adds proxy nodes for nodekit parts created after the given part field received a value"""
for i in range(0, self.rowCount(parentIdx)):
idx = self.index(i, 0, parentIdx)
sceneObject = idx.internalPointer()._sceneObject
if isinstance(sceneObject, iv.BaseKit):
publicParts = [p["Name"] for p in sceneObject.get_nodekit_catalog() if p["Public"]]
if field in idx.internalPointer()._sceneObject.get_field() and not field in [c._connectedTo for c in idx.internalPointer()._children]:
if field.get_name() in publicParts and field.value is not None:
self.beginInsertRows(idx, 0, 0)
# only create, constructor already adds child to parent
childNode = QSceneObjectProxy(field.value, idx.internalPointer(), None, field)
self.endInsertRows()
self.updateNodekit(field, idx)
    def updateFieldConnection(self, field, master, parentIdx = QtCore.QModelIndex()):
        """Creates or updates proxy nodes representing a connection from master into the given field"""
for i in range(0, self.rowCount(parentIdx)):
idx = self.index(i, 0, parentIdx)
if (not idx.internalPointer().isChildNode()):
if idx.internalPointer()._connectedTo == field:
                    # update connection of an existing view model node
idx.internalPointer()._sceneObject = master.get_container()
idx.internalPointer()._connectedFrom = master
self.dataChanged.emit(idx, idx)
if field in idx.internalPointer()._sceneObject.get_field() and not field in [c._connectedTo for c in idx.internalPointer()._children]:
# no view model node for connection exists, create one
self.beginInsertRows(idx, 0, 0)
# only create, constructor already adds child to parent
childNode = QSceneObjectProxy(master.get_container(), idx.internalPointer(), master, field)
self.endInsertRows()
self.updateFieldConnection(field, master, idx)
def rootNode(self):
"""Returns model root node (QSceneObjectProxy)"""
return self._rootNode
def rowCount(self, parent):
"""Returns number of rows/children for given parent node"""
if not parent.isValid():
parentNode = self._rootNode
else:
parentNode = parent.internalPointer()
return parentNode.childCount()
def columnCount(self, parent):
"""Returns two as column count (type and name)"""
return 2
def mimeTypes(self):
"""Defines mime type for drag & drop"""
return ["application/SceneGraphEditor.sceneObject"]
def mimeData(self, indices):
"""Remembers nodes being dragged"""
mimeData = QtCore.QMimeData()
encodedData = QtCore.QByteArray()
stream = QtCore.QDataStream(encodedData, QtCore.QIODevice.WriteOnly)
# we simply reconnect the nodes rather than serializing them (performance)
self._draggedNodes = []
for index in indices:
if index.isValid():
if index.column() == 0:
text = self.data(index, QtCore.Qt.DisplayRole)
stream << text
self._draggedNodes.insert(0, index.internalPointer())
mimeData.setData("application/SceneGraphEditor.sceneObject", encodedData);
return mimeData;
def dropMimeData(self, data, action, row, column, parent):
"""Inserts dragged nodes into scene graph"""
if action is QtCore.Qt.IgnoreAction:
return True
if not data.hasFormat("application/SceneGraphEditor.sceneObject"):
return False
if column > 0:
return False
encodedData = data.data("application/SceneGraphEditor.sceneObject")
stream = QtCore.QDataStream(encodedData, QtCore.QIODevice.ReadOnly)
newItems = [];
rows = 0
while not stream.atEnd():
text = stream.readQString()
newItems.append(text)
rows += 1
if parent.isValid():
if not parent.internalPointer().isGroup():
row = parent.row()
parent = parent.parent()
beginRow = row;
        if row == -1:
if parent.isValid():
beginRow = parent.internalPointer().childCount()
else:
beginRow = self._rootNode.childCount()
if (self._draggedNodes is not None):
# normal case, remember nodes when drag starts
if parent.isValid() or not parent.isValid():
parentNode = self.getProxyNodeFromIndex(parent)
self.beginInsertRows(parent, beginRow, beginRow + len(self._draggedNodes) - 1)
for n in self._draggedNodes:
success = parentNode.insertChild(beginRow, QSceneObjectProxy(n.sceneObject()))
self.endInsertRows()
self._draggedNodes = None
else:
# drag and drop between different instances is unsupported, only creates types but not values
self.insertRows(beginRow, rows, parent)
for text in newItems:
idx = self.index(beginRow, 0, parent)
self.setData(idx, text)
beginRow += 1
return True
def data(self, index, role):
"""Returns type and name for scene objects"""
if not index.isValid():
return None
node = index.internalPointer()
if role == QtCore.Qt.UserRole:
return index
if role == QtCore.Qt.FontRole:
font = QtGui.QFont()
font.setItalic(not node.isChildNode())
return font
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
if index.column() == 0:
return node.title()
if index.column() == 1:
return node.name()
if role == QSceneGraphModel.sortRole:
return node.type()
if role == QSceneGraphModel.filterRole:
return node.type()
def setData(self, index, value, role=QtCore.Qt.EditRole):
"""Updates types or names of scene objects"""
if index.isValid():
node = index.internalPointer()
if role == QtCore.Qt.EditRole:
if (index.column() == 0) and (index.parent() is not None):
# type changes: need to call changeChildType on parent so old
# scene object can be replaced by new one
newSceneObject = iv.create_object(value) if not value.startswith('"') else iv.create_object(name = value[1:-1])
if newSceneObject is None:
return False
                    # add children from the previous type
if index.internalPointer()._sceneObject is not None:
childNodes = index.internalPointer()._sceneObject[:]
if isinstance(newSceneObject, iv.Group):
newSceneObject += childNodes
else:
self.removeRows(0, len(childNodes), index)
parentGroup = self._rootNode
if index.parent().internalPointer() is not None:
parentGroup = index.parent().internalPointer()
parentGroup.changeChildType(index.row(), newSceneObject)
if index.column() == 1:
node.setName(value)
self.dataChanged.emit(index, index)
return True
return False
def headerData(self, section, orientation, role):
"""Returns header titles for objects and names"""
if role == QtCore.Qt.DisplayRole:
if section == 0:
return "Scene Object"
else:
return "Name"
def supportedDropActions(self):
"""Returns move and copy as supported drop actions"""
return QtCore.Qt.MoveAction | QtCore.Qt.CopyAction
def flags(self, index):
"""Return flags if items can be selected and modified"""
flags = QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
if isinstance(index.internalPointer(), QSceneObjectProxy) and index.internalPointer().isChildNode():
flags |= QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsDragEnabled | QtCore.Qt.ItemIsDropEnabled
if index.column() == 1:
flags |= QtCore.Qt.ItemIsEditable
return flags
def parent(self, index):
"""Returns parent proxy node"""
node = self.getProxyNodeFromIndex(index)
parentNode = node.parent()
if parentNode is None:
return QtCore.QModelIndex()
if parentNode == self._rootNode:
return QtCore.QModelIndex()
if parentNode.row() is None:
return QtCore.QModelIndex()
return self.createIndex(parentNode.row(), 0, parentNode)
def index(self, row, column, parent = QtCore.QModelIndex()):
"""Returns a QModelIndex that corresponds to the given row, column and parent node"""
parentNode = self.getProxyNodeFromIndex(parent)
childItem = None
if parentNode is not None:
if row < parentNode.childCount():
childItem = parentNode.child(row)
if childItem:
return self.createIndex(row, column, childItem)
else:
return QtCore.QModelIndex()
def getProxyNodeFromIndex(self, index):
"""Returns proxy node contained in index"""
if index.isValid():
node = index.internalPointer()
if node:
return node
return self._rootNode
def insertRows(self, position, rows, parent=QtCore.QModelIndex()):
"""Insert rows into model"""
parentNode = self.getProxyNodeFromIndex(parent)
self.beginInsertRows(parent, position, position + rows - 1)
for row in range(rows):
childCount = parentNode.childCount()
childNode = QSceneObjectProxy()
success = parentNode.insertChild(position, childNode)
self.endInsertRows()
return success
def removeRows(self, position, rows, parent=QtCore.QModelIndex()):
"""Removes rows from model"""
success = True
parentNode = self.getProxyNodeFromIndex(parent)
self.beginRemoveRows(parent, position, position + rows - 1)
for row in range(rows):
success = parentNode.removeChild(position)
self.endRemoveRows()
return success
class QFieldContainerModel(QtCore.QAbstractTableModel):
"""
    Model class whose items represent the fields of a scene object.
"""
def __init__(self, root, parent=None):
"""Initializes model from a proxy node (QSceneObjectProxy)"""
super(QFieldContainerModel, self).__init__(parent)
self._rootNode = root
self._connectionRequest = ()
def rowCount(self, parent):
"""Returns number of rows / fields"""
if not parent.isValid():
parentNode = self._rootNode
else:
parentNode = parent.internalPointer()
return len(parentNode.fields())
def columnCount(self, parent):
"""Returns two as column count for field name and value"""
return 3
def data(self, index, role):
""" Returns field names and values"""
if not index.isValid():
return None
if role == QtCore.Qt.CheckStateRole and index.column() == 1:
if self._rootNode.fields()[index.row()].get_type() == "SFBool":
if self._rootNode.fieldValue(index.row()).startswith("TRUE"):
return QtCore.Qt.Checked
else:
return QtCore.Qt.Unchecked
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
if index.column() == 0:
return self._rootNode.fields()[index.row()].get_name()
elif index.column() == 1:
if self._rootNode.fields()[index.row()].get_type() != "SFBool":
return self._rootNode.fieldValue(index.row())
else:
text = ""
field = self._rootNode.fields()[index.row()]
master = None
if field.get_connected_field() is not None:
master = field.get_connected_field()
if field.get_connected_engine() is not None:
master = field.get_connected_engine()
if master is not None:
if len(master.get_container().get_name()) > 0:
text = '"' + master.get_container().get_name() + '"'
else:
text = master.get_container().get_type()
text += " " + master.get_name()
return text
if role == QtCore.Qt.UserRole:
if index.column() == 1:
return self._rootNode.fields()[index.row()]
if index.column() == 2:
return self._connectionRequest
def setData(self, index, value, role=QtCore.Qt.EditRole):
"""Updates field values"""
if index.isValid() and role == QtCore.Qt.EditRole:
if index.column() == 1:
self._rootNode.setFieldValue(index.row(), value)
self.dataChanged.emit(index, index)
return True
elif index.column() == 2:
objAndField = value.split(" ")
if len(objAndField) > 0:
# trigger that connection field was edited to request the connection to be made
self._connectionRequest = (self._rootNode.fields()[index.row()], objAndField[0], objAndField[1] if len(objAndField) > 1 else "")
self.dataChanged.emit(index, index)
return True
if role == QtCore.Qt.CheckStateRole:
self._rootNode.setFieldValue(index.row(), ("FALSE", "", "TRUE")[value])
return True
return False
def headerData(self, section, orientation, role):
"""Returns field name and value titles"""
if role == QtCore.Qt.DisplayRole:
if orientation == QtCore.Qt.Horizontal:
if section == 0:
return "Field"
elif section == 1:
return "Value"
else:
return "Connection"
return None
def flags(self, index):
"""Values are editable but names aren't"""
flags = QtCore.Qt.ItemIsEnabled
if index.column() == 1:
if self._rootNode.fields()[index.row()].get_type() == "SFBool":
flags |= QtCore.Qt.ItemIsUserCheckable;
else:
flags |= QtCore.Qt.ItemIsEditable
if index.column() == 2:
flags |= QtCore.Qt.ItemIsEditable
return flags
class QSceneGraphFilter(QtGui.QSortFilterProxyModel):
"""
This class implements a custom filter that shows the path to all
nodes in a tree model whose filter criteria are met. It can be used
as a proxy between a view and the scene graph model.
"""
def filterAcceptsRow(self, sourceRow, sourceParent):
"""Returns True if filter criteria is met by node or any of its children"""
if super(QSceneGraphFilter, self).filterAcceptsRow(sourceRow, sourceParent):
return True
if self.filterAcceptsChildren(sourceRow, sourceParent):
return True
return False
def filterAcceptsChildren(self, sourceRow, sourceParent):
"""Recursively checks if filter is met by any child"""
index = self.sourceModel().index(sourceRow, 0, sourceParent);
if not index.isValid():
return False
children = index.model().rowCount(index)
for i in range(children):
if super(QSceneGraphFilter, self).filterAcceptsRow(i, index):
return True
if self.filterAcceptsChildren(i, index):
return True
return False
class QSceneObjectTypeDelegate(QtGui.QStyledItemDelegate):
"""
Item delegate for entering a scene object type. Available types are populated
in a drop down box and input is validated against registered types in scene
object database.
"""
def __init__(self, parent=None):
"""Initializes delegate"""
super(QSceneObjectTypeDelegate, self).__init__(parent)
self.parent = parent
self._wasUninitialized = False
def createEditor(self, parent, option, index):
"""Creates combo box and populates it with all node types"""
if not index.isValid():
return False
self.currentIndex=index
self.comboBox = QtGui.QComboBox(parent)
self.comboBox.setInsertPolicy(QtGui.QComboBox.NoInsert)
items = iv.classes("Node")
items.sort()
self.comboBox.addItems(items)
self.comboBox.setEditable(True)
return self.comboBox
def setEditorData(self, editor, index):
"""Updates text in edit line"""
value = index.data(QtCore.Qt.DisplayRole)
editor.setCurrentIndex(editor.findText(value))
editor.lineEdit().selectAll()
        self._wasUninitialized = (len(value) == 0)
def setModelData(self, editor, model, index):
"""Updates scene graph model after input"""
if not index.isValid():
return False
typeName = editor.currentText()
if typeName.startswith('"') and typeName.endswith('"'):
# if type name is in quotes, interpret as instance name
sceneObject = iv.create_object(name = typeName[1:-1])
if sceneObject is None:
return False
elif editor.findText(typeName) < 0:
if self._wasUninitialized:
# confirmed but invalid: remove row
index.model().removeRow(index.row(), index.parent())
return True
# not a valid entry
return False
index.model().setData(index, typeName, QtCore.Qt.EditRole)
self.parent.setCurrentIndex(index)
return True
def eventFilter(self, editor, event):
"""Deselect text on enter and 'commits' empty cells for removal from model"""
        if event.type() == QtCore.QEvent.KeyPress:
if event.key() in [QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter]:
# need to deselect any text so it is not cleared before saving to model
editor.lineEdit().deselect()
                if len(editor.currentText()) == 0:
# do not allow confirmation with nothing selected
return False
if event.key() in [QtCore.Qt.Key_Escape, QtCore.Qt.Key_Cancel]:
if self._wasUninitialized:
# this was an uninitialized row:
# trigger commit so setModelData() removes row
self.commitData.emit(editor)
return super(QSceneObjectTypeDelegate, self).eventFilter(editor,event)
def sizeHint(self, option, index):
"""Returns default size for items, enlarged from default size for combo box"""
return QtCore.QSize(200, 19)
class QFieldValueDelegate(QtGui.QStyledItemDelegate):
"""
Item delegate for entering field values. A combo box is created if the
field type is SFEnum. Otherwise a text input is used.
"""
def createEditor(self, parent, option, index):
"""Creates field edit control depending on type."""
editor = None
field = index.model().data(index, QtCore.Qt.UserRole)
if field.get_enums() is not None:
editor = QtGui.QComboBox(parent)
editor.setInsertPolicy(QtGui.QComboBox.NoInsert)
editor.addItems(field.get_enums())
if field.get_type() != "SFEnum":
editor.setEditable(True)
else:
editor = QtGui.QLineEdit(parent)
return editor
def setEditorData(self, editor, index):
"""Updates text in edit line"""
value = index.data(QtCore.Qt.DisplayRole)
if isinstance(editor, QtGui.QComboBox):
if editor.isEditable():
editor.setEditText(value)
else:
editor.setCurrentIndex(editor.findText(value))
else:
editor.setText(value)
def setModelData(self, editor, model, index):
"""Updates field value after input"""
value = ""
if isinstance(editor, QtGui.QComboBox):
value = editor.currentText()
else:
value = editor.text()
index.model().setData(index, value, QtCore.Qt.EditRole)
return True
class QInspectorWidget(QtGui.QSplitter):
"""
    Widget for inspecting and editing scene graphs. It shows the structure
    of a scene as a tree as well as a field editor.
"""
def __init__(self, parent=None):
"""Initializes inspector widget consisting of scene and field editor"""
super(QInspectorWidget, self).__init__(QtCore.Qt.Vertical, parent)
self._filterEdit = QtGui.QLineEdit()
self._graphView = QtGui.QTreeView()
self._fieldView = QtGui.QTableView()
ctrlLayout = QtGui.QVBoxLayout()
ctrlLayout.setContentsMargins(0, 0, 0, 0)
ctrlLayout.setSpacing(1)
self._filterEdit.setPlaceholderText("Search All Scene Objects")
ctrlLayout.addWidget(self._filterEdit)
ctrlLayout.addWidget(self._graphView)
ctrlWidget = QtGui.QWidget()
ctrlWidget.setLayout(ctrlLayout)
self.addWidget(ctrlWidget)
self.addWidget(self._fieldView)
self.setStretchFactor(0, 2)
self._proxyModel = QSceneGraphFilter(self)
self._sceneModel = QSceneGraphModel(iv.Separator(), self)
self._fieldsModel = None
self._proxyModel.setSourceModel(self._sceneModel)
self._proxyModel.setDynamicSortFilter(True)
self._proxyModel.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)
self._proxyModel.setSortRole(QSceneGraphModel.sortRole)
self._proxyModel.setFilterRole(QSceneGraphModel.filterRole)
self._proxyModel.setFilterKeyColumn(0)
self._graphView.setModel(self._proxyModel)
self._graphView.setDragEnabled(True)
self._graphView.setAcceptDrops(True)
self._graphView.setDropIndicatorShown(True)
self._graphView.setDragDropMode(QtGui.QAbstractItemView.DragDrop)
self._graphView.setDefaultDropAction(QtCore.Qt.MoveAction)
self._graphView.setItemDelegateForColumn(0, QSceneObjectTypeDelegate(self))
self._graphView.setSelectionMode(QtGui.QAbstractItemView.SelectionMode.ExtendedSelection)
self._fieldView.horizontalHeader().setStretchLastSection(True)
self._fieldView.verticalHeader().setResizeMode(QtGui.QHeaderView.Fixed)
self._fieldView.verticalHeader().setDefaultSectionSize(0.8 * self._fieldView.verticalHeader().defaultSectionSize())
self._fieldView.verticalHeader().hide()
self._fieldView.setAlternatingRowColors(True)
self._fieldView.setWordWrap(False)
self._fieldView.setShowGrid(False)
self._fieldView.setItemDelegateForColumn(1, QFieldValueDelegate(self))
QtCore.QObject.connect(self._graphView.selectionModel(), QtCore.SIGNAL("currentChanged(QModelIndex, QModelIndex)"), self.setSelection)
QtCore.QObject.connect(self._filterEdit, QtCore.SIGNAL("textChanged(QString)"), self.setFilter)
def attach(self, rootNode):
"""Attaches the scene and field editors to a scene graph"""
if rootNode is not None:
# reset old models
self._proxyModel.setSourceModel(None)
self._fieldView.setModel(None)
# create new ones
self._sceneModel = QSceneGraphModel(rootNode, self)
self._proxyModel.setSourceModel(self._sceneModel)
self._graphView.setColumnWidth(0, 360)
self._graphView.setColumnWidth(1, 100)
self._graphView.header().setStretchLastSection(True)
self._graphView.expandAll()
self._graphView.setFocus(QtCore.Qt.OtherFocusReason)
def setFilter(self, filter):
"""Updates filter used in tree view, linked to filter edito box"""
self._proxyModel.setFilterFixedString(filter)
self._graphView.expandAll()
self._graphView.scrollTo(self._graphView.currentIndex())
def setCurrentIndex(self, index):
"""Sets current selection in scene graph tree view"""
self._graphView.setCurrentIndex(index)
def setSelection(self, current, old):
"""Updates field editor after selection in tree view changed"""
if current.isValid() and current.data(QtCore.Qt.UserRole) is not None:
isFirst = self._fieldsModel is None
self._fieldsModel = QFieldContainerModel(current.data(QtCore.Qt.UserRole).internalPointer())
self._fieldView.setModel(self._fieldsModel)
QtCore.QObject.connect(self._fieldView.model(), QtCore.SIGNAL("dataChanged(QModelIndex, QModelIndex)"), self.fieldChanged)
if isFirst:
self._fieldView.setColumnWidth(0, 150)
self._fieldView.setColumnWidth(1, 200)
self._fieldView.setColumnWidth(2, 100)
    def addFieldConnection(self, field, typeName, masterName):
        """Creates a scene object of the given type and connects its output/field named masterName into field"""
sceneObject = None
if typeName.startswith('"') and typeName.endswith('"'):
# if type name is in quotes, interpret as instance name
sceneObject = iv.create_object(name = typeName[1:-1])
else:
typeAndArgs = typeName.split("(")
if len(typeAndArgs) > 1:
# support initialization arguments in brackets for templated types (e.g. Gate)
sceneObject = iv.create_object(type = typeAndArgs[0], init = typeAndArgs[1][:-1])
else:
# no round brackets means just type name is given
# check for template types and add type argument based on field type
initString = ""
if typeName in ("Gate", "SelectOne", "Concatenate"):
initString = field.get_type().replace("SF", "MF", 1)
if typeName in iv.classes("FieldContainer"):
sceneObject = iv.create_object(type = typeName, init = initString)
if sceneObject is not None:
master = None
if isinstance(sceneObject, iv.Engine):
master = sceneObject.get_output(masterName) if len(masterName) > 0 else sceneObject.get_output()[0] if len(sceneObject.get_output()) > 0 else None
if master is None:
master = sceneObject.get_field(masterName) if len(masterName) > 0 else sceneObject.get_field()[0] if len(sceneObject.get_field()) > 0 else None
if field is not None and master is not None:
if field.connect_from(master):
self._sceneModel.updateFieldConnection(field, master)
return True
return False
def fieldChanged(self, current, old):
"""Updates field editor after selection in tree view changed"""
if current.isValid():
if current.column() == 2:
connectionDetail = current.data(QtCore.Qt.UserRole)
self.addFieldConnection(connectionDetail[0], connectionDetail[1], connectionDetail[2])
elif current.column() == 1:
self._sceneModel.updateNodekit(current.data(QtCore.Qt.UserRole))
def keyPressEvent(self, event):
"""Handles default keyboard events for insert and delete"""
if event.key() in [QtCore.Qt.Key_Delete, QtCore.Qt.Key_Backspace]:
self.deleteObject()
if event.key() in [QtCore.Qt.Key_Insert]:
self.insertObject()
if event.key() in [QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter]:
self.appendObject()
if event.modifiers():
# allow insert with cursor keys while modifier key is pressed
if event.key() == QtCore.Qt.Key_Up:
self.insertObject()
if event.key() == QtCore.Qt.Key_Down:
self.appendObject()
super(QInspectorWidget, self).keyPressEvent(event)
def sizeHint(self):
"""Returns default widget size"""
return QtCore.QSize(512, 512)
def deleteObject(self):
"""Deletes all scene objects currently selected in tree view"""
indices = self._graphView.selectionModel().selectedIndexes()
dataIndices = []
parentNode = None
for index in indices:
if index.isValid():
                if index.column() == 0:
i = index.data(QtCore.Qt.UserRole)
parentNode = i.parent().internalPointer()
dataIndices.append(i)
for index in reversed(dataIndices):
if index.isValid():
self._sceneModel.removeRow(index.row(), index.parent())
def appendObject(self):
"""Appends a new scene object after current selection"""
viewIndex = self._graphView.currentIndex()
if viewIndex.isValid():
dataIndex = viewIndex.data(QtCore.Qt.UserRole)
if dataIndex.internalPointer().isChildNode():
if self._sceneModel.insertRow(dataIndex.row() + 1, dataIndex.parent()):
viewIndex = viewIndex.sibling(viewIndex.row() + 1, viewIndex.column())
self._fieldView.setModel(None)
self._graphView.edit(viewIndex.sibling(viewIndex.row(), 0))
self._graphView.clearSelection()
def insertObject(self):
"""Inserts a new scene object before current selection"""
viewIndex = self._graphView.currentIndex()
if viewIndex.isValid():
dataIndex = viewIndex.data(QtCore.Qt.UserRole)
if dataIndex.internalPointer().isChildNode():
if self._sceneModel.insertRow(dataIndex.row(), dataIndex.parent()):
self._fieldView.setModel(None)
dataIndex = viewIndex.data(QtCore.Qt.UserRole)
self._graphView.edit(viewIndex.sibling(viewIndex.row(), 0))
self._graphView.clearSelection()
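# --- Usage sketch (illustrative; not part of the original module) ---
if __name__ == "__main__":
    import sys
    app = QtGui.QApplication(sys.argv)
    inspector = QInspectorWidget()
    inspector.attach(iv.Separator())   # attach any Inventor scene graph root here
    inspector.show()
    sys.exit(app.exec_())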
| 37.803704
| 162
| 0.602185
|
e0ec5e657a417405b0267127ababf766e9659b29
| 316
|
py
|
Python
|
src/basics/scripts/service_client.py
|
kaiodt/kaio_ros_ws
|
d9ee0edb97d16cf2a0a6074fecd049db7367a032
|
[
"BSD-2-Clause"
] | null | null | null |
src/basics/scripts/service_client.py
|
kaiodt/kaio_ros_ws
|
d9ee0edb97d16cf2a0a6074fecd049db7367a032
|
[
"BSD-2-Clause"
] | null | null | null |
src/basics/scripts/service_client.py
|
kaiodt/kaio_ros_ws
|
d9ee0edb97d16cf2a0a6074fecd049db7367a032
|
[
"BSD-2-Clause"
] | null | null | null |
#! /usr/bin/env python
import rospy
from basics.srv import WordCount
import sys
rospy.init_node('service_client')
rospy.wait_for_service('word_count')
word_counter = rospy.ServiceProxy('word_count', WordCount)
words = ' '.join(sys.argv[1:])
word_count = word_counter(words)
print words, '->', word_count.count
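# Usage sketch (illustrative): with a node providing the 'word_count' service
# running, invoking e.g.
#   rosrun basics service_client.py these are some words
# prints the sentence followed by the count returned by the service.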
| 18.588235
| 58
| 0.756329
|
3a40f9149db442a5c573b5b7b9778fb1f31149bc
| 12,399
|
py
|
Python
|
scipy/io/wavfile.py
|
zerothi/scipy
|
396a3592767c7477d14083053ac7b772951f125e
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/io/wavfile.py
|
zerothi/scipy
|
396a3592767c7477d14083053ac7b772951f125e
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/io/wavfile.py
|
zerothi/scipy
|
396a3592767c7477d14083053ac7b772951f125e
|
[
"BSD-3-Clause"
] | 1
|
2020-08-06T07:23:10.000Z
|
2020-08-06T07:23:10.000Z
|
"""
Module to read / write wav files using numpy arrays
Functions
---------
`read`: Return the sample rate (in samples/sec) and data from a WAV file.
`write`: Write a numpy array as a WAV file.
"""
from __future__ import division, print_function, absolute_import
import sys
import numpy
import struct
import warnings
__all__ = [
'WavFileWarning',
'read',
'write'
]
class WavFileWarning(UserWarning):
pass
WAVE_FORMAT_PCM = 0x0001
WAVE_FORMAT_IEEE_FLOAT = 0x0003
WAVE_FORMAT_EXTENSIBLE = 0xfffe
KNOWN_WAVE_FORMATS = (WAVE_FORMAT_PCM, WAVE_FORMAT_IEEE_FLOAT)
# assumes file pointer is immediately
# after the 'fmt ' id
def _read_fmt_chunk(fid, is_big_endian):
"""
Returns
-------
size : int
size of format subchunk in bytes (minus 8 for "fmt " and itself)
format_tag : int
PCM, float, or compressed format
channels : int
number of channels
fs : int
sampling frequency in samples per second
bytes_per_second : int
overall byte rate for the file
block_align : int
bytes per sample, including all channels
bit_depth : int
bits per sample
"""
if is_big_endian:
fmt = '>'
else:
fmt = '<'
size = res = struct.unpack(fmt+'I', fid.read(4))[0]
bytes_read = 0
if size < 16:
raise ValueError("Binary structure of wave file is not compliant")
res = struct.unpack(fmt+'HHIIHH', fid.read(16))
bytes_read += 16
format_tag, channels, fs, bytes_per_second, block_align, bit_depth = res
if format_tag == WAVE_FORMAT_EXTENSIBLE and size >= (16+2):
ext_chunk_size = struct.unpack(fmt+'H', fid.read(2))[0]
bytes_read += 2
if ext_chunk_size >= 22:
extensible_chunk_data = fid.read(22)
bytes_read += 22
raw_guid = extensible_chunk_data[2+4:2+4+16]
# GUID template {XXXXXXXX-0000-0010-8000-00AA00389B71} (RFC-2361)
# MS GUID byte order: first three groups are native byte order,
# rest is Big Endian
if is_big_endian:
tail = b'\x00\x00\x00\x10\x80\x00\x00\xAA\x00\x38\x9B\x71'
else:
tail = b'\x00\x00\x10\x00\x80\x00\x00\xAA\x00\x38\x9B\x71'
if raw_guid.endswith(tail):
format_tag = struct.unpack(fmt+'I', raw_guid[:4])[0]
else:
raise ValueError("Binary structure of wave file is not compliant")
if format_tag not in KNOWN_WAVE_FORMATS:
raise ValueError("Unknown wave file format")
# move file pointer to next chunk
if size > (bytes_read):
fid.read(size - bytes_read)
return (size, format_tag, channels, fs, bytes_per_second, block_align,
bit_depth)
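# Illustrative sketch (not part of the original module): parses a minimal
# little-endian PCM "fmt " payload built in memory with the helper above.
# The numbers are arbitrary toy settings (mono, 8 kHz, 16-bit).
def _example_read_fmt_chunk():
    import io
    payload = struct.pack('<IHHIIHH', 16, WAVE_FORMAT_PCM, 1, 8000, 16000, 2, 16)
    # Returns (size, format_tag, channels, fs, bytes_per_second, block_align,
    # bit_depth) == (16, 1, 1, 8000, 16000, 2, 16).
    return _read_fmt_chunk(io.BytesIO(payload), is_big_endian=False)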
# assumes file pointer is immediately after the 'data' id
def _read_data_chunk(fid, format_tag, channels, bit_depth, is_big_endian,
mmap=False):
if is_big_endian:
fmt = '>I'
else:
fmt = '<I'
# Size of the data subchunk in bytes
size = struct.unpack(fmt, fid.read(4))[0]
# Number of bytes per sample
bytes_per_sample = bit_depth//8
if bit_depth == 8:
dtype = 'u1'
else:
if is_big_endian:
dtype = '>'
else:
dtype = '<'
if format_tag == WAVE_FORMAT_PCM:
dtype += 'i%d' % bytes_per_sample
else:
dtype += 'f%d' % bytes_per_sample
if not mmap:
data = numpy.fromstring(fid.read(size), dtype=dtype)
else:
start = fid.tell()
data = numpy.memmap(fid, dtype=dtype, mode='c', offset=start,
shape=(size//bytes_per_sample,))
fid.seek(start + size)
if channels > 1:
data = data.reshape(-1, channels)
return data
def _skip_unknown_chunk(fid, is_big_endian):
if is_big_endian:
fmt = '>I'
else:
fmt = '<I'
data = fid.read(4)
# call unpack() and seek() only if we have really read data from file
# otherwise empty read at the end of the file would trigger
# unnecessary exception at unpack() call
# in case data equals somehow to 0, there is no need for seek() anyway
if data:
size = struct.unpack(fmt, data)[0]
fid.seek(size, 1)
def _read_riff_chunk(fid):
str1 = fid.read(4) # File signature
if str1 == b'RIFF':
is_big_endian = False
fmt = '<I'
elif str1 == b'RIFX':
is_big_endian = True
fmt = '>I'
else:
# There are also .wav files with "FFIR" or "XFIR" signatures?
raise ValueError("File format {}... not "
"understood.".format(repr(str1)))
# Size of entire file
file_size = struct.unpack(fmt, fid.read(4))[0] + 8
str2 = fid.read(4)
if str2 != b'WAVE':
raise ValueError("Not a WAV file.")
return file_size, is_big_endian
def read(filename, mmap=False):
"""
Open a WAV file
Return the sample rate (in samples/sec) and data from a WAV file.
Parameters
----------
filename : string or open file handle
Input wav file.
mmap : bool, optional
Whether to read data as memory-mapped.
Only to be used on real files (Default: False).
.. versionadded:: 0.12.0
Returns
-------
rate : int
Sample rate of wav file.
data : numpy array
Data read from wav file. Data-type is determined from the file;
see Notes.
Notes
-----
This function cannot read wav files with 24-bit data.
Common data types: [1]_
===================== =========== =========== =============
WAV format Min Max NumPy dtype
===================== =========== =========== =============
32-bit floating-point -1.0 +1.0 float32
32-bit PCM -2147483648 +2147483647 int32
16-bit PCM -32768 +32767 int16
8-bit PCM 0 255 uint8
===================== =========== =========== =============
Note that 8-bit PCM is unsigned.
References
----------
.. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming
Interface and Data Specifications 1.0", section "Data Format of the
Samples", August 1991
http://www-mmsp.ece.mcgill.ca/documents/audioformats/wave/Docs/riffmci.pdf
"""
if hasattr(filename, 'read'):
fid = filename
mmap = False
else:
fid = open(filename, 'rb')
try:
file_size, is_big_endian = _read_riff_chunk(fid)
fmt_chunk_received = False
channels = 1
bit_depth = 8
format_tag = WAVE_FORMAT_PCM
while fid.tell() < file_size:
# read the next chunk
chunk_id = fid.read(4)
if chunk_id == b'fmt ':
fmt_chunk_received = True
fmt_chunk = _read_fmt_chunk(fid, is_big_endian)
format_tag, channels, fs = fmt_chunk[1:4]
bit_depth = fmt_chunk[6]
if bit_depth not in (8, 16, 32, 64, 128):
raise ValueError("Unsupported bit depth: the wav file "
"has {}-bit data.".format(bit_depth))
elif chunk_id == b'fact':
_skip_unknown_chunk(fid, is_big_endian)
elif chunk_id == b'data':
if not fmt_chunk_received:
raise ValueError("No fmt chunk before data")
data = _read_data_chunk(fid, format_tag, channels, bit_depth,
is_big_endian, mmap)
elif chunk_id == b'LIST':
# Someday this could be handled properly but for now skip it
_skip_unknown_chunk(fid, is_big_endian)
elif chunk_id in (b'JUNK', b'Fake'):
# Skip alignment chunks without warning
_skip_unknown_chunk(fid, is_big_endian)
else:
warnings.warn("Chunk (non-data) not understood, skipping it.",
WavFileWarning)
_skip_unknown_chunk(fid, is_big_endian)
finally:
if not hasattr(filename, 'read'):
fid.close()
else:
fid.seek(0)
return fs, data
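# Illustrative usage sketch (not part of the original module). "example.wav"
# is a placeholder path; any PCM or IEEE-float WAV file that this module can
# parse would do.
def _example_read(path="example.wav"):
    rate, data = read(path)
    # data.shape is (Nsamples,) for mono files, (Nsamples, Nchannels) otherwise.
    return rate, data.shape, data.dtype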
def write(filename, rate, data):
"""
Write a numpy array as a WAV file.
Parameters
----------
filename : string or open file handle
Output wav file.
rate : int
The sample rate (in samples/sec).
data : ndarray
A 1-D or 2-D numpy array of either integer or float data-type.
Notes
-----
* Writes a simple uncompressed WAV file.
* To write multiple-channels, use a 2-D array of shape
(Nsamples, Nchannels).
* The bits-per-sample and PCM/float will be determined by the data-type.
Common data types: [1]_
===================== =========== =========== =============
WAV format Min Max NumPy dtype
===================== =========== =========== =============
32-bit floating-point -1.0 +1.0 float32
32-bit PCM -2147483648 +2147483647 int32
16-bit PCM -32768 +32767 int16
8-bit PCM 0 255 uint8
===================== =========== =========== =============
Note that 8-bit PCM is unsigned.
References
----------
.. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming
Interface and Data Specifications 1.0", section "Data Format of the
Samples", August 1991
http://www-mmsp.ece.mcgill.ca/documents/audioformats/wave/Docs/riffmci.pdf
"""
if hasattr(filename, 'write'):
fid = filename
else:
fid = open(filename, 'wb')
fs = rate
try:
dkind = data.dtype.kind
if not (dkind == 'i' or dkind == 'f' or (dkind == 'u' and
data.dtype.itemsize == 1)):
raise ValueError("Unsupported data type '%s'" % data.dtype)
header_data = b''
header_data += b'RIFF'
header_data += b'\x00\x00\x00\x00'
header_data += b'WAVE'
# fmt chunk
header_data += b'fmt '
if dkind == 'f':
format_tag = WAVE_FORMAT_IEEE_FLOAT
else:
format_tag = WAVE_FORMAT_PCM
if data.ndim == 1:
channels = 1
else:
channels = data.shape[1]
bit_depth = data.dtype.itemsize * 8
bytes_per_second = fs*(bit_depth // 8)*channels
block_align = channels * (bit_depth // 8)
fmt_chunk_data = struct.pack('<HHIIHH', format_tag, channels, fs,
bytes_per_second, block_align, bit_depth)
if not (dkind == 'i' or dkind == 'u'):
# add cbSize field for non-PCM files
fmt_chunk_data += b'\x00\x00'
header_data += struct.pack('<I', len(fmt_chunk_data))
header_data += fmt_chunk_data
# fact chunk (non-PCM files)
if not (dkind == 'i' or dkind == 'u'):
header_data += b'fact'
header_data += struct.pack('<II', 4, data.shape[0])
# check data size (needs to be immediately before the data chunk)
if ((len(header_data)-4-4) + (4+4+data.nbytes)) > 0xFFFFFFFF:
raise ValueError("Data exceeds wave file size limit")
fid.write(header_data)
# data chunk
fid.write(b'data')
fid.write(struct.pack('<I', data.nbytes))
if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and
sys.byteorder == 'big'):
data = data.byteswap()
_array_tofile(fid, data)
# Determine file size and place it in correct
# position at start of the file.
size = fid.tell()
fid.seek(4)
fid.write(struct.pack('<I', size-8))
finally:
if not hasattr(filename, 'write'):
fid.close()
else:
fid.seek(0)
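# Illustrative usage sketch (not part of the original module): writes one
# second of a 440 Hz sine tone as 16-bit PCM. "tone.wav" is a placeholder
# output path.
def _example_write(path="tone.wav", rate=44100):
    t = numpy.linspace(0., 1., rate, endpoint=False)
    tone = (0.5 * numpy.sin(2 * numpy.pi * 440 * t) * 32767).astype(numpy.int16)
    write(path, rate, tone)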
if sys.version_info[0] >= 3:
def _array_tofile(fid, data):
# ravel gives a c-contiguous buffer
fid.write(data.ravel().view('b').data)
else:
def _array_tofile(fid, data):
fid.write(data.tostring())
| 30.9975
| 81
| 0.548593
|
fcfad5e7126b31ccc3798116be6394d06468e2bf
| 2,605
|
py
|
Python
|
src/python/pants/backend/project_info/tasks/dependencies.py
|
silverguo/pants
|
141510d03fbf2b7e1a0b54f66b54088697f6fa51
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/project_info/tasks/dependencies.py
|
silverguo/pants
|
141510d03fbf2b7e1a0b54f66b54088697f6fa51
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/project_info/tasks/dependencies.py
|
silverguo/pants
|
141510d03fbf2b7e1a0b54f66b54088697f6fa51
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.jvm_app import JvmApp
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.backend.project_info.rules.dependencies import DependencyType
from pants.base.payload_field import JarsField, PythonRequirementsField
from pants.task.console_task import ConsoleTask
from pants.util.ordered_set import OrderedSet
class Dependencies(ConsoleTask):
"""Print the target's dependencies."""
@staticmethod
def _is_jvm(target):
return isinstance(target, (JarLibrary, JvmTarget, JvmApp))
@classmethod
def register_options(cls, register):
super().register_options(register)
register(
"--type",
type=DependencyType,
default=DependencyType.SOURCE,
help="Which types of dependencies to find, where `source` means source code dependencies "
"and `3rdparty` means third-party requirements and JARs.",
)
def console_output(self, unused_method_argument):
ordered_closure = OrderedSet()
for target in self.context.target_roots:
if self.act_transitively:
target.walk(ordered_closure.add)
else:
ordered_closure.update(target.dependencies)
include_source = self.get_options().type in [
DependencyType.SOURCE,
DependencyType.SOURCE_AND_THIRD_PARTY,
]
include_3rdparty = self.get_options().type in [
DependencyType.THIRD_PARTY,
DependencyType.SOURCE_AND_THIRD_PARTY,
]
for tgt in ordered_closure:
if include_source:
yield tgt.address.spec
if include_3rdparty:
# TODO(John Sirois): We need an external payload abstraction at which point knowledge
# of jar and requirement payloads can go and this hairball will be untangled.
if isinstance(tgt.payload.get_field("requirements"), PythonRequirementsField):
for requirement in tgt.payload.requirements:
yield str(requirement.requirement)
elif isinstance(tgt.payload.get_field("jars"), JarsField):
for jar in tgt.payload.jars:
data = dict(org=jar.org, name=jar.name, rev=jar.rev)
yield ("{org}:{name}:{rev}" if jar.rev else "{org}:{name}").format(**data)
| 43.416667
| 102
| 0.65643
|
43e0f5b39b8b7188e06291f5a44da32e2cf47254
| 2,316
|
py
|
Python
|
cm_models/ascad_r_value/hyper_parameters.py
|
AISyLab/RL-based-countermeasure-design-for-SCA
|
f03895b4b13b0397f0cc7014d9e7d2738ff2a6a2
|
[
"MIT"
] | null | null | null |
cm_models/ascad_r_value/hyper_parameters.py
|
AISyLab/RL-based-countermeasure-design-for-SCA
|
f03895b4b13b0397f0cc7014d9e7d2738ff2a6a2
|
[
"MIT"
] | null | null | null |
cm_models/ascad_r_value/hyper_parameters.py
|
AISyLab/RL-based-countermeasure-design-for-SCA
|
f03895b4b13b0397f0cc7014d9e7d2738ff2a6a2
|
[
"MIT"
] | null | null | null |
from sklearn import preprocessing
from . import state_space_parameters as ssp
import countermeasures.data_loader as data_loader
import numpy as np
import tensorflow as tf
MODEL_NAME = 'ASCAD_R_Value'
# Number of output neurons
NUM_CLASSES = 256 # Number of output neurons
# Input Size
INPUT_SIZE = 1400
# Batch Queue parameters
TRAIN_BATCH_SIZE = 400 # Batch size for training (scaled linearly with number of gpus used)
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 100_000 # Number of training examples
VALIDATION_FROM_ATTACK_SET = True
EVAL_BATCH_SIZE = TRAIN_BATCH_SIZE # Batch size for validation
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 5000 # Number of validation examples
MAX_EPOCHS = 50 # Max number of epochs to train model
# Training Parameters
OPTIMIZER = 'Adam' # Optimizer (should be in caffe format string)
MAX_LR = 5e-3 # The max LR (scaled linearly with number of gpus used)
# Bulk data folder
BULK_ROOT = '/tudelft.net/staff-bulk/ewi/insy/CYS/spicek/jrijsdijk/rl-paper/ASCAD_R/cm_experiment_value/'
DATA_ROOT = BULK_ROOT + '../data/'
# Trained model dir
TRAINED_MODEL_DIR = BULK_ROOT + 'trained_models'
DB_FILE = DATA_ROOT + 'ascad-variable.h5'
(TRAIN_TRACES, TRAIN_LABELS), (ATTACK_TRACES, ATTACK_LABELS), ATTACK_PLAINTEXT = data_loader.load_hd5(
DB_FILE,
'/Profiling_traces/traces', '/Profiling_traces/labels',
'/Attack_traces/traces', '/Attack_traces/labels',
'/Attack_traces/metadata'
)
NOISE_SCALE = data_loader.get_noise_scale(TRAIN_TRACES)
USE_OCLR = True
MODEL_PREPROCESSING = [
preprocessing.StandardScaler()
]
MODEL_LAYERS = [
tf.keras.layers.Conv1D(128, 3, kernel_initializer='he_uniform', activation='selu', padding='same'),
tf.keras.layers.AveragePooling1D(75, strides=75),
tf.keras.layers.Flatten(name='flatten'),
tf.keras.layers.Dense(30, kernel_initializer='he_uniform', activation='selu'),
tf.keras.layers.Dense(2, kernel_initializer='he_uniform', activation='selu'),
tf.keras.layers.Dense(NUM_CLASSES, activation='softmax')
]
KEY = np.load(DATA_ROOT + 'attack_key.npy')
ATTACK_KEY_BYTE = 2
ATTACK_PRECOMPUTED_BYTE_VALUES = np.load(DATA_ROOT + f'attack_precomputed_byte{ATTACK_KEY_BYTE}_values.npy')
TRACES_PER_ATTACK = 2000 # Maximum number of traces to use per attack
NUM_ATTACKS = 20 # Number of attacks to average the GE over
| 34.567164
| 108
| 0.772453
|
2e2945c791cf18a925ced5c2561e41bbf994d09d
| 424
|
py
|
Python
|
hpc-historias-clinicas/contacto/forms.py
|
btenaglia/hpc-historias-clinicas
|
649d8660381381b1c591667760c122d73071d5ec
|
[
"BSD-3-Clause"
] | null | null | null |
hpc-historias-clinicas/contacto/forms.py
|
btenaglia/hpc-historias-clinicas
|
649d8660381381b1c591667760c122d73071d5ec
|
[
"BSD-3-Clause"
] | null | null | null |
hpc-historias-clinicas/contacto/forms.py
|
btenaglia/hpc-historias-clinicas
|
649d8660381381b1c591667760c122d73071d5ec
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from django import forms
class ContactoForm(forms.Form):
"""
Formulario para el contacto con
soporte técnico
"""
nombre = forms.CharField(widget=forms.TextInput, required=True)
email = forms.EmailField(required=True)
telefono = forms.CharField(widget=forms.TextInput, required=False, label='Teléfono')
mensaje = forms.CharField(widget=forms.Textarea, required=True)
| 30.285714
| 88
| 0.712264
|
384c2da51c31fb7c63ba117d74729beae01ee6ba
| 15,042
|
py
|
Python
|
baselines/baselines/ppo2/ppo2.py
|
amiranas/flow_rl
|
9e6e69991c22bb85a9aeb2d5399bbade1e3bb682
|
[
"Apache-2.0"
] | 26
|
2019-01-11T05:20:53.000Z
|
2022-03-28T11:25:33.000Z
|
baselines/baselines/ppo2/ppo2.py
|
amiranas/flow_rl
|
9e6e69991c22bb85a9aeb2d5399bbade1e3bb682
|
[
"Apache-2.0"
] | 1
|
2019-04-19T17:35:31.000Z
|
2019-04-30T09:03:23.000Z
|
baselines/baselines/ppo2/ppo2.py
|
amiranas/flow_rl
|
9e6e69991c22bb85a9aeb2d5399bbade1e3bb682
|
[
"Apache-2.0"
] | 10
|
2019-03-19T08:21:37.000Z
|
2022-03-11T09:05:35.000Z
|
import os
import os.path as osp
import time
from collections import deque
from os.path import join as p_join
import numpy as np
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from baselines import logger
from baselines.common import explained_variance
from baselines.flow_rl_utils import network
from baselines.ppo2.policies import MImVecPolicy, MImVecLstmPolicy, MImVecLnLstmPolicy
class Model(object):
def __init__(self, *, policy, ob_space, ac_space, nbatch_act, nbatch_train,
nsteps, ent_coef, vf_coef, max_grad_norm, add_flownet,
flownet_path, flownet,
train_from_scratch, large_cnn,
add_predicted_flow_to_vec, diff_frames):
if policy not in (MImVecPolicy, MImVecLstmPolicy, MImVecLnLstmPolicy):
raise NotImplementedError
sess = tf.get_default_session()
act_model = policy(sess, ob_space, ac_space, nbatch_act, 1, reuse=False,
add_flownet=add_flownet,
flownet=flownet,
train_from_scratch=train_from_scratch,
large_cnn=large_cnn,
add_predicted_flow_to_vec=add_predicted_flow_to_vec,
diff_frames=diff_frames)
train_model = policy(sess, ob_space, ac_space, nbatch_train, nsteps, reuse=True,
add_flownet=add_flownet,
flownet=flownet,
train_from_scratch=train_from_scratch,
large_cnn=large_cnn,
add_predicted_flow_to_vec=add_predicted_flow_to_vec,
diff_frames=diff_frames)
A = train_model.pdtype.sample_placeholder([None])
ADV = tf.placeholder(tf.float32, [None])
R = tf.placeholder(tf.float32, [None])
OLDNEGLOGPAC = tf.placeholder(tf.float32, [None])
OLDVPRED = tf.placeholder(tf.float32, [None])
LR = tf.placeholder(tf.float32, [])
CLIPRANGE = tf.placeholder(tf.float32, [])
neglogpac = train_model.pd.neglogp(A)
entropy = tf.reduce_mean(train_model.pd.entropy())
vpred = train_model.vf
vpredclipped = OLDVPRED + tf.clip_by_value(
train_model.vf - OLDVPRED, - CLIPRANGE, CLIPRANGE)
vf_losses1 = tf.square(vpred - R)
vf_losses2 = tf.square(vpredclipped - R)
vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2))
ratio = tf.exp(OLDNEGLOGPAC - neglogpac)
pg_losses = -ADV * ratio
pg_losses2 = -ADV * tf.clip_by_value(ratio, 1.0 - CLIPRANGE, 1.0 + CLIPRANGE)
pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2))
approxkl = .5 * tf.reduce_mean(tf.square(neglogpac - OLDNEGLOGPAC))
clipfrac = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), CLIPRANGE)))
loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef
with tf.variable_scope('model'):
params = tf.trainable_variables()
grads = tf.gradients(loss, params)
if max_grad_norm is not None:
grads, _grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
grads = list(zip(grads, params))
trainer = tf.train.AdamOptimizer(learning_rate=LR, epsilon=1e-5)
_train = trainer.apply_gradients(grads)
def train(lr, cliprange, obs, returns, masks, actions, values, neglogpacs, states=None):
advs = returns - values
advs = (advs - advs.mean()) / (advs.std() + 1e-8)
if isinstance(obs, dict):
td_map = {A: actions, ADV: advs, R: returns, LR: lr,
CLIPRANGE: cliprange, OLDNEGLOGPAC: neglogpacs, OLDVPRED: values}
for key, value in train_model.placeholder_dict.items():
td_map[value] = obs[key]
else:
td_map = {train_model.X: obs, A: actions, ADV: advs, R: returns, LR: lr,
CLIPRANGE: cliprange, OLDNEGLOGPAC: neglogpacs, OLDVPRED: values}
if states is not None:
td_map[train_model.S] = states
td_map[train_model.M] = masks
return sess.run(
[pg_loss, vf_loss, entropy, approxkl, clipfrac, _train],
td_map
)[:-1]
self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac']
self.saver = tf.train.Saver(max_to_keep=20)
def save(save_path, id):
self.saver.save(sess, p_join(save_path, "model{}.ckpt".format(id)))
def load(load_path, id):
self.saver.restore(sess, p_join(load_path, "model{}.ckpt".format(id)))
self.train = train
self.train_model = train_model
self.act_model = act_model
self.step = act_model.step
self.value = act_model.value
self.initial_state = act_model.initial_state
self.save = save
self.load = load
tf.global_variables_initializer().run(session=sess) # pylint: disable=E1101
if add_flownet:
if not train_from_scratch:
assert flownet_path != ""
checkpoint_path = p_join(flownet_path, "flow.ckpt")
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
var_to_shape_map = reader.get_variable_to_shape_map()
l = [var for var in var_to_shape_map
if 'Adam' not in var and 'step' not in var and 'beta' not in var]
lp = [[var, reader.get_tensor(var)[1].shape] for var in var_to_shape_map
if 'Adam' not in var and 'step' not in var and 'beta' not in var]
d = {var: tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='model/' + var)[0]
for var in l}
flow_saver = tf.train.Saver(d)
flow_saver.restore(sess, checkpoint_path)
if train_from_scratch:
tf.global_variables_initializer().run(session=sess)
class ImVecRunner(object):
def __init__(self, *, env, model, nsteps, gamma, lam):
self.env = env
self.model = model
self.nenv = env.num_envs
self.obs = env.reset()
self.gamma = gamma
self.lam = lam
self.nsteps = nsteps
self.states = model.initial_state
self.dones = [False for _ in range(self.nenv)]
self.highest_reward = - float('inf')
def run(self, update_fraction):
mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [], [], [], [], []
mb_obs = {}
for key in self.obs:
mb_obs[key] = []
mb_states = self.states
epinfos = []
for _ in range(self.nsteps):
actions, values, self.states, neglogpacs = self.model.step(self.obs, self.states,
self.dones)
for key, value in self.obs.items():
mb_obs[key].append(value.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(self.dones)
self.obs, rewards, self.dones, infos = self.env.step(actions)
for info in infos:
maybeepinfo = info.get('episode')
if maybeepinfo:
epinfos.append(maybeepinfo)
mb_rewards.append(rewards)
# batch of steps to batch of rollouts
for key, value in mb_obs.items():
mb_obs[key] = np.asarray(value, dtype=value[0].dtype)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=np.bool)
last_values = self.model.value(self.obs, self.states, self.dones)
# discount/bootstrap off value fn
# mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
for t in reversed(range(self.nsteps)):
if t == self.nsteps - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[t+1]
nextvalues = mb_values[t+1]
delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
mb_advs[t] = lastgaelam = delta + self.gamma * self.lam * nextnonterminal * lastgaelam
mb_returns = mb_advs + mb_values
return (*map(dsf01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs)),
mb_states, epinfos)
def dsf01(arr):
"""
swap and then flatten axes 0 and 1
"""
if isinstance(arr, dict):
for key, value in arr.items():
s = value.shape
arr[key] = value.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
return arr
else:
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
def constfn(val):
def f(_):
return val
return f
def learn(*, policy, env, nsteps, total_timesteps, ent_coef, lr,
vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95,
log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2,
save_interval=0, add_flownet=False, flownet_path="",
flow_key=None,
train_from_scratch=False,
large_cnn=False, add_predicted_flow_to_vec=False,
diff_frames=False):
if isinstance(lr, float):
lr = constfn(lr)
else:
assert callable(lr)
if isinstance(cliprange, float):
cliprange = constfn(cliprange)
else:
assert callable(cliprange)
total_timesteps = int(total_timesteps)
nenvs = env.num_envs
ob_space = env.observation_space
ac_space = env.action_space
nbatch = nenvs * nsteps
nbatch_train = nbatch // nminibatches
if flow_key is not None:
flownet = network.flow_dict[flow_key]
else:
flownet = None
def make_model():
return Model(
policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs,
nbatch_train=nbatch_train, nsteps=nsteps, ent_coef=ent_coef,
vf_coef=vf_coef, max_grad_norm=max_grad_norm, add_flownet=add_flownet,
flownet_path=flownet_path,
flownet=flownet,
train_from_scratch=train_from_scratch,
large_cnn=large_cnn, add_predicted_flow_to_vec=add_predicted_flow_to_vec,
diff_frames=diff_frames
)
if save_interval and logger.get_dir():
import cloudpickle
with open(osp.join(logger.get_dir(), 'make_model.pkl'), 'wb') as fh:
fh.write(cloudpickle.dumps(make_model))
model = make_model()
if isinstance(ob_space, dict):
runner = ImVecRunner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam)
else:
raise NotImplementedError
epinfobuf = deque(maxlen=100)
tfirststart = time.time()
nupdates = total_timesteps//nbatch
model_save_interval = nupdates // (total_timesteps // int(1e6))
saved_model_id = 0
for update in range(1, nupdates+1):
assert nbatch % nminibatches == 0
nbatch_train = nbatch // nminibatches
tstart = time.time()
frac = 1.0 - (update - 1.0) / nupdates
lrnow = lr(frac)
cliprangenow = cliprange(frac)
        # pylint: disable=E0632
obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run(
np.float(update) / np.float(nupdates))
epinfobuf.extend(epinfos)
mblossvals = []
if states is None: # nonrecurrent version
inds = np.arange(nbatch)
for _ in range(noptepochs):
np.random.shuffle(inds)
for start in range(0, nbatch, nbatch_train):
end = start + nbatch_train
mbinds = inds[start:end]
slices = (get_part(arr, mbinds) for arr in (obs, returns, masks, actions,
values, neglogpacs))
mblossvals.append(model.train(lrnow, cliprangenow, *slices))
else: # recurrent version
assert nenvs % nminibatches == 0
# envsperbatch = nenvs // nminibatches
envinds = np.arange(nenvs)
flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps)
envsperbatch = nbatch_train // nsteps
for _ in range(noptepochs):
np.random.shuffle(envinds)
for start in range(0, nenvs, envsperbatch):
end = start + envsperbatch
mbenvinds = envinds[start:end]
mbflatinds = flatinds[mbenvinds].ravel()
slices = (get_part(arr, mbflatinds) for arr in (obs, returns, masks, actions,
values, neglogpacs))
mbstates = states[mbenvinds]
mblossvals.append(model.train(lrnow, cliprangenow, *slices, mbstates))
if (update - 1) % model_save_interval == 0:
if isinstance(ob_space, dict):
save_path = p_join(logger.get_dir(), "saves")
if not os.path.isdir(save_path):
os.makedirs(save_path)
model.save(save_path, saved_model_id)
env.save_norm(save_path, saved_model_id)
saved_model_id += 1
lossvals = np.mean(mblossvals, axis=0)
tnow = time.time()
fps = int(nbatch / (tnow - tstart))
if update % log_interval == 0 or update == 1:
ev = explained_variance(values, returns)
logger.logkv("serial_timesteps", update*nsteps)
logger.logkv("nupdates", update)
logger.logkv("total_timesteps", update*nbatch)
logger.logkv("fps", fps)
logger.logkv("explained_variance", float(ev))
logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf]))
logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf]))
logger.logkv('time_elapsed', tnow - tfirststart)
for (lossval, lossname) in zip(lossvals, model.loss_names):
logger.logkv(lossname, lossval)
if isinstance(ob_space, dict):
logger.logkv("highest_reward", runner.highest_reward)
logger.dumpkvs()
env.close()
def get_part(l, mb):
if isinstance(l, dict):
out = {}
for key, value in l.items():
out[key] = value[mb]
return out
else:
return l[mb]
def safemean(xs):
return np.nan if len(xs) == 0 else np.mean(xs)
| 41.324176
| 99
| 0.587156
|
896f470180acf77e502e0a1dd23ea2e8cf3894de
| 2,587
|
py
|
Python
|
tests/st/ops/ascend/vector/test_fused_mean_mul_001.py
|
KnowingNothing/akg-test
|
114d8626b824b9a31af50a482afc07ab7121862b
|
[
"Apache-2.0"
] | 286
|
2020-06-23T06:40:44.000Z
|
2022-03-30T01:27:49.000Z
|
tests/st/ops/ascend/vector/test_fused_mean_mul_001.py
|
KnowingNothing/akg-test
|
114d8626b824b9a31af50a482afc07ab7121862b
|
[
"Apache-2.0"
] | 10
|
2020-07-31T03:26:59.000Z
|
2021-12-27T15:00:54.000Z
|
tests/st/ops/ascend/vector/test_fused_mean_mul_001.py
|
KnowingNothing/akg-test
|
114d8626b824b9a31af50a482afc07ab7121862b
|
[
"Apache-2.0"
] | 30
|
2020-07-17T01:04:14.000Z
|
2021-12-27T14:05:19.000Z
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from tests.common.base import TestBase
############################################################
# TestCase class: put to tests/*/
############################################################
class TestCase(TestBase):
def setup(self):
case_name = "test_akg_fused_mean_mul_001"
case_path = os.getcwd()
self.params_init(case_name, case_path)
self.caseresult = True
self._log.info("============= {0} Setup case============".format(self.casename))
self.testarg = [
# testflag,opfuncname,testRunArgs, dimArgs
("001_fused_mean_mul", "fused_mean_mul_run", ((8, 3), (3,), 'float16', (0,), False, 'cce_mean_mul_fp16'), ),
("002_fused_mean_mul", "fused_mean_mul_run", ((8, 3), (8,), 'float16', (1,), False, 'cce_mean_mul_fp16'), ),
("003_fused_mean_mul", "fused_mean_mul_run", ((64, 128, 1024), (64, 128), 'float16', (2,), False, 'cce_mean_mul_fp16'), ),
("004_fused_mean_mul", "fused_mean_mul_run", ((32, 128, 7, 7, 16), (32, 128, 16), 'float16', (2, 3), False, 'cce_mean_mul_fp16'), ),
]
self.testarg_rpc_cloud = [
]
self.testarg_cloud_level0 = [
]
self.testarg_level2 = [
]
return
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_run(self):
self.common_run(self.testarg)
def test_run_rpc_cloud(self):
        # testarg_rpc_cloud is empty in this file, so indexing fixed positions
        # would raise an IndexError; run whatever the list contains instead.
        self.common_run(self.testarg_rpc_cloud)
def test_run_cloud_level0(self):
self.common_run(self.testarg_cloud_level0)
def test_run_level2(self):
self.common_run(self.testarg_level2)
def teardown(self):
self._log.info("============= {0} Teardown============".format(self.casename))
return
if __name__ == "__main__":
t = TestCase()
t.setup()
t.test_run()
t.teardown()
| 34.493333
| 144
| 0.614612
|
1017c5028d954766cea293328aca166608720cbc
| 5,068
|
py
|
Python
|
acme/utils/reverb_utils.py
|
GACWR/acme
|
764a92c09673cb826cdaf7ad157c1aab451507df
|
[
"Apache-2.0"
] | 1
|
2022-03-31T17:24:10.000Z
|
2022-03-31T17:24:10.000Z
|
acme/utils/reverb_utils.py
|
GACWR/acme
|
764a92c09673cb826cdaf7ad157c1aab451507df
|
[
"Apache-2.0"
] | null | null | null |
acme/utils/reverb_utils.py
|
GACWR/acme
|
764a92c09673cb826cdaf7ad157c1aab451507df
|
[
"Apache-2.0"
] | null | null | null |
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reverb utils.
Contains functions manipulating reverb tables and samples.
"""
from acme import types
import jax
import numpy as np
import reverb
from reverb import item_selectors
from reverb import rate_limiters
from reverb import reverb_types
import tensorflow as tf
import tree
def make_replay_table_from_info(
table_info: reverb_types.TableInfo) -> reverb.Table:
"""Build a replay table out of its specs in a TableInfo.
Args:
table_info: A TableInfo containing the Table specs.
Returns:
A reverb replay table matching the info specs.
"""
sampler = _make_selector_from_key_distribution_options(
table_info.sampler_options)
remover = _make_selector_from_key_distribution_options(
table_info.remover_options)
rate_limiter = _make_rate_limiter_from_rate_limiter_info(
table_info.rate_limiter_info)
return reverb.Table(
name=table_info.name,
sampler=sampler,
remover=remover,
max_size=table_info.max_size,
rate_limiter=rate_limiter,
max_times_sampled=table_info.max_times_sampled,
signature=table_info.signature)
def _make_selector_from_key_distribution_options(
options) -> reverb_types.SelectorType:
"""Returns a Selector from its KeyDistributionOptions description."""
one_of = options.WhichOneof('distribution')
if one_of == 'fifo':
return item_selectors.Fifo()
if one_of == 'uniform':
return item_selectors.Uniform()
if one_of == 'prioritized':
return item_selectors.Prioritized(options.prioritized.priority_exponent)
if one_of == 'heap':
if options.heap.min_heap:
return item_selectors.MinHeap()
return item_selectors.MaxHeap()
if one_of == 'lifo':
return item_selectors.Lifo()
raise ValueError(f'Unknown distribution field: {one_of}')
def _make_rate_limiter_from_rate_limiter_info(
info) -> rate_limiters.RateLimiter:
return rate_limiters.SampleToInsertRatio(
samples_per_insert=info.samples_per_insert,
min_size_to_sample=info.min_size_to_sample,
error_buffer=(info.min_diff, info.max_diff))
def replay_sample_to_sars_transition(
sample: reverb.ReplaySample,
is_sequence: bool,
strip_last_transition: bool = False,
flatten_batch: bool = False) -> types.Transition:
"""Converts the replay sample to a types.Transition.
NB: If is_sequence is True then the last next_observation of each sequence is
rubbish. Don't train on it.
Args:
sample: The replay sample
is_sequence: If False we expect the sample data to match the
types.Transition already. Otherwise we expect a batch of sequences of
steps.
strip_last_transition: If True and is_sequence, the last transition will be
stripped as its next_observation field is incorrect.
    flatten_batch: If True and is_sequence, the two batch dimensions will be
      flattened into one.
Returns:
A types.Transition built from the sample data.
    If is_sequence and strip_last_transition are both True, the output will be
    smaller than the input, as the last transition of every sequence will have
    been removed.
"""
if not is_sequence:
return types.Transition(*sample.data)
# Note that the last next_observation is invalid.
steps = sample.data
def roll(observation):
return np.roll(observation, shift=-1, axis=1)
transitions = types.Transition(
observation=steps.observation,
action=steps.action,
reward=steps.reward,
discount=steps.discount,
next_observation=tree.map_structure(roll, steps.observation),
extras=steps.extras)
if strip_last_transition:
# We remove the last transition as its next_observation field is incorrect.
# It has been obtained by rolling the observation field, such that
# transitions.next_observations[:, -1] is transitions.observations[:, 0]
transitions = jax.tree_map(lambda x: x[:, :-1, ...], transitions)
if flatten_batch:
# Merge the 2 leading batch dimensions into 1.
transitions = jax.tree_map(lambda x: np.reshape(x, (-1,) + x.shape[2:]),
transitions)
return transitions
def transition_to_replaysample(
transitions: types.Transition) -> reverb.ReplaySample:
"""Converts a types.Transition to a reverb.ReplaySample."""
info = tree.map_structure(lambda dtype: tf.ones([], dtype),
reverb.SampleInfo.tf_dtypes())
return reverb.ReplaySample(info=info, data=transitions)
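# Illustrative sketch (not part of the original module): builds a tiny batch of
# toy transitions (the shapes and dtypes below are made up for the example) and
# wraps it as a reverb.ReplaySample via transition_to_replaysample.
def _example_transition_to_replaysample(batch_size: int = 2):
  transitions = types.Transition(
      observation=np.zeros((batch_size, 3), dtype=np.float32),
      action=np.zeros((batch_size,), dtype=np.int32),
      reward=np.zeros((batch_size,), dtype=np.float32),
      discount=np.ones((batch_size,), dtype=np.float32),
      next_observation=np.zeros((batch_size, 3), dtype=np.float32),
      extras=())
  return transition_to_replaysample(transitions)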
| 35.690141
| 79
| 0.740923
|
6cc715f23f8970e477a9a77321fc57f51927838e
| 21,276
|
py
|
Python
|
sktime/classification/interval_based/_drcif.py
|
AreloTanoh/sktime
|
34f3a62a1ec86ff19da313aa421b4594f02ed993
|
[
"BSD-3-Clause"
] | 2
|
2020-12-25T08:08:38.000Z
|
2021-04-07T08:00:56.000Z
|
sktime/classification/interval_based/_drcif.py
|
afzal442/sktime
|
294429e7f2ac5824171bb61ad075e0af0055cb02
|
[
"BSD-3-Clause"
] | null | null | null |
sktime/classification/interval_based/_drcif.py
|
afzal442/sktime
|
294429e7f2ac5824171bb61ad075e0af0055cb02
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""DrCIF classifier.
Interval-based DrCIF classifier extracting catch22 features from random intervals on
the periodogram and differences representations as well as the base series.
"""
__author__ = ["MatthewMiddlehurst"]
__all__ = ["DrCIF"]
import math
import time
import numpy as np
from joblib import Parallel, delayed
from sklearn.base import BaseEstimator
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_random_state
from sklearn.utils.multiclass import class_distribution
from sktime.base._base import _clone_estimator
from sktime.classification.base import BaseClassifier
from sktime.contrib.vector_classifiers._continuous_interval_tree import (
_drcif_feature,
ContinuousIntervalTree,
)
from sktime.transformations.panel.catch22 import Catch22
from sktime.utils.validation import check_n_jobs
from sktime.utils.validation.panel import check_X_y
class DrCIF(BaseClassifier):
"""Diverse Representation Canonical Interval Forest Classifier (DrCIF).
    Extension of the CIF algorithm using multiple representations. Implementation of the
interval based forest making use of the catch22 feature set on randomly selected
intervals on the base series, periodogram representation and differences
representation described in the HIVE-COTE 2.0 paper Middlehurst et al (2021). [1]_
Overview: Input "n" series with "d" dimensions of length "m".
For each tree
- Sample n_intervals intervals per representation of random position and length
- Subsample att_subsample_size catch22 or summary statistic attributes randomly
- Randomly select dimension for each interval
- Calculate attributes for each interval from its representation, concatenate
to form new data set
- Build decision tree on new data set
Ensemble the trees with averaged probability estimates
Parameters
----------
n_estimators : int, default=200
Number of estimators to build for the ensemble.
n_intervals : int, length 3 list of int or None, default=None
Number of intervals to extract per representation per tree as an int for all
representations or list for individual settings, if None extracts
(4 + (sqrt(representation_length) * sqrt(n_dims)) / 3) intervals.
att_subsample_size : int, default=10
Number of catch22 or summary statistic attributes to subsample per tree.
min_interval : int or length 3 list of int, default=4
Minimum length of an interval per representation as an int for all
representations or list for individual settings.
max_interval : int, length 3 list of int or None, default=None
Maximum length of an interval per representation as an int for all
representations or list for individual settings, if None set to
(representation_length / 2).
base_estimator : BaseEstimator or str, default="DTC"
Base estimator for the ensemble, can be supplied a sklearn BaseEstimator or a
string for suggested options.
"DTC" uses the sklearn DecisionTreeClassifier using entropy as a splitting
measure.
"CIT" uses the sktime ContinuousIntervalTree, an implementation of the original
tree used with embedded attribute processing for faster predictions.
time_limit_in_minutes : int, default=0
Time contract to limit build time in minutes, overriding n_estimators.
Default of 0 means n_estimators is used.
contract_max_n_estimators : int, default=500
Max number of estimators when time_limit_in_minutes is set.
save_transformed_data : bool, default=False
Save the data transformed in fit for use in _get_train_probs.
n_jobs : int, default=1
The number of jobs to run in parallel for both `fit` and `predict`.
``-1`` means using all processors.
random_state : int or None, default=None
Seed for random number generation.
Attributes
----------
n_classes : int
The number of classes.
n_instances : int
The number of train cases.
n_dims : int
The number of dimensions per case.
series_length : int
The length of each series.
classes_ : list
The classes labels.
total_intervals : int
Total number of intervals per tree from all representations.
estimators_ : list of shape (n_estimators) of BaseEstimator
The collections of estimators trained in fit.
intervals : list of shape (n_estimators) of ndarray with shape (total_intervals,2)
Stores indexes of each intervals start and end points for all classifiers.
atts : list of shape (n_estimators) of array with shape (att_subsample_size)
Attribute indexes of the subsampled catch22 or summary statistic for all
classifiers.
dims : list of shape (n_estimators) of array with shape (total_intervals)
The dimension to extract attributes from each interval for all classifiers.
transformed_data : list of shape (n_estimators) of ndarray with shape
(n_instances,total_intervals * att_subsample_size)
The transformed dataset for all classifiers. Only saved when
save_transformed_data is true.
See Also
--------
CanonicalIntervalForest
Notes
-----
For the Java version, see
`TSML <https://github.com/uea-machine-learning/tsml/blob/master/src/main/java
/tsml/classifiers/interval_based/DrCIF.java>`_.
References
----------
.. [1] Middlehurst, Matthew, James Large, Michael Flynn, Jason Lines, Aaron Bostrom,
and Anthony Bagnall. "HIVE-COTE 2.0: a new meta ensemble for time series
classification." arXiv preprint arXiv:2104.07551 (2021).
Examples
--------
>>> from sktime.classification.interval_based import DrCIF
>>> from sktime.datasets import load_unit_test
>>> X_train, y_train = load_unit_test(split="train", return_X_y=True)
>>> X_test, y_test = load_unit_test(split="test", return_X_y=True)
>>> clf = DrCIF(n_estimators=10)
>>> clf.fit(X_train, y_train)
DrCIF(...)
>>> y_pred = clf.predict(X_test)
"""
_tags = {
"capability:multivariate": True,
"capability:unequal_length": False,
"capability:missing_values": False,
"capability:train_estimate": True,
"capability:contractable": True,
}
def __init__(
self,
n_estimators=200,
n_intervals=None,
att_subsample_size=10,
min_interval=4,
max_interval=None,
base_estimator="DTC",
time_limit_in_minutes=0.0,
contract_max_n_estimators=500,
save_transformed_data=False,
n_jobs=1,
random_state=None,
):
self.n_estimators = n_estimators
self.n_intervals = n_intervals
self.att_subsample_size = att_subsample_size
self.min_interval = min_interval
self.max_interval = max_interval
self.base_estimator = base_estimator
self.time_limit_in_minutes = time_limit_in_minutes
self.contract_max_n_estimators = contract_max_n_estimators
self.save_transformed_data = save_transformed_data
self.random_state = random_state
self.n_jobs = n_jobs
# The following set in method fit
self.n_classes = 0
self.n_instances = 0
self.n_dims = 0
self.series_length = 0
self.classes_ = []
self.total_intervals = 0
self.estimators_ = []
self.intervals = []
self.atts = []
self.dims = []
self.transformed_data = []
self._n_estimators = n_estimators
self._n_intervals = n_intervals
self._att_subsample_size = att_subsample_size
self._min_interval = min_interval
self._max_interval = max_interval
self._base_estimator = base_estimator
self._n_jobs = n_jobs
super(DrCIF, self).__init__()
def _fit(self, X, y):
self._n_jobs = check_n_jobs(self.n_jobs)
self.n_instances, self.n_dims, self.series_length = X.shape
self.n_classes = np.unique(y).shape[0]
self.classes_ = class_distribution(np.asarray(y).reshape(-1, 1))[0][0]
time_limit = self.time_limit_in_minutes * 60
start_time = time.time()
train_time = 0
if self.base_estimator == "DTC":
self._base_estimator = DecisionTreeClassifier(criterion="entropy")
elif self.base_estimator == "CIT":
self._base_estimator = ContinuousIntervalTree()
elif isinstance(self.base_estimator, BaseEstimator):
self._base_estimator = self.base_estimator
else:
raise ValueError("DrCIF invalid base estimator given.")
X_p = np.zeros(
(
self.n_instances,
self.n_dims,
int(
math.pow(2, math.ceil(math.log(self.series_length, 2)))
- self.series_length
),
)
)
X_p = np.concatenate((X, X_p), axis=2)
X_p = np.abs(np.fft.fft(X_p)[:, :, : int(X_p.shape[2] / 2)])
X_d = np.diff(X, 1)
if self.n_intervals is None:
self._n_intervals = [None, None, None]
self._n_intervals[0] = 4 + int(
(math.sqrt(self.series_length) * math.sqrt(self.n_dims)) / 3
)
self._n_intervals[1] = 4 + int(
(math.sqrt(X_p.shape[2]) * math.sqrt(self.n_dims)) / 3
)
self._n_intervals[2] = 4 + int(
(math.sqrt(X_d.shape[2]) * math.sqrt(self.n_dims)) / 3
)
elif isinstance(self.n_intervals, int):
self._n_intervals = [self.n_intervals, self.n_intervals, self.n_intervals]
elif isinstance(self.n_intervals, list) and len(self.n_intervals) == 3:
self._n_intervals = self.n_intervals
else:
raise ValueError("DrCIF n_intervals must be an int or list of length 3.")
for i, n in enumerate(self._n_intervals):
if n <= 0:
self._n_intervals[i] = 1
if self.att_subsample_size > 25:
self._att_subsample_size = 25
if isinstance(self.min_interval, int):
self._min_interval = [
self.min_interval,
self.min_interval,
self.min_interval,
]
elif isinstance(self.min_interval, list) and len(self.min_interval) == 3:
self._min_interval = self.min_interval
else:
raise ValueError("DrCIF min_interval must be an int or list of length 3.")
if self.series_length < self._min_interval[0]:
self._min_interval[0] = self.series_length
if X_p.shape[2] < self._min_interval[1]:
self._min_interval[1] = X_p.shape[2]
if X_d.shape[2] < self._min_interval[2]:
self._min_interval[2] = X_d.shape[2]
if self.max_interval is None:
self._max_interval = [
self.series_length / 2,
X_p.shape[2] / 2,
X_d.shape[2] / 2,
]
elif isinstance(self.max_interval, int):
self._max_interval = [
self.max_interval,
self.max_interval,
self.max_interval,
]
elif isinstance(self.max_interval, list) and len(self.max_interval) == 3:
self._max_interval = self.max_interval
else:
raise ValueError("DrCIF max_interval must be an int or list of length 3.")
for i, n in enumerate(self._max_interval):
if n < self._min_interval[i]:
self._max_interval[i] = self._min_interval[i]
self.total_intervals = sum(self._n_intervals)
if time_limit > 0:
self._n_estimators = 0
self.estimators_ = []
self.intervals = []
self.atts = []
self.dims = []
self.transformed_data = []
while (
train_time < time_limit
and self._n_estimators < self.contract_max_n_estimators
):
fit = Parallel(n_jobs=self._n_jobs)(
delayed(self._fit_estimator)(
X,
X_p,
X_d,
y,
i,
)
for i in range(self._n_jobs)
)
(
estimators,
intervals,
dims,
atts,
transformed_data,
) = zip(*fit)
self.estimators_ += estimators
self.intervals += intervals
self.atts += atts
self.dims += dims
self.transformed_data += transformed_data
self._n_estimators += self._n_jobs
train_time = time.time() - start_time
else:
fit = Parallel(n_jobs=self._n_jobs)(
delayed(self._fit_estimator)(
X,
X_p,
X_d,
y,
i,
)
for i in range(self._n_estimators)
)
(
self.estimators_,
self.intervals,
self.dims,
self.atts,
self.transformed_data,
) = zip(*fit)
def _predict(self, X):
rng = check_random_state(self.random_state)
return np.array(
[
self.classes_[int(rng.choice(np.flatnonzero(prob == prob.max())))]
for prob in self._predict_proba(X)
]
)
def _predict_proba(self, X):
n_test_instances, _, series_length = X.shape
if series_length != self.series_length:
raise TypeError(
"ERROR number of attributes in the train does not match "
"that in the test data"
)
X_p = np.zeros(
(
n_test_instances,
self.n_dims,
int(
math.pow(2, math.ceil(math.log(self.series_length, 2)))
- self.series_length
),
)
)
X_p = np.concatenate((X, X_p), axis=2)
X_p = np.abs(np.fft.fft(X_p)[:, :, : int(X_p.shape[2] / 2)])
X_d = np.diff(X, 1)
y_probas = Parallel(n_jobs=self._n_jobs)(
delayed(self._predict_proba_for_estimator)(
X,
X_p,
X_d,
self.estimators_[i],
self.intervals[i],
self.dims[i],
self.atts[i],
)
for i in range(self._n_estimators)
)
output = np.sum(y_probas, axis=0) / (
np.ones(self.n_classes) * self._n_estimators
)
return output
def _get_train_probs(self, X, y):
self.check_is_fitted()
X, y = check_X_y(X, y, coerce_to_numpy=True)
n_instances, n_dims, series_length = X.shape
if (
n_instances != self.n_instances
or n_dims != self.n_dims
or series_length != self.series_length
):
raise ValueError(
"n_instances, n_dims, series_length mismatch. X should be "
"the same as the training data used in fit for generating train "
"probabilities."
)
if not self.save_transformed_data:
raise ValueError("Currently only works with saved transform data from fit.")
p = Parallel(n_jobs=self._n_jobs)(
delayed(self._train_probas_for_estimator)(
y,
i,
)
for i in range(self._n_estimators)
)
y_probas, oobs = zip(*p)
results = np.sum(y_probas, axis=0)
divisors = np.zeros(n_instances)
for oob in oobs:
for inst in oob:
divisors[inst] += 1
for i in range(n_instances):
results[i] = (
np.ones(self.n_classes) * (1 / self.n_classes)
if divisors[i] == 0
else results[i] / (np.ones(self.n_classes) * divisors[i])
)
return results
def _fit_estimator(self, X, X_p, X_d, y, idx):
c22 = Catch22(outlier_norm=True)
T = [X, X_p, X_d]
rs = 255 if self.random_state == 0 else self.random_state
rs = None if self.random_state is None else rs * 37 * (idx + 1)
rng = check_random_state(rs)
transformed_x = np.empty(
shape=(self._att_subsample_size * self.total_intervals, self.n_instances),
dtype=np.float32,
)
atts = rng.choice(29, self._att_subsample_size, replace=False)
dims = rng.choice(self.n_dims, self.total_intervals, replace=True)
intervals = np.zeros((self.total_intervals, 2), dtype=int)
p = 0
j = 0
for r in range(0, len(T)):
transform_length = T[r].shape[2]
# Find the random intervals for classifier i, transformation r
# and concatenate features
for _ in range(0, self._n_intervals[r]):
if rng.random() < 0.5:
intervals[j][0] = rng.randint(
0, transform_length - self._min_interval[r]
)
len_range = min(
transform_length - intervals[j][0],
self._max_interval[r],
)
length = (
rng.randint(0, len_range - self._min_interval[r])
+ self._min_interval[r]
)
intervals[j][1] = intervals[j][0] + length
else:
intervals[j][1] = (
rng.randint(0, transform_length - self._min_interval[r])
+ self._min_interval[r]
)
len_range = min(intervals[j][1], self._max_interval[r])
length = (
rng.randint(0, len_range - self._min_interval[r])
+ self._min_interval[r]
if len_range - self._min_interval[r] > 0
else self._min_interval[r]
)
intervals[j][0] = intervals[j][1] - length
for a in range(0, self._att_subsample_size):
transformed_x[p] = _drcif_feature(
T[r], intervals[j], dims[j], atts[a], c22
)
p += 1
j += 1
tree = _clone_estimator(self._base_estimator, random_state=rs)
transformed_x = transformed_x.T
transformed_x = transformed_x.round(8)
transformed_x = np.nan_to_num(transformed_x, False, 0, 0, 0)
tree.fit(transformed_x, y)
return [
tree,
intervals,
dims,
atts,
transformed_x if self.save_transformed_data else None,
]
def _predict_proba_for_estimator(
self, X, X_p, X_d, classifier, intervals, dims, atts
):
c22 = Catch22(outlier_norm=True)
if isinstance(self._base_estimator, ContinuousIntervalTree):
return classifier._predict_proba_drcif(
X, X_p, X_d, c22, self._n_intervals, intervals, dims, atts
)
else:
T = [X, X_p, X_d]
transformed_x = np.empty(
shape=(self._att_subsample_size * self.total_intervals, X.shape[0]),
dtype=np.float32,
)
p = 0
j = 0
for r in range(0, len(T)):
for _ in range(0, self._n_intervals[r]):
for a in range(0, self._att_subsample_size):
transformed_x[p] = _drcif_feature(
T[r], intervals[j], dims[j], atts[a], c22
)
p += 1
j += 1
transformed_x = transformed_x.T
            transformed_x = transformed_x.round(8)
np.nan_to_num(transformed_x, False, 0, 0, 0)
return classifier.predict_proba(transformed_x)
def _train_probas_for_estimator(self, y, idx):
rs = 255 if self.random_state == 0 else self.random_state
rs = None if self.random_state is None else rs * 37 * (idx + 1)
rng = check_random_state(rs)
indices = range(self.n_instances)
subsample = rng.choice(self.n_instances, size=self.n_instances)
oob = [n for n in indices if n not in subsample]
clf = _clone_estimator(self._base_estimator, rs)
clf.fit(self.transformed_data[idx][subsample], y[subsample])
probas = clf.predict_proba(self.transformed_data[idx][oob])
results = np.zeros((self.n_instances, self.n_classes))
for n, proba in enumerate(probas):
results[oob[n]] += proba
return [results, oob]
| 36.873484
| 88
| 0.577082
|
f71ab74bbb37c06ec87292445a3616dd3669f146
| 7,850
|
py
|
Python
|
openprompt/prompts/one2one_verbalizer.py
|
hlzhang109/OpenPrompt
|
8a1ec1ceac3805a11b09dda9b96ad7406d222f26
|
[
"Apache-2.0"
] | null | null | null |
openprompt/prompts/one2one_verbalizer.py
|
hlzhang109/OpenPrompt
|
8a1ec1ceac3805a11b09dda9b96ad7406d222f26
|
[
"Apache-2.0"
] | null | null | null |
openprompt/prompts/one2one_verbalizer.py
|
hlzhang109/OpenPrompt
|
8a1ec1ceac3805a11b09dda9b96ad7406d222f26
|
[
"Apache-2.0"
] | null | null | null |
import json
from transformers.tokenization_utils import PreTrainedTokenizer
from yacs.config import CfgNode
from openprompt.data_utils.data_utils import InputFeatures
import re
from openprompt import Verbalizer
from typing import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from openprompt.utils.logging import logger
class One2oneVerbalizer(Verbalizer):
r"""
The basic manually defined verbalizer class, this class is inherited from the :obj:`Verbalizer` class.
This class restrict the use of label words to one words per label. For a verbalzer with less constraints,
please use Basic ManualVerbalizer.
Args:
tokenizer (:obj:`PreTrainedTokenizer`): The tokenizer of the current pre-trained model to point out the vocabulary.
classes (:obj:`classes`): The classes (or labels) of the current task.
num_classes (:obj:`int`): Optional. The number of classes of the verbalizer. Only one of `classes` and `num_classes` should be used.
label_words (:obj:`Union[Sequence[str], Mapping[str, str]]`, optional): The label words that are projected by the labels.
prefix (:obj:`str`, optional): The prefix string of the verbalizer. (used in PLMs like RoBERTa, which is sensitive to prefix space)
multi_token_handler (:obj:`str`, optional): The handling strategy for multiple tokens produced by the tokenizer.
"""
def __init__(self,
tokenizer: PreTrainedTokenizer,
num_classes: Optional[int] = None,
classes: Optional[List] = None,
label_words: Optional[Union[Sequence[str], Mapping[str, str]]] = None,
prefix: Optional[str] = " ",
multi_token_handler: Optional[str] = "first",
):
super().__init__(tokenizer=tokenizer, num_classes=num_classes, classes=classes)
self.prefix = prefix
self.multi_token_handler = multi_token_handler
self.label_words = label_words
def on_label_words_set(self):
super().on_label_words_set()
self.label_words = self.add_prefix(self.label_words, self.prefix)
self.generate_parameters()
@staticmethod
def add_prefix(label_words, prefix):
r"""Add prefix to label words. For example, if a label words is in the middle of a template,
the prefix should be ``' '``.
Args:
label_words (:obj:`Union[Sequence[str], Mapping[str, str]]`, optional): The label words that are projected by the labels.
prefix (:obj:`str`, optional): The prefix string of the verbalizer.
Returns:
:obj:`Sequence[str]`: New label words with prefix.
"""
new_label_words = []
if isinstance(label_words[0], list):
assert max([len(w) for w in label_words]) == 1, "Providing multiple label words, you should use other verbalizers instead."
label_words = [w[0] for w in label_words]
for word in label_words:
if word.startswith("<!>"):
new_label_words.append(word.split("<!>")[1])
else:
new_label_words.append(prefix + word)
return new_label_words
def generate_parameters(self) -> List:
r"""In basic manual template, the parameters are generated from label words directly.
In this implementation, the label_words should not be tokenized into more than one token.
"""
words_ids = []
for word in self.label_words:
word_ids = self.tokenizer.encode(word, add_special_tokens=False)
if len(word_ids) > 1:
logger.warning("Word {} is split into multiple tokens: {}. \
If this is not what you expect, try using another word for this verbalizer" \
.format(word, self.tokenizer.convert_ids_to_tokens(word_ids)))
words_ids.append(word_ids)
max_len = max([len(ids) for ids in words_ids])
words_ids_mask = [[1]*len(ids) + [0]*(max_len-len(ids)) for ids in words_ids]
words_ids = [ids+[0]*(max_len-len(ids)) for ids in words_ids]
words_ids_tensor = torch.tensor(words_ids)
words_ids_mask = torch.tensor(words_ids_mask)
self.label_words_ids = nn.Parameter(words_ids_tensor, requires_grad=False)
self.label_words_mask = nn.Parameter(words_ids_mask, requires_grad=False)
def project(self,
logits: torch.Tensor,
**kwargs,
) -> torch.Tensor:
r"""
        Project the logits onto the label words. The return value is the logits of
        the label words, one entry per class.
        Args:
            logits (:obj:`torch.Tensor`): The original logits over the vocabulary.
        Returns:
            :obj:`torch.Tensor`: The logits of the label words
"""
label_words_logits = logits[:, self.label_words_ids]
label_words_logits = self.handle_multi_token(label_words_logits, self.label_words_mask)
return label_words_logits
def process_logits(self, logits: torch.Tensor, **kwargs):
r"""A whole framework to process the original logits over the vocabulary, which contains four steps:
(1) Project the logits into logits of label words
(2) Normalize over all label words
(3) Calibrate (optional)
Args:
            logits (:obj:`torch.Tensor`): The original logits.
Returns:
(:obj:`torch.Tensor`): The final processed logits over the label words set.
"""
# project
label_words_logits = self.project(logits, **kwargs) #Output: (batch_size, num_classes) or (batch_size, num_classes, num_label_words_per_label)
# normalize
label_words_probs = self.normalize(label_words_logits)
# calibrate
if hasattr(self, "_calibrate_logits") and self._calibrate_logits is not None:
label_words_probs = self.calibrate(label_words_probs=label_words_probs)
# convert to logits
label_words_logits = torch.log(label_words_probs+1e-15)
return label_words_logits
def normalize(self, logits: torch.Tensor) -> torch.Tensor:
"""
Given logits regarding the entire vocabulary, return the probs over the label words set.
Args:
logits (:obj:`Tensor`): The logits over the entire vocabulary.
Returns:
:obj:`Tensor`: The logits over the label words set.
"""
batch_size = logits.shape[0]
return F.softmax(logits.reshape(batch_size, -1), dim=-1).reshape(*logits.shape)
def calibrate(self, label_words_probs: torch.Tensor, **kwargs) -> torch.Tensor:
r"""
Args:
label_words_probs (:obj:`torch.Tensor`): The probability distribution of the label words with the shape of [``batch_size``, ``num_classes``, ``num_label_words_per_class``]
Returns:
:obj:`torch.Tensor`: The calibrated probability of label words.
"""
shape = label_words_probs.shape
assert self._calibrate_logits.dim() == 1, "self._calibrate_logits are not 1-d tensor"
calibrate_label_words_probs = self.normalize(self.project(self._calibrate_logits.unsqueeze(0), **kwargs))
assert calibrate_label_words_probs.shape[1:] == label_words_probs.shape[1:] \
and calibrate_label_words_probs.shape[0]==1, "shape not match"
label_words_probs /= (calibrate_label_words_probs+1e-15)
# normalize # TODO Test the performance
norm = label_words_probs.reshape(shape[0], -1).sum(dim=-1,keepdim=True) # TODO Test the performance of detaching()
label_words_probs /= norm
return label_words_probs
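The verbalizer above boils down to a few tensor operations. The snippet below is a minimal, self-contained sketch of the project -> normalize -> calibrate pipeline (an editorial illustration, not part of the source file); the vocabulary size, batch size, label-word ids and calibration logits are toy assumptions.
import torch
import torch.nn.functional as F

vocab_logits = torch.randn(4, 6)            # (batch_size, vocab_size), toy values
label_words_ids = torch.tensor([2, 5])      # one token id per class, chosen arbitrarily

# project: gather the logits of the label-word token ids -> (batch_size, num_classes)
label_words_logits = vocab_logits[:, label_words_ids]

# normalize: softmax restricted to the label words, as in ``normalize`` above
label_words_probs = F.softmax(label_words_logits, dim=-1)

# calibrate: divide by probabilities from a context-free input, then renormalize
calibrate_logits = torch.randn(6)           # stand-in for self._calibrate_logits
calibrate_probs = F.softmax(calibrate_logits[label_words_ids], dim=-1).unsqueeze(0)
label_words_probs = label_words_probs / (calibrate_probs + 1e-15)
label_words_probs = label_words_probs / label_words_probs.sum(dim=-1, keepdim=True)

final_logits = torch.log(label_words_probs + 1e-15)
print(final_logits.shape)                   # torch.Size([4, 2])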
| 44.101124
| 183
| 0.643057
|
98edc28d70e8a28e0bf6954e7a7da30111d30bad
| 31,117
|
py
|
Python
|
numba/cuda/compiler.py
|
svrakitin/numba
|
830a2c7ccc410f270677b0b241f9b8acc2598101
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
numba/cuda/compiler.py
|
svrakitin/numba
|
830a2c7ccc410f270677b0b241f9b8acc2598101
|
[
"BSD-2-Clause",
"Apache-2.0"
] | 1
|
2019-08-29T21:03:09.000Z
|
2019-08-29T21:04:26.000Z
|
numba/cuda/compiler.py
|
svrakitin/numba
|
830a2c7ccc410f270677b0b241f9b8acc2598101
|
[
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null |
import ctypes
import os
from functools import reduce, wraps
import operator
import sys
import threading
import warnings
import numpy as np
from numba.core.typing.templates import AbstractTemplate, ConcreteTemplate
from numba.core import types, typing, utils, funcdesc, serialize, config, compiler, sigutils
from numba.core.compiler_lock import global_compiler_lock
from .cudadrv.autotune import AutoTuner
from .cudadrv.devices import get_context
from .cudadrv import nvvm, devicearray, driver
from .errors import normalize_kernel_dimensions
from .api import get_current_device
from .args import wrap_arg
@global_compiler_lock
def compile_cuda(pyfunc, return_type, args, debug, inline):
# First compilation will trigger the initialization of the CUDA backend.
from .descriptor import CUDATargetDesc
typingctx = CUDATargetDesc.typingctx
targetctx = CUDATargetDesc.targetctx
# TODO handle debug flag
flags = compiler.Flags()
# Do not compile (generate native code), just lower (to LLVM)
flags.set('no_compile')
flags.set('no_cpython_wrapper')
if debug:
flags.set('debuginfo')
if inline:
flags.set('forceinline')
# Run compilation pipeline
cres = compiler.compile_extra(typingctx=typingctx,
targetctx=targetctx,
func=pyfunc,
args=args,
return_type=return_type,
flags=flags,
locals={})
library = cres.library
library.finalize()
return cres
@global_compiler_lock
def compile_kernel(pyfunc, args, link, debug=False, inline=False,
fastmath=False, extensions=[], max_registers=None):
cres = compile_cuda(pyfunc, types.void, args, debug=debug, inline=inline)
fname = cres.fndesc.llvm_func_name
lib, kernel = cres.target_context.prepare_cuda_kernel(cres.library, fname,
cres.signature.args,
debug=debug)
cukern = CUDAKernel(llvm_module=lib._final_module,
name=kernel.name,
pretty_name=cres.fndesc.qualname,
argtypes=cres.signature.args,
type_annotation=cres.type_annotation,
link=link,
debug=debug,
call_helper=cres.call_helper,
fastmath=fastmath,
extensions=extensions,
max_registers=max_registers)
return cukern
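# --- Editorial sketch (not part of numba) -------------------------------------
# compile_kernel() is normally reached through the @cuda.jit decorator (see
# AutoJitCUDAKernel below), but it can also be driven directly for a fixed
# signature. The kernel body, signature and launch configuration below are
# illustrative assumptions, and a CUDA-capable device is required at call time.
def _example_compile_kernel():
    from numba import cuda  # imported lazily; numba.cuda is fully loaded by call time
    def axpy(r, a, x, y):
        i = cuda.grid(1)
        if i < r.size:
            r[i] = a * x[i] + y[i]
    sig = (types.float32[:], types.float32, types.float32[:], types.float32[:])
    kern = compile_kernel(axpy, sig, link=())
    # configure a 1D launch: 128 blocks of 64 threads each
    return kern.configure(128, 64)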
class DeviceFunctionTemplate(object):
"""Unmaterialized device function
"""
def __init__(self, pyfunc, debug, inline):
self.py_func = pyfunc
self.debug = debug
self.inline = inline
self._compileinfos = {}
def __reduce__(self):
glbls = serialize._get_function_globals_for_reduction(self.py_func)
func_reduced = serialize._reduce_function(self.py_func, glbls)
args = (self.__class__, func_reduced, self.debug, self.inline)
return (serialize._rebuild_reduction, args)
@classmethod
def _rebuild(cls, func_reduced, debug, inline):
func = serialize._rebuild_function(*func_reduced)
return compile_device_template(func, debug=debug, inline=inline)
def compile(self, args):
"""Compile the function for the given argument types.
Each signature is compiled once by caching the compiled function inside
this object.
Returns the `CompileResult`.
"""
if args not in self._compileinfos:
cres = compile_cuda(self.py_func, None, args, debug=self.debug,
inline=self.inline)
first_definition = not self._compileinfos
self._compileinfos[args] = cres
libs = [cres.library]
if first_definition:
# First definition
cres.target_context.insert_user_function(self, cres.fndesc,
libs)
else:
cres.target_context.add_user_function(self, cres.fndesc, libs)
else:
cres = self._compileinfos[args]
return cres
def inspect_llvm(self, args):
"""Returns the LLVM-IR text compiled for *args*.
Parameters
----------
args: tuple[Type]
Argument types.
Returns
-------
llvmir : str
"""
cres = self._compileinfos[args]
mod = cres.library._final_module
return str(mod)
def inspect_ptx(self, args, nvvm_options={}):
"""Returns the PTX compiled for *args* for the currently active GPU
Parameters
----------
args: tuple[Type]
Argument types.
nvvm_options : dict; optional
See `CompilationUnit.compile` in `numba/cuda/cudadrv/nvvm.py`.
Returns
-------
ptx : bytes
"""
llvmir = self.inspect_llvm(args)
# Make PTX
cuctx = get_context()
device = cuctx.device
cc = device.compute_capability
arch = nvvm.get_arch_option(*cc)
ptx = nvvm.llvm_to_ptx(llvmir, opt=3, arch=arch, **nvvm_options)
return ptx
def compile_device_template(pyfunc, debug=False, inline=False):
"""Create a DeviceFunctionTemplate object and register the object to
the CUDA typing context.
"""
from .descriptor import CUDATargetDesc
dft = DeviceFunctionTemplate(pyfunc, debug=debug, inline=inline)
class device_function_template(AbstractTemplate):
key = dft
def generic(self, args, kws):
assert not kws
return dft.compile(args).signature
typingctx = CUDATargetDesc.typingctx
typingctx.insert_user_function(dft, device_function_template)
return dft
def compile_device(pyfunc, return_type, args, inline=True, debug=False):
return DeviceFunction(pyfunc, return_type, args, inline=True, debug=False)
def declare_device_function(name, restype, argtypes):
from .descriptor import CUDATargetDesc
typingctx = CUDATargetDesc.typingctx
targetctx = CUDATargetDesc.targetctx
sig = typing.signature(restype, *argtypes)
extfn = ExternFunction(name, sig)
class device_function_template(ConcreteTemplate):
key = extfn
cases = [sig]
fndesc = funcdesc.ExternalFunctionDescriptor(
name=name, restype=restype, argtypes=argtypes)
typingctx.insert_user_function(extfn, device_function_template)
targetctx.insert_user_function(extfn, fndesc)
return extfn
class DeviceFunction(object):
def __init__(self, pyfunc, return_type, args, inline, debug):
self.py_func = pyfunc
self.return_type = return_type
self.args = args
self.inline = True
self.debug = False
cres = compile_cuda(self.py_func, self.return_type, self.args,
debug=self.debug, inline=self.inline)
self.cres = cres
# Register
class device_function_template(ConcreteTemplate):
key = self
cases = [cres.signature]
cres.typing_context.insert_user_function(
self, device_function_template)
cres.target_context.insert_user_function(self, cres.fndesc,
[cres.library])
def __reduce__(self):
globs = serialize._get_function_globals_for_reduction(self.py_func)
func_reduced = serialize._reduce_function(self.py_func, globs)
args = (self.__class__, func_reduced, self.return_type, self.args,
self.inline, self.debug)
return (serialize._rebuild_reduction, args)
@classmethod
def _rebuild(cls, func_reduced, return_type, args, inline, debug):
return cls(serialize._rebuild_function(*func_reduced), return_type,
args, inline, debug)
def __repr__(self):
fmt = "<DeviceFunction py_func={0} signature={1}>"
return fmt.format(self.py_func, self.cres.signature)
class ExternFunction(object):
def __init__(self, name, sig):
self.name = name
self.sig = sig
class ForAll(object):
def __init__(self, kernel, ntasks, tpb, stream, sharedmem):
if ntasks < 0:
raise ValueError("Can't create ForAll with negative task count: %s"
% ntasks)
self.kernel = kernel
self.ntasks = ntasks
self.thread_per_block = tpb
self.stream = stream
self.sharedmem = sharedmem
def __call__(self, *args):
if self.ntasks == 0:
return
if isinstance(self.kernel, AutoJitCUDAKernel):
kernel = self.kernel.specialize(*args)
else:
kernel = self.kernel
tpb = self._compute_thread_per_block(kernel)
tpbm1 = tpb - 1
blkct = (self.ntasks + tpbm1) // tpb
return kernel.configure(blkct, tpb, stream=self.stream,
sharedmem=self.sharedmem)(*args)
def _compute_thread_per_block(self, kernel):
tpb = self.thread_per_block
# Prefer user-specified config
if tpb != 0:
return tpb
        # Else, ask the driver to give a good config
else:
ctx = get_context()
kwargs = dict(
func=kernel._func.get(),
b2d_func=0, # dynamic-shared memory is constant to blksz
memsize=self.sharedmem,
blocksizelimit=1024,
)
try:
# Raises from the driver if the feature is unavailable
_, tpb = ctx.get_max_potential_block_size(**kwargs)
except AttributeError:
# Fallback to table-based approach.
tpb = self._fallback_autotune_best(kernel)
raise
return tpb
def _fallback_autotune_best(self, kernel):
try:
tpb = kernel.autotune.best()
except ValueError:
warnings.warn('Could not autotune, using default tpb of 128')
tpb = 128
return tpb
class CUDAKernelBase(object):
"""Define interface for configurable kernels
"""
def __init__(self):
self.griddim = (1, 1)
self.blockdim = (1, 1, 1)
self.sharedmem = 0
self.stream = 0
def copy(self):
"""
Shallow copy the instance
"""
# Note: avoid using ``copy`` which calls __reduce__
cls = self.__class__
# new bare instance
new = cls.__new__(cls)
# update the internal states
new.__dict__.update(self.__dict__)
return new
def configure(self, griddim, blockdim, stream=0, sharedmem=0):
griddim, blockdim = normalize_kernel_dimensions(griddim, blockdim)
clone = self.copy()
clone.griddim = tuple(griddim)
clone.blockdim = tuple(blockdim)
clone.stream = stream
clone.sharedmem = sharedmem
return clone
def __getitem__(self, args):
if len(args) not in [2, 3, 4]:
raise ValueError('must specify at least the griddim and blockdim')
return self.configure(*args)
def forall(self, ntasks, tpb=0, stream=0, sharedmem=0):
"""Returns a configured kernel for 1D kernel of given number of tasks
``ntasks``.
This assumes that:
        - the kernel maps the global thread id ``cuda.grid(1)`` 1-to-1 onto tasks.
        - the kernel must check that the thread id is valid."""
return ForAll(self, ntasks, tpb=tpb, stream=stream, sharedmem=sharedmem)
def _serialize_config(self):
"""
Helper for serializing the grid, block and shared memory configuration.
CUDA stream config is not serialized.
"""
return self.griddim, self.blockdim, self.sharedmem
def _deserialize_config(self, config):
"""
Helper for deserializing the grid, block and shared memory
configuration.
"""
self.griddim, self.blockdim, self.sharedmem = config
class CachedPTX(object):
"""A PTX cache that uses compute capability as a cache key
"""
def __init__(self, name, llvmir, options):
self.name = name
self.llvmir = llvmir
self.cache = {}
self._extra_options = options.copy()
def get(self):
"""
Get PTX for the current active context.
"""
cuctx = get_context()
device = cuctx.device
cc = device.compute_capability
ptx = self.cache.get(cc)
if ptx is None:
arch = nvvm.get_arch_option(*cc)
ptx = nvvm.llvm_to_ptx(self.llvmir, opt=3, arch=arch,
**self._extra_options)
self.cache[cc] = ptx
if config.DUMP_ASSEMBLY:
print(("ASSEMBLY %s" % self.name).center(80, '-'))
print(ptx.decode('utf-8'))
print('=' * 80)
return ptx
class CachedCUFunction(object):
"""
Get or compile CUDA function for the current active context
Uses device ID as key for cache.
"""
def __init__(self, entry_name, ptx, linking, max_registers):
self.entry_name = entry_name
self.ptx = ptx
self.linking = linking
self.cache = {}
self.ccinfos = {}
self.max_registers = max_registers
def get(self):
cuctx = get_context()
device = cuctx.device
cufunc = self.cache.get(device.id)
if cufunc is None:
ptx = self.ptx.get()
# Link
linker = driver.Linker(max_registers=self.max_registers)
linker.add_ptx(ptx)
for path in self.linking:
linker.add_file_guess_ext(path)
cubin, _size = linker.complete()
compile_info = linker.info_log
module = cuctx.create_module_image(cubin)
# Load
cufunc = module.get_function(self.entry_name)
self.cache[device.id] = cufunc
self.ccinfos[device.id] = compile_info
return cufunc
def get_info(self):
self.get() # trigger compilation
cuctx = get_context()
device = cuctx.device
ci = self.ccinfos[device.id]
return ci
def __reduce__(self):
"""
Reduce the instance for serialization.
Pre-compiled PTX code string is serialized inside the `ptx` (CachedPTX).
Loaded CUfunctions are discarded. They are recreated when unserialized.
"""
if self.linking:
msg = ('cannot pickle CUDA kernel function with additional '
'libraries to link against')
raise RuntimeError(msg)
args = (self.__class__, self.entry_name, self.ptx, self.linking, self.max_registers)
return (serialize._rebuild_reduction, args)
@classmethod
def _rebuild(cls, entry_name, ptx, linking, max_registers):
"""
Rebuild an instance.
"""
return cls(entry_name, ptx, linking, max_registers)
class CUDAKernel(CUDAKernelBase):
'''
CUDA Kernel specialized for a given set of argument types. When called, this
object will validate that the argument types match those for which it is
specialized, and then launch the kernel on the device.
'''
def __init__(self, llvm_module, name, pretty_name, argtypes, call_helper,
link=(), debug=False, fastmath=False, type_annotation=None,
extensions=[], max_registers=None):
super(CUDAKernel, self).__init__()
# initialize CUfunction
options = {'debug': debug}
if fastmath:
options.update(dict(ftz=True,
prec_sqrt=False,
prec_div=False,
fma=True))
ptx = CachedPTX(pretty_name, str(llvm_module), options=options)
cufunc = CachedCUFunction(name, ptx, link, max_registers)
# populate members
self.entry_name = name
self.argument_types = tuple(argtypes)
self.linking = tuple(link)
self._type_annotation = type_annotation
self._func = cufunc
self.debug = debug
self.call_helper = call_helper
self.extensions = list(extensions)
@classmethod
def _rebuild(cls, name, argtypes, cufunc, link, debug, call_helper, extensions, config):
"""
Rebuild an instance.
"""
instance = cls.__new__(cls)
# invoke parent constructor
super(cls, instance).__init__()
# populate members
instance.entry_name = name
instance.argument_types = tuple(argtypes)
instance.linking = tuple(link)
instance._type_annotation = None
instance._func = cufunc
instance.debug = debug
instance.call_helper = call_helper
instance.extensions = extensions
# update config
instance._deserialize_config(config)
return instance
def __reduce__(self):
"""
Reduce the instance for serialization.
Compiled definitions are serialized in PTX form.
        Type annotations are discarded.
Thread, block and shared memory configuration are serialized.
Stream information is discarded.
"""
config = self._serialize_config()
args = (self.__class__, self.entry_name, self.argument_types,
self._func, self.linking, self.debug, self.call_helper,
self.extensions, config)
return (serialize._rebuild_reduction, args)
def __call__(self, *args, **kwargs):
assert not kwargs
self._kernel_call(args=args,
griddim=self.griddim,
blockdim=self.blockdim,
stream=self.stream,
sharedmem=self.sharedmem)
def bind(self):
"""
Force binding to current CUDA context
"""
self._func.get()
@property
def ptx(self):
'''
PTX code for this kernel.
'''
return self._func.ptx.get().decode('utf8')
@property
def device(self):
"""
Get current active context
"""
return get_current_device()
def inspect_llvm(self):
'''
Returns the LLVM IR for this kernel.
'''
return str(self._func.ptx.llvmir)
def inspect_asm(self):
'''
Returns the PTX code for this kernel.
'''
return self._func.ptx.get().decode('ascii')
def inspect_types(self, file=None):
'''
Produce a dump of the Python source of this function annotated with the
corresponding Numba IR and type information. The dump is written to
*file*, or *sys.stdout* if *file* is *None*.
'''
if self._type_annotation is None:
raise ValueError("Type annotation is not available")
if file is None:
file = sys.stdout
print("%s %s" % (self.entry_name, self.argument_types), file=file)
print('-' * 80, file=file)
print(self._type_annotation, file=file)
print('=' * 80, file=file)
def _kernel_call(self, args, griddim, blockdim, stream=0, sharedmem=0):
# Prepare kernel
cufunc = self._func.get()
if self.debug:
excname = cufunc.name + "__errcode__"
excmem, excsz = cufunc.module.get_global_symbol(excname)
assert excsz == ctypes.sizeof(ctypes.c_int)
excval = ctypes.c_int()
excmem.memset(0, stream=stream)
# Prepare arguments
retr = [] # hold functors for writeback
kernelargs = []
for t, v in zip(self.argument_types, args):
self._prepare_args(t, v, stream, retr, kernelargs)
# Configure kernel
cu_func = cufunc.configure(griddim, blockdim,
stream=stream,
sharedmem=sharedmem)
# Invoke kernel
cu_func(*kernelargs)
if self.debug:
driver.device_to_host(ctypes.addressof(excval), excmem, excsz)
if excval.value != 0:
# An error occurred
def load_symbol(name):
mem, sz = cufunc.module.get_global_symbol("%s__%s__" %
(cufunc.name,
name))
val = ctypes.c_int()
driver.device_to_host(ctypes.addressof(val), mem, sz)
return val.value
tid = [load_symbol("tid" + i) for i in 'zyx']
ctaid = [load_symbol("ctaid" + i) for i in 'zyx']
code = excval.value
exccls, exc_args, loc = self.call_helper.get_exception(code)
# Prefix the exception message with the source location
if loc is None:
locinfo = ''
else:
sym, filepath, lineno = loc
filepath = os.path.relpath(filepath)
locinfo = 'In function %r, file %s, line %s, ' % (
sym, filepath, lineno,
)
# Prefix the exception message with the thread position
prefix = "%stid=%s ctaid=%s" % (locinfo, tid, ctaid)
if exc_args:
exc_args = ("%s: %s" % (prefix, exc_args[0]),) + exc_args[1:]
else:
exc_args = prefix,
raise exccls(*exc_args)
# retrieve auto converted arrays
for wb in retr:
wb()
def _prepare_args(self, ty, val, stream, retr, kernelargs):
"""
Convert arguments to ctypes and append to kernelargs
"""
# map the arguments using any extension you've registered
for extension in reversed(self.extensions):
ty, val = extension.prepare_args(
ty,
val,
stream=stream,
retr=retr)
if isinstance(ty, types.Array):
devary = wrap_arg(val).to_device(retr, stream)
c_intp = ctypes.c_ssize_t
meminfo = ctypes.c_void_p(0)
parent = ctypes.c_void_p(0)
nitems = c_intp(devary.size)
itemsize = c_intp(devary.dtype.itemsize)
data = ctypes.c_void_p(driver.device_pointer(devary))
kernelargs.append(meminfo)
kernelargs.append(parent)
kernelargs.append(nitems)
kernelargs.append(itemsize)
kernelargs.append(data)
for ax in range(devary.ndim):
kernelargs.append(c_intp(devary.shape[ax]))
for ax in range(devary.ndim):
kernelargs.append(c_intp(devary.strides[ax]))
elif isinstance(ty, types.Integer):
cval = getattr(ctypes, "c_%s" % ty)(val)
kernelargs.append(cval)
elif ty == types.float64:
cval = ctypes.c_double(val)
kernelargs.append(cval)
elif ty == types.float32:
cval = ctypes.c_float(val)
kernelargs.append(cval)
elif ty == types.boolean:
cval = ctypes.c_uint8(int(val))
kernelargs.append(cval)
elif ty == types.complex64:
kernelargs.append(ctypes.c_float(val.real))
kernelargs.append(ctypes.c_float(val.imag))
elif ty == types.complex128:
kernelargs.append(ctypes.c_double(val.real))
kernelargs.append(ctypes.c_double(val.imag))
elif isinstance(ty, (types.NPDatetime, types.NPTimedelta)):
kernelargs.append(ctypes.c_int64(val.view(np.int64)))
elif isinstance(ty, types.Record):
devrec = wrap_arg(val).to_device(retr, stream)
kernelargs.append(devrec)
else:
raise NotImplementedError(ty, val)
@property
def autotune(self):
"""Return the autotuner object associated with this kernel."""
warnings.warn(_deprec_warn_msg.format('autotune'), DeprecationWarning)
has_autotune = hasattr(self, '_autotune')
if has_autotune and self._autotune.dynsmem == self.sharedmem:
return self._autotune
else:
# Get CUDA Function
cufunc = self._func.get()
at = AutoTuner(info=cufunc.attrs, cc=cufunc.device.compute_capability)
self._autotune = at
return self._autotune
@property
def occupancy(self):
"""Occupancy is the ratio of the number of active warps per multiprocessor to the maximum
number of warps that can be active on the multiprocessor at once.
Calculate the theoretical occupancy of the kernel given the
current configuration."""
warnings.warn(_deprec_warn_msg.format('occupancy'), DeprecationWarning)
thread_per_block = reduce(operator.mul, self.blockdim, 1)
return self.autotune.closest(thread_per_block)
_deprec_warn_msg = ("The .{} attribute is is deprecated and will be "
"removed in a future release")
class AutoJitCUDAKernel(CUDAKernelBase):
'''
CUDA Kernel object. When called, the kernel object will specialize itself
for the given arguments (if no suitable specialized version already exists)
& compute capability, and launch on the device associated with the current
context.
Kernel objects are not to be constructed by the user, but instead are
created using the :func:`numba.cuda.jit` decorator.
'''
def __init__(self, func, bind, targetoptions):
super(AutoJitCUDAKernel, self).__init__()
self.py_func = func
self.bind = bind
# keyed by a `(compute capability, args)` tuple
self.definitions = {}
self.targetoptions = targetoptions
# defensive copy
self.targetoptions['extensions'] = \
list(self.targetoptions.get('extensions', []))
from .descriptor import CUDATargetDesc
self.typingctx = CUDATargetDesc.typingctx
@property
def extensions(self):
'''
A list of objects that must have a `prepare_args` function. When a
specialized kernel is called, each argument will be passed through
to the `prepare_args` (from the last object in this list to the
first). The arguments to `prepare_args` are:
- `ty` the numba type of the argument
- `val` the argument value itself
- `stream` the CUDA stream used for the current call to the kernel
- `retr` a list of zero-arg functions that you may want to append
post-call cleanup work to.
The `prepare_args` function must return a tuple `(ty, val)`, which
will be passed in turn to the next right-most `extension`. After all
the extensions have been called, the resulting `(ty, val)` will be
passed into Numba's default argument marshalling logic.
'''
return self.targetoptions['extensions']
def __call__(self, *args):
'''
Specialize and invoke this kernel with *args*.
'''
kernel = self.specialize(*args)
cfg = kernel[self.griddim, self.blockdim, self.stream, self.sharedmem]
cfg(*args)
def specialize(self, *args):
'''
Compile and bind to the current context a version of this kernel
specialized for the given *args*.
'''
argtypes = tuple(
[self.typingctx.resolve_argument_type(a) for a in args])
kernel = self.compile(argtypes)
return kernel
def compile(self, sig):
'''
Compile and bind to the current context a version of this kernel
specialized for the given signature.
'''
argtypes, return_type = sigutils.normalize_signature(sig)
assert return_type is None
cc = get_current_device().compute_capability
kernel = self.definitions.get((cc, argtypes))
if kernel is None:
if 'link' not in self.targetoptions:
self.targetoptions['link'] = ()
kernel = compile_kernel(self.py_func, argtypes,
**self.targetoptions)
self.definitions[(cc, argtypes)] = kernel
if self.bind:
kernel.bind()
return kernel
def inspect_llvm(self, signature=None, compute_capability=None):
'''
Return the LLVM IR for all signatures encountered thus far, or the LLVM
IR for a specific signature and compute_capability if given.
'''
cc = compute_capability or get_current_device().compute_capability
if signature is not None:
return self.definitions[(cc, signature)].inspect_llvm()
else:
return dict((sig, defn.inspect_llvm())
for sig, defn in self.definitions.items())
def inspect_asm(self, signature=None, compute_capability=None):
'''
Return the generated assembly code for all signatures encountered thus
far, or the LLVM IR for a specific signature and compute_capability
if given.
'''
cc = compute_capability or get_current_device().compute_capability
if signature is not None:
return self.definitions[(cc, signature)].inspect_asm()
else:
return dict((sig, defn.inspect_asm())
for sig, defn in self.definitions.items())
def inspect_types(self, file=None):
'''
Produce a dump of the Python source of this function annotated with the
corresponding Numba IR and type information. The dump is written to
*file*, or *sys.stdout* if *file* is *None*.
'''
if file is None:
file = sys.stdout
for _, defn in utils.iteritems(self.definitions):
defn.inspect_types(file=file)
@classmethod
def _rebuild(cls, func_reduced, bind, targetoptions, config):
"""
Rebuild an instance.
"""
func = serialize._rebuild_function(*func_reduced)
instance = cls(func, bind, targetoptions)
instance._deserialize_config(config)
return instance
def __reduce__(self):
"""
Reduce the instance for serialization.
Compiled definitions are discarded.
"""
glbls = serialize._get_function_globals_for_reduction(self.py_func)
func_reduced = serialize._reduce_function(self.py_func, glbls)
config = self._serialize_config()
args = (self.__class__, func_reduced, self.bind, self.targetoptions,
config)
return (serialize._rebuild_reduction, args)
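For orientation, here is a minimal usage sketch of the machinery above through numba's public API (an editorial addition, not part of the file): the ``@cuda.jit`` decorator builds an ``AutoJitCUDAKernel``, indexing it configures grid and block dimensions, and calling it specializes, compiles and launches. A CUDA-capable GPU is assumed.
from numba import cuda
import numpy as np

@cuda.jit                       # builds an AutoJitCUDAKernel behind the scenes
def increment(arr):
    i = cuda.grid(1)
    if i < arr.size:
        arr[i] += 1

data = np.zeros(256, dtype=np.float32)
d_data = cuda.to_device(data)   # host -> device copy
increment[4, 64](d_data)        # __getitem__ -> configure(griddim, blockdim), then launch
result = d_data.copy_to_host()
# increment.forall(data.size)(d_data) would let ForAll (above) pick the block size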
| 34.923681
| 97
| 0.594273
|
ebdee138d6fd2468927c1b410cf2565e694792d8
| 1,052
|
py
|
Python
|
freezing/web/serialize.py
|
freezingsaddles/freezingsaddles
|
a65281c0183972aa647d9ffb45ff6bd7f8da47d5
|
[
"Apache-2.0"
] | 4
|
2019-01-02T15:14:46.000Z
|
2020-01-02T01:22:34.000Z
|
freezing/web/serialize.py
|
freezingsaddles/freezingsaddles
|
a65281c0183972aa647d9ffb45ff6bd7f8da47d5
|
[
"Apache-2.0"
] | 119
|
2018-01-19T13:34:39.000Z
|
2022-03-16T11:48:00.000Z
|
freezing/web/serialize.py
|
freezingsaddles/freezingsaddles
|
a65281c0183972aa647d9ffb45ff6bd7f8da47d5
|
[
"Apache-2.0"
] | 3
|
2016-07-29T02:26:24.000Z
|
2017-11-20T20:38:49.000Z
|
from marshmallow import Schema, fields
# shortcut
optional = dict(allow_none=True, required=False)
class AthleteSchema(Schema):
id = fields.Integer()
name = fields.String()
display_name = fields.String()
team_id = fields.Integer(**optional)
access_token = fields.String(**optional)
refresh_token = fields.String(**optional)
expires_at = fields.Integer()
profile_photo = fields.String(**optional)
# rides = orm.relationship("Ride", backref="athlete", lazy="dynamic", cascade="all, delete, delete-orphan")
class TeamSchema(Schema):
id = fields.Integer()
name = fields.String()
athletes = fields.Nested(AthleteSchema, many=True)
class RidePhotoSchema(Schema):
id = fields.String()
source = fields.Integer()
ride_id = fields.Integer()
ref = fields.String(**optional)
caption = fields.String(**optional)
img_t = fields.String(**optional)
img_l = fields.String(**optional)
primary = fields.Boolean()
img_l_dimensions = fields.List(fields.Integer, dump_only=True)
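As a quick orientation (editorial addition, not part of the file), the schemas above serialize plain objects by attribute lookup. The sketch below assumes marshmallow 3 semantics, where ``dump()`` returns a dict (under marshmallow 2 it returns a ``MarshalResult`` whose ``.data`` holds the dict); the athlete values are made up.
from types import SimpleNamespace

athlete = SimpleNamespace(
    id=42, name="Jane Rider", display_name="jane", team_id=7,
    access_token=None, refresh_token=None, expires_at=1735689600,
    profile_photo=None,
)
print(AthleteSchema().dump(athlete))
# {'id': 42, 'name': 'Jane Rider', 'display_name': 'jane', 'team_id': 7, ...}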
| 26.974359
| 111
| 0.692966
|
c54383ef1bb87effea3174212f317e646c831739
| 18,278
|
py
|
Python
|
src/commands/cmdset.py
|
abbacode/avaloria
|
02e1805ac6e74543c96408b7951429f94bc140ca
|
[
"ClArtistic"
] | null | null | null |
src/commands/cmdset.py
|
abbacode/avaloria
|
02e1805ac6e74543c96408b7951429f94bc140ca
|
[
"ClArtistic"
] | null | null | null |
src/commands/cmdset.py
|
abbacode/avaloria
|
02e1805ac6e74543c96408b7951429f94bc140ca
|
[
"ClArtistic"
] | null | null | null |
"""
A cmdset holds a set of commands available to the object or to other
objects near it. All the commands a player can give (look, @create etc)
are stored as the default cmdset on the player object and managed using the
CmdHandler object (see cmdhandler.py).
The power of having command sets in CmdSets like this is that CmdSets
can be merged together according to individual rules to create a new
on-the-fly CmdSet that is some combination of the
previous ones. Their functionality is borrowed to a large part from mathematical
set theory; it should not be much of a problem to understand.
See CmdHandler for practical examples on how to apply cmdsets
together to create interesting in-game effects.
"""
from django.utils.translation import ugettext as _
from src.utils.utils import inherits_from, is_iter
__all__ = ("CmdSet",)
class _CmdSetMeta(type):
"""
This metaclass makes some minor on-the-fly convenience fixes to
the cmdset class.
"""
def __init__(mcs, *args, **kwargs):
"""
Fixes some things in the cmdclass
"""
# by default we key the cmdset the same as the
# name of its class.
if not hasattr(mcs, 'key') or not mcs.key:
mcs.key = mcs.__name__
mcs.path = "%s.%s" % (mcs.__module__, mcs.__name__)
if not type(mcs.key_mergetypes) == dict:
mcs.key_mergetypes = {}
super(_CmdSetMeta, mcs).__init__(*args, **kwargs)
class CmdSet(object):
"""
This class describes a unique cmdset that understands priorities. CmdSets
can be merged and made to perform various set operations on each other.
CmdSets have priorities that affect which of their ingoing commands gets used.
In the examples, cmdset A always have higher priority than cmdset B.
key - the name of the cmdset. This can be used on its own for game operations
mergetype (partly from Set theory):
Union - The two command sets are merged so that as many
commands as possible of each cmdset ends up in the
merged cmdset. Same-name commands are merged by
priority. This is the most common default.
Ex: A1,A3 + B1,B2,B4,B5 = A1,B2,A3,B4,B5
Intersect - Only commands found in *both* cmdsets
(i.e. which have same names) end up in the merged
cmdset, with the higher-priority cmdset replacing the
lower one. Ex: A1,A3 + B1,B2,B4,B5 = A1
Replace - The commands of this cmdset completely replaces
the lower-priority cmdset's commands, regardless
of if same-name commands exist.
Ex: A1,A3 + B1,B2,B4,B5 = A1,A3
Remove - This removes the relevant commands from the
lower-priority cmdset completely. They are not
             replaced with anything, so this in effect uses the
high-priority cmdset as a filter to affect the
low-priority cmdset.
Ex: A1,A3 + B1,B2,B4,B5 = B2,B4,B5
Note: Commands longer than 2 characters and starting
          with double underscores, like '__noinput_command'
are considered 'system commands' and are
          exempt from all merge operations - they are
ALWAYS included across mergers and only affected
if same-named system commands replace them.
priority- All cmdsets are always merged in pairs of two so that
the higher set's mergetype is applied to the
lower-priority cmdset. Default commands have priority 0,
high-priority ones like Exits and Channels have 10 and 9. Priorities
can be negative as well to give default commands preference.
duplicates - determines what happens when two sets of equal
priority merge. Default has the first of them in the
merger (i.e. A above) automatically taking
                 precedence. But if duplicates is true, the
result will be a merger with more than one of each
name match. This will usually lead to the player
receiving a multiple-match error higher up the road,
but can be good for things like cmdsets on non-player
objects in a room, to allow the system to warn that
more than one 'ball' in the room has the same 'kick'
command defined on it, so it may offer a chance to
select which ball to kick ... Allowing duplicates
only makes sense for Union and Intersect, the setting
is ignored for the other mergetypes.
key_mergetype (dict) - allows the cmdset to define a unique
mergetype for particular cmdsets. Format is
{CmdSetkeystring:mergetype}. Priorities still apply.
                 Example: {'Myevilcmdset': 'Replace'} which would make
sure for this set to always use 'Replace' on
Myevilcmdset no matter what overall mergetype this set
has.
no_objs - don't include any commands from nearby objects
when searching for suitable commands
no_exits - ignore the names of exits when matching against
commands
no_channels - ignore the name of channels when matching against
commands (WARNING- this is dangerous since the
player can then not even ask staff for help if
something goes wrong)
"""
__metaclass__ = _CmdSetMeta
key = "Unnamed CmdSet"
mergetype = "Union"
priority = 0
duplicates = False
key_mergetypes = {}
no_exits = False
no_objs = False
no_channels = False
permanent = False
# pre-store properties to duplicate straight off
to_duplicate = ("key", "cmdsetobj", "no_exits", "no_objs", "no_channels", "permanent",
"mergetype", "priority", "duplicates")
def __init__(self, cmdsetobj=None, key=None):
"""
Creates a new CmdSet instance.
cmdsetobj - this is the database object to which this particular
instance of cmdset is related. It is often a player but may also be a
regular object.
"""
if key:
self.key = key
self.commands = []
self.system_commands = []
self.actual_mergetype = self.mergetype
self.cmdsetobj = cmdsetobj
# initialize system
self.at_cmdset_creation()
self._contains_cache = {}
# Priority-sensitive merge operations for cmdsets
def _union(self, cmdset_a, cmdset_b, duplicates=False):
"C = A U B. CmdSet A is assumed to have higher priority"
cmdset_c = cmdset_a._duplicate()
# we make copies, not refs by use of [:]
cmdset_c.commands = cmdset_a.commands[:]
if duplicates and cmdset_a.priority == cmdset_b.priority:
cmdset_c.commands.extend(cmdset_b.commands)
else:
cmdset_c.commands.extend([cmd for cmd in cmdset_b if not cmd in cmdset_a])
return cmdset_c
def _intersect(self, cmdset_a, cmdset_b, duplicates=False):
"C = A (intersect) B. A is assumed higher priority"
cmdset_c = cmdset_a._duplicate()
if duplicates and cmdset_a.priority == cmdset_b.priority:
for cmd in [cmd for cmd in cmdset_a if cmd in cmdset_b]:
cmdset_c.add(cmd)
cmdset_c.add(cmdset_b.get(cmd))
else:
cmdset_c.commands = [cmd for cmd in cmdset_a if cmd in cmdset_b]
return cmdset_c
def _replace(self, cmdset_a, cmdset_b, cmdset_c):
"C = A + B where the result is A."
cmdset_c = cmdset_a._duplicate()
cmdset_c.commands = cmdset_a.commands[:]
return cmdset_c
def _remove(self, cmdset_a, cmdset_b, cmdset_c):
"C = A + B, where B is filtered by A"
cmdset_c = cmdset_a._duplicate()
cmdset_c.commands = [cmd for cmd in cmdset_b if not cmd in cmdset_a]
return cmdset_c
def _instantiate(self, cmd):
"""
        Checks that the object is an instantiated command
        and not, say, a command class. If it is a class, instantiate it.
Other types, like strings, are passed through.
"""
try:
return cmd()
except TypeError:
return cmd
def _duplicate(self):
"""
Returns a new cmdset with the same settings as this one
(no actual commands are copied over)
"""
cmdset = CmdSet()
for key, val in ((key, getattr(self, key)) for key in self.to_duplicate):
if val != getattr(cmdset, key):
# only copy if different from default; avoid turning class-vars into instance vars
setattr(cmdset, key, val)
cmdset.key_mergetypes = self.key_mergetypes.copy()
return cmdset
#cmdset = self.__class__()
#cmdset.__dict__.update(dict((key, val) for key, val in self.__dict__.items() if key in self.to_duplicate))
#cmdset.key_mergetypes = self.key_mergetypes.copy() #copy.deepcopy(self.key_mergetypes)
#return cmdset
def __str__(self):
"""
Show all commands in cmdset when printing it.
"""
return ", ".join([str(cmd) for cmd in sorted(self.commands, key=lambda o:o.key)])
def __iter__(self):
"""
Allows for things like 'for cmd in cmdset':
"""
return iter(self.commands)
def __contains__(self, othercmd):
"""
Returns True if this cmdset contains the given command (as defined
by command name and aliases). This allows for things like 'if cmd in cmdset'
"""
ret = self._contains_cache.get(othercmd)
if ret == None:
ret = othercmd in self.commands
self._contains_cache[othercmd] = ret
return ret
def __add__(self, cmdset_b):
"""
Merge this cmdset (A) with another cmdset (B) using the + operator,
C = A + B
Here, we (by convention) say that 'A is merged onto B to form
C'. The actual merge operation used in the 'addition' depends
on which priorities A and B have. The one of the two with the
highest priority will apply and give its properties to C. In
the case of a tie, A takes priority and replaces the
same-named commands in B unless A has the 'duplicate' variable
set (which means both sets' commands are kept).
"""
# It's okay to merge with None
if not cmdset_b:
return self
sys_commands_a = self.get_system_cmds()
sys_commands_b = cmdset_b.get_system_cmds()
if self.priority >= cmdset_b.priority:
# A higher or equal priority than B
# preserve system __commands
sys_commands = sys_commands_a + [cmd for cmd in sys_commands_b if cmd not in sys_commands_a]
mergetype = self.key_mergetypes.get(cmdset_b.key, self.mergetype)
if mergetype == "Intersect":
cmdset_c = self._intersect(self, cmdset_b, cmdset_b.duplicates)
elif mergetype == "Replace":
cmdset_c = self._replace(self, cmdset_b, cmdset_b.duplicates)
elif mergetype == "Remove":
cmdset_c = self._remove(self, cmdset_b, cmdset_b.duplicates)
else: # Union
cmdset_c = self._union(self, cmdset_b, cmdset_b.duplicates)
cmdset_c.no_channels = self.no_channels
cmdset_c.no_exits = self.no_exits
cmdset_c.no_objs = self.no_objs
else:
# B higher priority than A
            # preserve system __commands
sys_commands = sys_commands_b + [cmd for cmd in sys_commands_a if cmd not in sys_commands_b]
mergetype = cmdset_b.key_mergetypes.get(self.key, cmdset_b.mergetype)
if mergetype == "Intersect":
cmdset_c = self._intersect(cmdset_b, self, self.duplicates)
elif mergetype == "Replace":
cmdset_c = self._replace(cmdset_b, self, self.duplicates)
elif mergetype == "Remove":
                cmdset_c = self._remove(cmdset_b, self, self.duplicates)
else: # Union
cmdset_c = self._union(cmdset_b, self, self.duplicates)
cmdset_c.no_channels = cmdset_b.no_channels
cmdset_c.no_exits = cmdset_b.no_exits
cmdset_c.no_objs = cmdset_b.no_objs
# we store actual_mergetype since key_mergetypes
# might be different from the main mergetype.
# This is used for diagnosis.
cmdset_c.actual_mergetype = mergetype
# return the system commands to the cmdset
cmdset_c.add(sys_commands)
return cmdset_c
def add(self, cmd):
"""
Add a command, a list of commands or a cmdset to this cmdset.
Note that if cmd already exists in set,
it will replace the old one (no priority checking etc
at this point; this is often used to overload
default commands).
If cmd is another cmdset class or -instance, the commands
of that command set is added to this one, as if they were part
of the original cmdset definition. No merging or priority checks
are made, rather later added commands will simply replace
existing ones to make a unique set.
"""
if inherits_from(cmd, "src.commands.cmdset.CmdSet"):
# cmd is a command set so merge all commands in that set
# to this one. We raise a visible error if we created
# an infinite loop (adding cmdset to itself somehow)
try:
cmd = self._instantiate(cmd)
except RuntimeError:
string = "Adding cmdset %(cmd)s to %(class)s lead to an infinite loop. When adding a cmdset to another, "
string += "make sure they are not themself cyclically added to the new cmdset somewhere in the chain."
raise RuntimeError(_(string) % {"cmd":cmd, "class":self.__class__})
cmds = cmd.commands
elif is_iter(cmd):
cmds = [self._instantiate(c) for c in cmd]
else:
cmds = [self._instantiate(cmd)]
commands = self.commands
system_commands = self.system_commands
for cmd in cmds:
# add all commands
if not hasattr(cmd, 'obj'):
cmd.obj = self.cmdsetobj
try:
ic = commands.index(cmd)
commands[ic] = cmd # replace
except ValueError:
commands.append(cmd)
# extra run to make sure to avoid doublets
self.commands = list(set(commands))
#print "In cmdset.add(cmd):", self.key, cmd
# add system_command to separate list as well,
# for quick look-up
if cmd.key.startswith("__"):
try:
ic = system_commands.index(cmd)
system_commands[ic] = cmd # replace
except ValueError:
system_commands.append(cmd)
def remove(self, cmd):
"""
Remove a command instance from the cmdset.
cmd can be either a cmd instance or a key string.
"""
cmd = self._instantiate(cmd)
self.commands = [oldcmd for oldcmd in self.commands if oldcmd != cmd]
def get(self, cmd):
"""
Return the command in this cmdset that matches the
given command. cmd may be either a command instance or
a key string.
"""
cmd = self._instantiate(cmd)
for thiscmd in self.commands:
if thiscmd == cmd:
return thiscmd
def count(self):
"Return number of commands in set"
return len(self.commands)
def get_system_cmds(self):
"""
Return system commands in the cmdset, defined as
commands starting with double underscore __.
        These are exempt from merge operations.
"""
return self.system_commands
#return [cmd for cmd in self.commands if cmd.key.startswith('__')]
def make_unique(self, caller):
"""
This is an unsafe command meant to clean out a cmdset of
doublet commands after it has been created. It is useful
for commands inheriting cmdsets from the cmdhandler where
obj-based cmdsets always are added double. Doublets will
be weeded out with preference to commands defined on caller,
otherwise just by first-come-first-served.
"""
unique = {}
for cmd in self.commands:
if cmd.key in unique:
ocmd = unique[cmd.key]
if (hasattr(cmd, 'obj') and cmd.obj == caller) and not \
(hasattr(ocmd, 'obj') and ocmd.obj == caller):
unique[cmd.key] = cmd
else:
unique[cmd.key] = cmd
self.commands = unique.values()
def get_all_cmd_keys_and_aliases(self, caller=None):
"""
Returns a list of all command keys and aliases
available in this cmdset. If caller is given, the
        commands are checked for access on the "call" type
before being returned.
"""
names = []
if caller:
[names.extend(cmd._keyaliases) for cmd in self.commands if cmd.access(caller)]
else:
[names.extend(cmd._keyaliases) for cmd in self.commands]
return names
def at_cmdset_creation(self):
"""
Hook method - this should be overloaded in the inheriting
class, and should take care of populating the cmdset
by use of self.add().
"""
pass
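To make the merge rules concrete, here is a small illustration (an editorial addition, not part of the module). It reuses the ``CmdSet`` class above, so it assumes the surrounding ``src`` package is importable (``add()`` calls ``inherits_from``/``is_iter``); the ``_Dummy`` command is a deliberately simplified stand-in for a real Command class.
class _Dummy(object):
    "Throw-away stand-in for a command; real commands subclass src.commands.command.Command."
    def __init__(self, key):
        self.key = key
    def __eq__(self, other):
        return self.key == getattr(other, "key", None)
    def __hash__(self):
        return hash(self.key)
    def __str__(self):
        return self.key

def _merge_demo():
    "Union keeps as much as possible; Replace keeps only the higher-priority set."
    a = CmdSet()
    a.key, a.priority = "A", 1
    a.add([_Dummy("look"), _Dummy("get")])
    b = CmdSet()
    b.key, b.priority = "B", 0
    b.add([_Dummy("look"), _Dummy("drop")])
    union = a + b                  # A's 'look' wins; result: drop, get, look
    a.mergetype = "Replace"
    replaced = a + b               # only A's commands survive: get, look
    return str(union), str(replaced)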
| 41.635535
| 121
| 0.598917
|
a9b9a871d28a4764dfb7693ea647326d7012450b
| 277
|
py
|
Python
|
cyder/cydhcp/workgroup/forms.py
|
drkitty/cyder
|
1babc443cc03aa51fa3c1015bcd22f0ea2e5f0f8
|
[
"BSD-3-Clause"
] | 6
|
2015-04-16T23:18:22.000Z
|
2020-08-25T22:50:13.000Z
|
cyder/cydhcp/workgroup/forms.py
|
drkitty/cyder
|
1babc443cc03aa51fa3c1015bcd22f0ea2e5f0f8
|
[
"BSD-3-Clause"
] | 267
|
2015-01-01T00:18:57.000Z
|
2015-10-14T00:01:13.000Z
|
cyder/cydhcp/workgroup/forms.py
|
drkitty/cyder
|
1babc443cc03aa51fa3c1015bcd22f0ea2e5f0f8
|
[
"BSD-3-Clause"
] | 5
|
2015-03-23T00:57:09.000Z
|
2019-09-09T22:42:37.000Z
|
from django import forms
from cyder.base.eav.forms import get_eav_form
from cyder.cydhcp.workgroup.models import Workgroup, WorkgroupAV
class WorkgroupForm(forms.ModelForm):
class Meta:
model = Workgroup
WorkgroupAVForm = get_eav_form(WorkgroupAV, Workgroup)
| 19.785714
| 64
| 0.787004
|
ef4daf5108e3ab137d61fdff23b3061d541c285c
| 4,730
|
py
|
Python
|
package/scripts/prmon_compress_output.py
|
nikoladze/prmon
|
5f69f056e47119d2ed8a9379d4f4ad4290f27457
|
[
"Apache-2.0"
] | 35
|
2018-03-28T11:32:16.000Z
|
2022-03-11T19:05:11.000Z
|
package/scripts/prmon_compress_output.py
|
nikoladze/prmon
|
5f69f056e47119d2ed8a9379d4f4ad4290f27457
|
[
"Apache-2.0"
] | 166
|
2018-03-13T20:19:09.000Z
|
2022-01-31T08:54:22.000Z
|
package/scripts/prmon_compress_output.py
|
HEP-SF/prmon
|
33ce283183cbb31dc779f43fe899d26a699b1cc6
|
[
"Apache-2.0"
] | 13
|
2018-03-16T09:37:26.000Z
|
2022-01-26T07:38:39.000Z
|
#! /usr/bin/env python3
"""prmon output smart compression script"""
import argparse
import os
import sys
try:
import pandas as pd
except ImportError:
print("ERROR:: This script needs pandas.")
sys.exit(-1)
MEMORY_IO_NETWORK_GPU_CPU = [
"vmem",
"pss",
"rss",
"swap",
"rchar",
"wchar",
"read_bytes",
"write_bytes",
"rx_packets",
"tx_packets",
"rx_bytes",
"tx_bytes",
"gpufbmem",
"gpumempct",
"gpusmpct",
"utime",
"stime",
]
NPROCS_NTHREADS_NGPUS = ["nprocs", "nthreads", "ngpus"]
def interp_drop(p1, p2, p3, eps):
"""Computesinterpolation and checks if middle point falls within threshold"""
t = p1[1] + (p3[1] - p1[1]) / (p3[0] - p1[0]) * (p2[0] - p1[0])
return abs(t - p2[1]) < eps
def reduce_changing_metric(df, metric, precision):
"""Iteratively compress metric"""
metric_series = df[metric]
metric_redux = metric_series.copy()
dyn_range = metric_series.max() - metric_series.min()
eps = dyn_range * precision
idx = 0
while True:
metriclen = len(metric_redux)
if idx == metriclen - 2:
break
p1 = (metric_redux.index[idx], metric_redux.iloc[idx])
p2 = (metric_redux.index[idx + 1], metric_redux.iloc[idx + 1])
p3 = (metric_redux.index[idx + 2], metric_redux.iloc[idx + 2])
if interp_drop(p1, p2, p3, eps):
metric_redux = metric_redux.drop(metric_redux.index[idx + 1])
else:
idx += 1
return metric_redux
def reduce_steady_metric(df, metric):
"""For more steady metrics just keep the changing points"""
metric = df[metric]
return metric[metric != metric.shift(1)]
def compress_prmon_output(df, precision, skip_interpolate):
"""Compress full df. Final index is the union of the compressed series indexes.
Points without values for a series are either linearly interpolated,
for fast-changing metrics, or forward-filled, for steady metrics"""
if len(df) > 2:
present_changing_metrics = [
metric for metric in MEMORY_IO_NETWORK_GPU_CPU if metric in df.columns
]
present_steady_metrics = [
metric for metric in NPROCS_NTHREADS_NGPUS if metric in df.columns
]
reduced_changing_metrics = [
reduce_changing_metric(df, metric, precision)
for metric in present_changing_metrics
]
reduced_steady_metrics = [
reduce_steady_metric(df, metric) for metric in present_steady_metrics
]
final_df = pd.concat(reduced_changing_metrics + reduced_steady_metrics, axis=1)
if not skip_interpolate:
final_df[present_changing_metrics] = final_df[
present_changing_metrics
].interpolate(method="index")
final_df[present_steady_metrics] = final_df[present_steady_metrics].ffill(
downcast="infer"
)
final_df = final_df.round(0)
final_df = final_df.astype("Int64", errors="ignore")
return final_df
return df
def main():
"""Main compression function"""
parser = argparse.ArgumentParser(
description="Configurable smart compression script"
)
parser.add_argument(
"--input",
type=str,
default="prmon.txt",
help="PrMon TXT output that will be used as input",
)
parser.add_argument(
"--output",
type=str,
default="prmon_compressed.txt",
help="name of the output compressed text file",
)
parser.add_argument(
"--precision",
type=lambda x: float(x)
if 0 < float(x) < 1
else parser.exit(-1, "Precision must be strictly between 0 and 1"),
default=0.05,
help="precision value for interpolation threshold",
)
parser.add_argument(
"--skip-interpolate",
default=False,
action="store_true",
help="""Whether to skip interpolation of the final obtained df,
and leave NAs for the different metrics""",
)
parser.add_argument(
"--delete-original",
default=False,
action="store_true",
help="""Add this to delete the original, uncompressed
file""",
)
args = parser.parse_args()
df = pd.read_csv(
args.input, sep="\t", index_col="Time", engine="c", na_filter=False
)
compressed_df = compress_prmon_output(df, args.precision, args.skip_interpolate)
compressed_df["wtime"] = df[df.index.isin(compressed_df.index)]["wtime"]
compressed_df.to_csv(args.output, sep="\t")
if args.delete_original:
os.remove(args.input)
if "__main__" in __name__:
main()
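The heart of the compression is the three-point test in ``interp_drop``: a middle sample is dropped when linear interpolation between its neighbours reproduces it to within ``eps`` (the metric's dynamic range times the requested precision). A self-contained illustration of that criterion, with made-up numbers:
p1, p2, p3 = (0, 100.0), (10, 149.0), (20, 200.0)   # (time, metric) samples
eps = (200.0 - 100.0) * 0.05                         # dynamic range * precision
predicted = p1[1] + (p3[1] - p1[1]) / (p3[0] - p1[0]) * (p2[0] - p1[0])
print(predicted, abs(predicted - p2[1]) < eps)       # 150.0 True -> p2 can be dropped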
| 29.197531
| 87
| 0.62093
|
0c121c218979c0f4722383e78010490072f227c0
| 393
|
py
|
Python
|
backend/chat_user_profile/api/v1/urls.py
|
crowdbotics-apps/hey-neighbor-30236
|
be2e66cbfe274f6e01584bf1e54e661b75eaa0de
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/chat_user_profile/api/v1/urls.py
|
crowdbotics-apps/hey-neighbor-30236
|
be2e66cbfe274f6e01584bf1e54e661b75eaa0de
|
[
"FTL",
"AML",
"RSA-MD"
] | 20
|
2021-09-06T14:23:04.000Z
|
2022-03-13T17:37:36.000Z
|
backend/chat_user_profile/api/v1/urls.py
|
crowdbotics-apps/textme-30334
|
120bb87693a98e352c36fd116b4426e209b773ba
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .viewsets import VerificationCodeViewSet, ProfileViewSet, ContactViewSet
router = DefaultRouter()
router.register("verificationcode", VerificationCodeViewSet)
router.register("contact", ContactViewSet)
router.register("profile", ProfileViewSet)
urlpatterns = [
path("", include(router.urls)),
]
| 30.230769
| 77
| 0.80916
|
cb06ba14954d7ce3bf33f4de78f403ffff03cf98
| 15,930
|
py
|
Python
|
raiden_contracts/tests/test_token_network.py
|
karlb/raiden-contracts
|
944eb6aa4cc0189caab5b735b46bb6fb72ad5658
|
[
"MIT"
] | null | null | null |
raiden_contracts/tests/test_token_network.py
|
karlb/raiden-contracts
|
944eb6aa4cc0189caab5b735b46bb6fb72ad5658
|
[
"MIT"
] | null | null | null |
raiden_contracts/tests/test_token_network.py
|
karlb/raiden-contracts
|
944eb6aa4cc0189caab5b735b46bb6fb72ad5658
|
[
"MIT"
] | null | null | null |
from typing import Callable
import pytest
from eth_tester.exceptions import TransactionFailed
from web3 import Web3
from web3.contract import Contract
from raiden_contracts.constants import (
EMPTY_ADDRESS,
TEST_SETTLE_TIMEOUT_MAX,
TEST_SETTLE_TIMEOUT_MIN,
)
from raiden_contracts.tests.utils.constants import NOT_ADDRESS, UINT256_MAX
def test_constructor_call(
web3: Web3,
get_token_network: Callable,
custom_token: Contract,
secret_registry_contract: Contract,
get_accounts: Callable,
channel_participant_deposit_limit: int,
token_network_deposit_limit: int,
) -> None:
""" Try to deploy TokenNetwork with various wrong arguments """
(A, deprecation_executor) = get_accounts(2)
chain_id = web3.eth.chain_id
settle_min = TEST_SETTLE_TIMEOUT_MIN
settle_max = TEST_SETTLE_TIMEOUT_MAX
# failure with no arguments
with pytest.raises(TypeError):
get_token_network([])
# failures with integers instead of a Token address
with pytest.raises(TypeError):
get_token_network(
[
3,
secret_registry_contract.address,
chain_id,
settle_min,
settle_max,
deprecation_executor,
channel_participant_deposit_limit,
token_network_deposit_limit,
]
)
with pytest.raises(TypeError):
get_token_network(
[
0,
secret_registry_contract.address,
chain_id,
settle_min,
settle_max,
deprecation_executor,
channel_participant_deposit_limit,
token_network_deposit_limit,
]
)
# failures with non-address strings instead of a Token address
with pytest.raises(TypeError):
get_token_network(
[
"",
secret_registry_contract.address,
chain_id,
settle_min,
settle_max,
deprecation_executor,
channel_participant_deposit_limit,
token_network_deposit_limit,
]
)
with pytest.raises(TypeError):
get_token_network(
[
NOT_ADDRESS,
secret_registry_contract.address,
chain_id,
settle_min,
settle_max,
deprecation_executor,
channel_participant_deposit_limit,
token_network_deposit_limit,
]
)
# failures with integers instead of a SecretRegistry address
with pytest.raises(TypeError):
get_token_network(
[
custom_token.address,
3,
chain_id,
settle_min,
settle_max,
deprecation_executor,
channel_participant_deposit_limit,
token_network_deposit_limit,
]
)
with pytest.raises(TypeError):
get_token_network(
[
custom_token.address,
0,
chain_id,
settle_min,
settle_max,
deprecation_executor,
channel_participant_deposit_limit,
token_network_deposit_limit,
]
)
# failures with non-address strings instead of a SecretRegistry address
with pytest.raises(TypeError):
get_token_network(
[
custom_token.address,
"",
chain_id,
settle_min,
settle_max,
deprecation_executor,
channel_participant_deposit_limit,
token_network_deposit_limit,
]
)
with pytest.raises(TypeError):
get_token_network(
[
custom_token.address,
NOT_ADDRESS,
chain_id,
settle_min,
settle_max,
deprecation_executor,
channel_participant_deposit_limit,
token_network_deposit_limit,
]
)
# failures with invalid chain_id
with pytest.raises(TypeError):
get_token_network(
[
custom_token.address,
secret_registry_contract.address,
"",
settle_min,
settle_max,
deprecation_executor,
channel_participant_deposit_limit,
token_network_deposit_limit,
]
)
with pytest.raises(TypeError):
get_token_network(
[
custom_token.address,
secret_registry_contract.address,
-3,
settle_min,
settle_max,
deprecation_executor,
channel_participant_deposit_limit,
token_network_deposit_limit,
]
)
# failures with invalid settle_min
with pytest.raises(TypeError):
get_token_network(
[
custom_token.address,
secret_registry_contract.address,
chain_id,
"",
settle_max,
deprecation_executor,
channel_participant_deposit_limit,
token_network_deposit_limit,
]
)
with pytest.raises(TypeError):
get_token_network(
[
custom_token.address,
secret_registry_contract.address,
chain_id,
-3,
settle_max,
deprecation_executor,
channel_participant_deposit_limit,
token_network_deposit_limit,
]
)
# failures with invalid settle_max
with pytest.raises(TypeError):
get_token_network(
[
custom_token.address,
secret_registry_contract.address,
chain_id,
settle_min,
"",
deprecation_executor,
channel_participant_deposit_limit,
token_network_deposit_limit,
]
)
with pytest.raises(TypeError):
get_token_network(
[
custom_token.address,
secret_registry_contract.address,
chain_id,
settle_min,
-3,
deprecation_executor,
channel_participant_deposit_limit,
token_network_deposit_limit,
]
)
# failures with Ethereum addresses that don't contain a Token contract
with pytest.raises(TransactionFailed):
get_token_network(
_token_address=EMPTY_ADDRESS,
_secret_registry=secret_registry_contract.address,
_chain_id=chain_id,
_settlement_timeout_min=TEST_SETTLE_TIMEOUT_MIN,
_settlement_timeout_max=TEST_SETTLE_TIMEOUT_MAX,
_deprecation_executor=deprecation_executor,
_channel_participant_deposit_limit=channel_participant_deposit_limit,
_token_network_deposit_limit=token_network_deposit_limit,
)
with pytest.raises(TransactionFailed):
get_token_network(
_token_address=A,
_secret_registry=secret_registry_contract.address,
_chain_id=chain_id,
_settlement_timeout_min=TEST_SETTLE_TIMEOUT_MIN,
_settlement_timeout_max=TEST_SETTLE_TIMEOUT_MAX,
_deprecation_executor=deprecation_executor,
_channel_participant_deposit_limit=channel_participant_deposit_limit,
_token_network_deposit_limit=token_network_deposit_limit,
)
with pytest.raises(TransactionFailed):
get_token_network(
_token_address=secret_registry_contract.address,
_secret_registry=secret_registry_contract.address,
_chain_id=chain_id,
_settlement_timeout_min=TEST_SETTLE_TIMEOUT_MIN,
_settlement_timeout_max=TEST_SETTLE_TIMEOUT_MAX,
_deprecation_executor=deprecation_executor,
_channel_participant_deposit_limit=channel_participant_deposit_limit,
_token_network_deposit_limit=token_network_deposit_limit,
)
# failures with Ethereum addresses that don't contain the SecretRegistry contract
with pytest.raises(TransactionFailed):
get_token_network(
_token_address=custom_token.address,
_secret_registry=EMPTY_ADDRESS,
_chain_id=chain_id,
_settlement_timeout_min=TEST_SETTLE_TIMEOUT_MIN,
_settlement_timeout_max=TEST_SETTLE_TIMEOUT_MAX,
_deprecation_executor=deprecation_executor,
_channel_participant_deposit_limit=channel_participant_deposit_limit,
_token_network_deposit_limit=token_network_deposit_limit,
)
with pytest.raises(TransactionFailed):
get_token_network(
_token_address=custom_token.address,
_secret_registry=A,
_chain_id=chain_id,
_settlement_timeout_min=TEST_SETTLE_TIMEOUT_MIN,
_settlement_timeout_max=TEST_SETTLE_TIMEOUT_MAX,
_deprecation_executor=deprecation_executor,
_channel_participant_deposit_limit=channel_participant_deposit_limit,
_token_network_deposit_limit=token_network_deposit_limit,
)
# failure with chain_id zero
with pytest.raises(TransactionFailed):
get_token_network(
_token_address=custom_token.address,
_secret_registry=secret_registry_contract.address,
_chain_id=0,
_settlement_timeout_min=TEST_SETTLE_TIMEOUT_MIN,
_settlement_timeout_max=TEST_SETTLE_TIMEOUT_MAX,
_deprecation_executor=deprecation_executor,
_channel_participant_deposit_limit=channel_participant_deposit_limit,
_token_network_deposit_limit=token_network_deposit_limit,
)
# failure with a timeout min and max swapped
with pytest.raises(TransactionFailed):
get_token_network(
_token_address=custom_token.address,
_secret_registry=secret_registry_contract.address,
_chain_id=chain_id,
_settlement_timeout_min=TEST_SETTLE_TIMEOUT_MAX,
_settlement_timeout_max=TEST_SETTLE_TIMEOUT_MIN,
_deprecation_executor=deprecation_executor,
_channel_participant_deposit_limit=channel_participant_deposit_limit,
_token_network_deposit_limit=token_network_deposit_limit,
)
# failure with settle_timeout_min being zero
with pytest.raises(TransactionFailed):
get_token_network(
_token_address=custom_token.address,
_secret_registry=secret_registry_contract.address,
_chain_id=chain_id,
_settlement_timeout_min=0,
_settlement_timeout_max=TEST_SETTLE_TIMEOUT_MIN,
_deprecation_executor=deprecation_executor,
_channel_participant_deposit_limit=channel_participant_deposit_limit,
_token_network_deposit_limit=token_network_deposit_limit,
)
# failure with settle_timeout_max being zero
with pytest.raises(TransactionFailed):
get_token_network(
_token_address=custom_token.address,
_secret_registry=secret_registry_contract.address,
_chain_id=chain_id,
_settlement_timeout_min=TEST_SETTLE_TIMEOUT_MIN,
_settlement_timeout_max=0,
_deprecation_executor=deprecation_executor,
_channel_participant_deposit_limit=channel_participant_deposit_limit,
_token_network_deposit_limit=token_network_deposit_limit,
)
# failure with channel_participant_deposit_limit being zero
with pytest.raises(TransactionFailed):
get_token_network(
_token_address=custom_token.address,
_secret_registry=secret_registry_contract.address,
_chain_id=chain_id,
_settlement_timeout_min=TEST_SETTLE_TIMEOUT_MIN,
_settlement_timeout_max=TEST_SETTLE_TIMEOUT_MAX,
_deprecation_executor=deprecation_executor,
_channel_participant_deposit_limit=0,
_token_network_deposit_limit=token_network_deposit_limit,
)
# failure with both limits being zero
with pytest.raises(TransactionFailed):
get_token_network(
_token_address=custom_token.address,
_secret_registry=secret_registry_contract.address,
_chain_id=chain_id,
_settlement_timeout_min=TEST_SETTLE_TIMEOUT_MIN,
_settlement_timeout_max=TEST_SETTLE_TIMEOUT_MAX,
_deprecation_executor=deprecation_executor,
_channel_participant_deposit_limit=0,
_token_network_deposit_limit=0,
)
# failure with channel_participant_deposit_limit being bigger than
# token_network_deposit_limit.
with pytest.raises(TransactionFailed):
get_token_network(
_token_address=custom_token.address,
_secret_registry=secret_registry_contract.address,
_chain_id=chain_id,
_settlement_timeout_min=TEST_SETTLE_TIMEOUT_MIN,
_settlement_timeout_max=TEST_SETTLE_TIMEOUT_MAX,
_deprecation_executor=deprecation_executor,
_channel_participant_deposit_limit=token_network_deposit_limit,
_token_network_deposit_limit=channel_participant_deposit_limit,
)
# see a success to make sure that the above failures are meaningful
get_token_network(
_token_address=custom_token.address,
_secret_registry=secret_registry_contract.address,
_chain_id=chain_id,
_settlement_timeout_min=TEST_SETTLE_TIMEOUT_MIN,
_settlement_timeout_max=TEST_SETTLE_TIMEOUT_MAX,
_deprecation_executor=deprecation_executor,
_channel_participant_deposit_limit=channel_participant_deposit_limit,
_token_network_deposit_limit=token_network_deposit_limit,
)
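# A possible refactor sketch (not part of the original test suite): the repeated
# failure cases above could be driven by a single helper that applies keyword
# overrides to a known-good argument set. 'base_kwargs' and 'overrides' are
# hypothetical names introduced only for illustration.
def assert_constructor_failures(get_token_network, base_kwargs, overrides):
    # each entry in 'overrides' swaps one or more arguments for an invalid value
    for override in overrides:
        with pytest.raises(TransactionFailed):
            get_token_network(**{**base_kwargs, **override})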
def test_token_network_variables(
token_network: Contract, token_network_test_utils: Contract
) -> None:
""" Check values of storage variables of the TokenNetwork contract """
max_safe_uint256 = token_network_test_utils.functions.get_max_safe_uint256().call()
assert token_network.functions.MAX_SAFE_UINT256().call() == max_safe_uint256
assert max_safe_uint256 == UINT256_MAX
assert token_network.functions.channel_counter().call() == 0
def test_token_network_utils_variables(token_network_utils_library: Contract) -> None:
assert (
token_network_utils_library.functions.signature_prefix().call()
== "\x19Ethereum Signed Message:\n"
)
@pytest.mark.usefixtures("no_token_network")
def test_constructor_not_registered(
custom_token: Contract,
secret_registry_contract: Contract,
token_network_registry_contract: Contract,
token_network_external: Contract,
) -> None:
""" Check that the TokenNetwork refers to the right Token address and chain_id """
token_network = token_network_external
assert token_network.functions.token().call() == custom_token.address
assert token_network.functions.secret_registry().call() == secret_registry_contract.address
assert (
token_network.functions.chain_id().call()
== token_network_registry_contract.functions.chain_id().call()
)
# The TokenNetworkRegistry doesn't know about the TokenNetwork
assert (
token_network_registry_contract.functions.token_to_token_networks(
custom_token.address
).call()
== EMPTY_ADDRESS
)
| 36.287016 | 95 | 0.644382 |

f5cae8f14fd8f8e9f5d805da7ed2132de928b789 | 4,771 | py | Python | tests/test_indexes.py | zikphil/umongo | ab96ae76c56cfa1c636ff965096a2f3a19a3efd0 | ["MIT"] | 1 | 2021-04-28T12:15:42.000Z | 2021-04-28T12:15:42.000Z | tests/test_indexes.py | AndsoiIo/umongo | bded0bade1d8d3ec1414171f3f1c337fec6ebe81 | ["MIT"] | null | null | null | tests/test_indexes.py | AndsoiIo/umongo | bded0bade1d8d3ec1414171f3f1c337fec6ebe81 | ["MIT"] | null | null | null |
from itertools import zip_longest
import pytest
from umongo import Document, EmbeddedDocument, fields
from umongo.indexes import (
explicit_key, parse_index,
IndexModel, ASCENDING, DESCENDING, TEXT, HASHED)
from .common import BaseTest
def assert_indexes(indexes1, indexes2):
if hasattr(indexes1, '__iter__'):
for e1, e2 in zip_longest(indexes1, indexes2):
assert e1, "missing index %s" % e2.document
            assert e2, "too many indexes: %s" % e1.document
assert e1.document == e2.document
else:
assert indexes1.document == indexes2.document
class TestIndexes(BaseTest):
def test_parse_index(self):
for value, expected in (
('my_index', IndexModel([('my_index', ASCENDING)])),
('+my_index', IndexModel([('my_index', ASCENDING)])),
('-my_index', IndexModel([('my_index', DESCENDING)])),
('$my_index', IndexModel([('my_index', TEXT)])),
('#my_index', IndexModel([('my_index', HASHED)])),
# Compound indexes
(('index1', '-index2'), IndexModel([('index1', ASCENDING), ('index2', DESCENDING)])),
# No changes if not needed
(IndexModel([('my_index', ASCENDING)]), IndexModel([('my_index', ASCENDING)])),
# Custom index
(
{
'name': 'my-custom-index',
'key': ['+index1', '-index2'],
'sparse': True,
'unique': True,
'expireAfterSeconds': 42
},
IndexModel([('index1', ASCENDING), ('index2', DESCENDING)],
name='my-custom-index', sparse=True,
unique=True, expireAfterSeconds=42)
),
):
assert_indexes(parse_index(value), expected)
def test_explicit_key(self):
for value, expected in (
('my_index', ('my_index', ASCENDING)),
('+my_index', ('my_index', ASCENDING)),
('-my_index', ('my_index', DESCENDING)),
('$my_index', ('my_index', TEXT)),
('#my_index', ('my_index', HASHED)),
# No changes if not needed
(('my_index', ASCENDING), ('my_index', ASCENDING)),
):
assert explicit_key(value) == expected
def test_inheritance(self):
@self.instance.register
class Parent(Document):
last_name = fields.StrField()
class Meta:
indexes = ['last_name']
@self.instance.register
class Child(Parent):
first_name = fields.StrField()
class Meta:
indexes = ['-first_name']
assert_indexes(Parent.opts.indexes, [IndexModel([('last_name', ASCENDING)])])
assert_indexes(Child.opts.indexes,
[
IndexModel([('last_name', ASCENDING)]),
IndexModel([('first_name', DESCENDING), ('_cls', ASCENDING)]),
IndexModel([('_cls', ASCENDING)])
])
def test_bad_index(self):
for bad in [1, None, object()]:
with pytest.raises(TypeError) as exc:
                parse_index(bad)
assert exc.value.args[0] == (
'Index type must be <str>, <list>, <dict> or <pymongo.IndexModel>')
def test_nested_indexes(self):
"""Test multikey indexes
Note: umongo does not check that indexes entered in Meta match existing fields
"""
@self.instance.register
class Doc(Document):
class Meta:
indexes = [
'parent', 'parent.child', 'parent.child.grandchild',
]
assert_indexes(Doc.opts.indexes,
[
IndexModel([('parent', ASCENDING)]),
IndexModel([('parent.child', ASCENDING)]),
IndexModel([('parent.child.grandchild', ASCENDING)]),
])
@pytest.mark.parametrize("unique_field", ("nested", "list"))
def test_unique_indexes(self, unique_field):
@self.instance.register
class NestedDoc(EmbeddedDocument):
simple = fields.StrField(unique=True)
u_field, index = {
"nested": (
fields.EmbeddedField(NestedDoc),
IndexModel([('field.simple', ASCENDING)], unique=True, sparse=True),
),
"list": (
fields.ListField(fields.EmbeddedField(NestedDoc)),
IndexModel([('field.simple', ASCENDING)], unique=True, sparse=True),
),
}[unique_field]
@self.instance.register
class Doc(Document):
field = u_field
assert_indexes(Doc.opts.indexes, [index])
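# Illustrative sketch (not part of the test module): parse_index and explicit_key
# can also be called directly; the field names below are made up.
if __name__ == '__main__':
    print(parse_index('-created_at').document)                # single-field descending index
    print(parse_index(('user_id', '-created_at')).document)   # compound index
    print(explicit_key('#token'))                             # ('token', 'hashed')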
| 34.824818 | 97 | 0.535946 |

c5dce030a47e73168228d4df20e8b8f6914b0ddd | 537 | py | Python | Leetcode/829-consecutive_numbers_sum.py | EdwaRen/Competitve-Programming | e8bffeb457936d28c75ecfefb5a1f316c15a9b6c | ["MIT"] | 1 | 2021-05-03T21:48:25.000Z | 2021-05-03T21:48:25.000Z | Leetcode/829-consecutive_numbers_sum.py | EdwaRen/Competitve_Programming | e8bffeb457936d28c75ecfefb5a1f316c15a9b6c | ["MIT"] | null | null | null | Leetcode/829-consecutive_numbers_sum.py | EdwaRen/Competitve_Programming | e8bffeb457936d28c75ecfefb5a1f316c15a9b6c | ["MIT"] | null | null | null |
class Solution(object):
def consecutiveNumbersSum(self, N):
"""
:type N: int
:rtype: int
"""
# start off with N = N as a possibility
res = 1
# i is the number of elements in a consecutive chunk
i = 2
while i*(i+1)//2 <= N:
# n0 is the smallest consecutive sum with i elements
if (N - ((i)*(i+1)//2)) % i == 0:
res += 1
i += 1
return res
z = Solution()
N = 15
print("ans", z.consecutiveNumbersSum(N))
| 26.85 | 64 | 0.480447 |

c9b001ac3c61c69e740f07abf11395693b484515 | 269 | py | Python | cloudtools/stop.py | rbonazzola/cloudtools | 75cb0d86423b814382591206af7a543df7ff55b7 | ["MIT"] | null | null | null | cloudtools/stop.py | rbonazzola/cloudtools | 75cb0d86423b814382591206af7a543df7ff55b7 | ["MIT"] | null | null | null | cloudtools/stop.py | rbonazzola/cloudtools | 75cb0d86423b814382591206af7a543df7ff55b7 | ["MIT"] | null | null | null |
from subprocess import call
def init_parser(parser):
parser.add_argument('name', type=str, help='Cluster name.')
def main(args):
print("Stopping cluster '{}'...".format(args.name))
call(['gcloud', 'dataproc', 'clusters', 'delete', '--quiet', args.name])
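# Illustrative usage sketch (not part of the original module): wiring init_parser and
# main together with argparse; assumes the gcloud CLI is installed and authenticated.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Delete a Dataproc cluster.')
    init_parser(parser)
    main(parser.parse_args())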
| 26.9 | 76 | 0.66171 |

39a649ed407f4a54cdbfa8c14aacb047cdb117e3 | 868 | py | Python | printscreen.py | SilasPDJ/maeportifolios_desktop_etc | a341648c8161251d42055155f6fd99fd388d9f2d | ["MIT"] | null | null | null | printscreen.py | SilasPDJ/maeportifolios_desktop_etc | a341648c8161251d42055155f6fd99fd388d9f2d | ["MIT"] | null | null | null | printscreen.py | SilasPDJ/maeportifolios_desktop_etc | a341648c8161251d42055155f6fd99fd388d9f2d | ["MIT"] | null | null | null |
def executa():
from time import sleep
import pyautogui as pygui
path = r'C:\Users\Silas\OneDrive\Mae_Area de Trabalho\_PRINTSCREEN_3ANO-2020\11-11'
prossegue = True
try:
with open('counter', 'r') as file:
valor = file.read()
    except FileNotFoundError:
        # first run: no counter file yet, start numbering at 1
        valor = '1'
        with open('counter', 'w') as file:
            file.write('2')
else:
with open('counter', 'w') as file:
file.write(str(int(valor)+1))
full_path = f'{path}\\print_{valor}.jpg'
pygui.getActiveWindow().minimize()
sleep(1)
pygui.screenshot(full_path)
import tkinter as tk
root = tk.Tk()
frame = tk.Frame(root)
frame.pack()
button = tk.Button(frame,
text="TIRA PRINT AUTOMATICO",
fg="red",
command=executa)
button.pack(side=tk.LEFT)
root.mainloop()
| 23.459459 | 87 | 0.576037 |

b4966031a0787a0ef834238b532f4faa7549910a | 2,413 | py | Python | src/aks-preview/azext_aks_preview/vendored_sdks/azure_mgmt_preview_aks/v2019_04_01/models/managed_cluster_upgrade_profile.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | ["MIT"] | 2 | 2021-06-05T17:51:26.000Z | 2021-11-17T11:17:56.000Z | src/aks-preview/azext_aks_preview/vendored_sdks/azure_mgmt_preview_aks/v2019_04_01/models/managed_cluster_upgrade_profile.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | ["MIT"] | 3 | 2020-05-27T20:16:26.000Z | 2020-07-23T19:46:49.000Z | src/aks-preview/azext_aks_preview/vendored_sdks/azure_mgmt_preview_aks/v2019_04_01/models/managed_cluster_upgrade_profile.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | ["MIT"] | 5 | 2020-05-09T17:47:09.000Z | 2020-10-01T19:52:06.000Z |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ManagedClusterUpgradeProfile(Model):
"""The list of available upgrades for compute pools.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Id of upgrade profile.
:vartype id: str
:ivar name: Name of upgrade profile.
:vartype name: str
:ivar type: Type of upgrade profile.
:vartype type: str
:param control_plane_profile: Required. The list of available upgrade
versions for the control plane.
:type control_plane_profile:
~azure.mgmt.containerservice.v2019_04_01.models.ManagedClusterPoolUpgradeProfile
:param agent_pool_profiles: Required. The list of available upgrade
versions for agent pools.
:type agent_pool_profiles:
list[~azure.mgmt.containerservice.v2019_04_01.models.ManagedClusterPoolUpgradeProfile]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'control_plane_profile': {'required': True},
'agent_pool_profiles': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'control_plane_profile': {'key': 'properties.controlPlaneProfile', 'type': 'ManagedClusterPoolUpgradeProfile'},
'agent_pool_profiles': {'key': 'properties.agentPoolProfiles', 'type': '[ManagedClusterPoolUpgradeProfile]'},
}
def __init__(self, **kwargs):
super(ManagedClusterUpgradeProfile, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.control_plane_profile = kwargs.get('control_plane_profile', None)
self.agent_pool_profiles = kwargs.get('agent_pool_profiles', None)
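# Minimal usage sketch (illustrative only, not part of the generated file): msrest
# models accept the writable attributes as keyword arguments, while read-only fields
# stay None until populated by the service. The None and empty-list values below are
# placeholders; real calls would pass ManagedClusterPoolUpgradeProfile instances.
if __name__ == '__main__':
    profile = ManagedClusterUpgradeProfile(control_plane_profile=None, agent_pool_profiles=[])
    print(profile.id, profile.control_plane_profile, profile.agent_pool_profiles)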
| 38.919355 | 119 | 0.641525 |

457f71995ba4ca80041dcc038a629911e723e013 | 1,042 | py | Python | src/slippinj/cli/interactive/properties_file.py | scm-spain/slippin-jimmy | d0e52277daff523eda63f5d3137b5a990413923d | ["Apache-2.0"] | 7 | 2016-03-31T06:17:23.000Z | 2018-01-25T15:25:05.000Z | src/slippinj/cli/interactive/properties_file.py | scm-spain/slippin-jimmy | d0e52277daff523eda63f5d3137b5a990413923d | ["Apache-2.0"] | 8 | 2016-03-30T18:45:09.000Z | 2017-06-19T09:21:35.000Z | src/slippinj/cli/interactive/properties_file.py | scm-spain/slippin-jimmy | d0e52277daff523eda63f5d3137b5a990413923d | ["Apache-2.0"] | 13 | 2017-04-21T08:17:14.000Z | 2019-07-12T04:59:24.000Z |
import os
from injector import inject
class PropertiesFile(object):
"""Get the properties file to run on the cluster"""
@inject(filesystem='filesystem')
def __init__(self, filesystem):
"""
Initialize the class
:param filesystem: Filesystem
"""
super(PropertiesFile, self).__init__()
self.__filesystem = filesystem
def get(self, wf_dir):
"""
Find all the properties file inside given workflow directory
:param wf_dir: string
:return: string
"""
property_files = self.__filesystem.find_properties_file(wf_dir)
print 'Properties files found:'
print '[0] None'
for index, property_file in enumerate(property_files):
print '[{index}] {file}'.format(index=(index + 1), file=os.path.basename(property_file))
file_to_run = int(raw_input('Select which file you would like to run: '))
if file_to_run > 0:
return property_files[(file_to_run - 1)]
return False
| 28.162162 | 100 | 0.626679 |

86bbf1f13f452312e9538f84fb98e059928f2d3c | 95 | py | Python | Tutorials/01. 10 Days of Statistics/013. Day 5 - Normal Distribution II.py | stonehengee/HackerrankPractice | ec052e7447391e40d1919cf0b641ff5023da3da3 | ["MIT"] | null | null | null | Tutorials/01. 10 Days of Statistics/013. Day 5 - Normal Distribution II.py | stonehengee/HackerrankPractice | ec052e7447391e40d1919cf0b641ff5023da3da3 | ["MIT"] | null | null | null | Tutorials/01. 10 Days of Statistics/013. Day 5 - Normal Distribution II.py | stonehengee/HackerrankPractice | ec052e7447391e40d1919cf0b641ff5023da3da3 | ["MIT"] | null | null | null |
# Problem: https://www.hackerrank.com/challenges/s10-normal-distribution-2/problem
# Score: 30
| 31.666667 | 82 | 0.778947 |

841518f1e4e615b38404905fa1e62f45ce7342dc | 1,632 | py | Python | example/myshop/models/polymorphic_/smartcard.py | haitwang-cloud/django-shop | 8ac767a42022d66d226c0bb342f16ac3df3ca30b | ["BSD-3-Clause"] | 39 | 2015-02-21T00:45:02.000Z | 2020-05-18T14:46:09.000Z | example/myshop/models/polymorphic_/smartcard.py | haitwang-cloud/django-shop | 8ac767a42022d66d226c0bb342f16ac3df3ca30b | ["BSD-3-Clause"] | 46 | 2015-02-03T19:51:37.000Z | 2017-03-24T23:40:14.000Z | example/myshop/models/polymorphic_/smartcard.py | haitwang-cloud/django-shop | 8ac767a42022d66d226c0bb342f16ac3df3ca30b | ["BSD-3-Clause"] | 23 | 2015-04-12T09:03:41.000Z | 2020-04-14T16:38:35.000Z |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
from djangocms_text_ckeditor.fields import HTMLField
from shop.money.fields import MoneyField
from .product import Product, BaseProductManager
class SmartCard(Product):
# common product fields
unit_price = MoneyField(
_("Unit price"),
decimal_places=3,
help_text=_("Net price for this product"),
)
# product properties
CARD_TYPE = (2 * ('{}{}'.format(s, t),)
for t in ('SD', 'SDXC', 'SDHC', 'SDHC II') for s in ('', 'micro '))
card_type = models.CharField(
_("Card Type"),
choices=CARD_TYPE,
max_length=15,
)
SPEED = [(str(s), "{} MB/s".format(s)) for s in (4, 20, 30, 40, 48, 80, 95, 280)]
speed = models.CharField(
_("Transfer Speed"),
choices=SPEED,
max_length=8,
)
product_code = models.CharField(
_("Product code"),
max_length=255,
unique=True,
)
storage = models.PositiveIntegerField(
_("Storage Capacity"),
help_text=_("Storage capacity in GB"),
)
description = HTMLField(
verbose_name=_("Description"),
configuration='CKEDITOR_SETTINGS_DESCRIPTION',
help_text=_("Full description used in the catalog's detail view of Smart Cards."),
)
default_manager = BaseProductManager()
class Meta:
verbose_name = _("Smart Card")
verbose_name_plural = _("Smart Cards")
def get_price(self, request):
return self.unit_price
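# Standalone illustration (not part of the model) of the choices idiom used for
# CARD_TYPE above: '2 * (value,)' yields a (value, value) pair, so the stored value
# doubles as its display label. The list below is built only for demonstration.
_card_type_choices = [2 * ('{}{}'.format(s, t),)
                      for t in ('SD', 'SDXC', 'SDHC', 'SDHC II') for s in ('', 'micro ')]
# _card_type_choices[:2] == [('SD', 'SD'), ('micro SD', 'micro SD')]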
| 26.322581 | 90 | 0.621324 |

9b413a367175d1baef75de163c0718d33dc388f4 | 260 | py | Python | dev_generate_model.py | Jimmy-Xu/fastapi_demo | f19c629cc7fa0e0e47e73e8688cd019bc74aa982 | ["MIT"] | 12 | 2020-09-01T09:19:41.000Z | 2022-03-17T05:48:50.000Z | dev_generate_model.py | Jimmy-Xu/fastapi_demo | f19c629cc7fa0e0e47e73e8688cd019bc74aa982 | ["MIT"] | null | null | null | dev_generate_model.py | Jimmy-Xu/fastapi_demo | f19c629cc7fa0e0e47e73e8688cd019bc74aa982 | ["MIT"] | 3 | 2021-04-26T02:53:04.000Z | 2021-11-01T14:32:38.000Z |
# Generate a new module from the demo code, avoiding manual copy-and-paste
import os
from fastapi_plus.utils.generate_model import GenerateModel
if __name__ == "__main__":
app_dir = os.path.dirname(os.path.abspath(__file__)) + os.sep + 'app'
model_name = 'test'
GenerateModel(app_dir, model_name)
| 26 | 73 | 0.742308 |

0fc61fa1317a585b9c0e8c8498a3002463cb631a | 469 | py | Python | packages/python/plotly/plotly/validators/volume/colorbar/_borderwidth.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | ["MIT"] | null | null | null | packages/python/plotly/plotly/validators/volume/colorbar/_borderwidth.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | ["MIT"] | null | null | null | packages/python/plotly/plotly/validators/volume/colorbar/_borderwidth.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | ["MIT"] | null | null | null |
import _plotly_utils.basevalidators
class BorderwidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="borderwidth", parent_name="volume.colorbar", **kwargs
):
super(BorderwidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
**kwargs,
)
| 31.266667 | 80 | 0.643923 |

8ba96c8c411ef9dfbee57d89b58fc17b8f8f7e78 | 3,820 | py | Python | drapps/util.py | schwa-lab/dr-apps-python | e1b7b68de54cb86f60e5c570f4ebce6c47820c39 | ["MIT"] | 3 | 2015-03-23T17:19:10.000Z | 2016-01-11T16:40:57.000Z | drapps/util.py | schwa-lab/dr-apps-python | e1b7b68de54cb86f60e5c570f4ebce6c47820c39 | ["MIT"] | null | null | null | drapps/util.py | schwa-lab/dr-apps-python | e1b7b68de54cb86f60e5c570f4ebce6c47820c39 | ["MIT"] | null | null | null |
# vim: set et nosi ai ts=2 sts=2 sw=2:
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import collections
import msgpack
import six
from six.moves import xrange
RawDoc = collections.namedtuple('RawDoc', ('version', 'klasses', 'stores', 'doc', 'instances'))
class RawDoc(object):
__slots__ = ('version', 'klasses', 'stores', '_doc', '_instances', '_doc_packed', '_instances_packed')
def __init__(self, version, klasses, stores, doc, instances, packed=True):
self.version = version
self.klasses = klasses
self.stores = stores
if packed:
self._doc_packed = doc
self._instances_packed = instances
self._doc = self._instances = None
else:
self._doc = doc
self._instances = instances
self._doc_packed = self._instances_packed = None
@classmethod
def from_stream(cls, unpacker, on_end='error', encoding='utf-8'):
if on_end not in ('error', 'break'):
raise ValueError('on_end must be "error" or "break"')
if not hasattr(unpacker, 'unpack'):
if six.PY2 and isinstance(encoding, six.text_type):
encoding = encoding.encode('utf-8')
unpacker = msgpack.Unpacker(unpacker, use_list=True, encoding=encoding)
try:
while True:
try:
klasses = unpacker.unpack()
except msgpack.OutOfData:
return
if isinstance(klasses, int):
version = klasses
klasses = unpacker.unpack()
else:
version = 1
stores = unpacker.unpack()
doc = unpacker.read_bytes(unpacker.unpack())
instances = [unpacker.read_bytes(unpacker.unpack()) for i in xrange(len(stores))]
yield cls(version, klasses, stores, doc, instances, packed=True)
except msgpack.OutOfData:
if on_end == 'error':
raise
def write(self, out):
if self.version != 1:
msgpack.pack(self.version, out)
msgpack.pack(self.klasses, out)
msgpack.pack(self.stores, out)
doc = self.doc_packed
msgpack.pack(len(doc), out)
out.write(doc)
for insts in self.instances_packed:
msgpack.pack(len(insts), out)
out.write(insts)
def _get_doc(self):
if self._doc is None:
self._doc = msgpack.unpackb(self._doc_packed)
return self._doc
def _set_doc(self, doc):
self._doc = doc
self._doc_packed = None
doc = property(_get_doc, _set_doc)
def _get_instances(self):
if self._instances is None:
self._instances = [msgpack.unpackb(store) for store in self._instances_packed]
return self._instances
def _set_instances(self, instances):
self._instances = instances
self._instances_packed = None
instances = property(_get_instances, _set_instances)
def _get_doc_packed(self):
if self._doc_packed is None:
self._doc_packed = msgpack.packb(self._doc)
return self._doc_packed
def _set_doc_packed(self, doc):
self._doc_packed = doc
self._doc = None
doc_packed = property(_get_doc_packed, _set_doc_packed)
def _get_instances_packed(self):
if self._instances_packed is None:
self._instances_packed = [msgpack.packb(store) for store in self._instances]
return self._instances_packed
def _set_instances_packed(self, instances):
self._instances_packed = instances
self._instances = None
instances_packed = property(_get_instances_packed, _set_instances_packed)
def read_raw_docs(unpacker, on_end='error'):
return RawDoc.from_stream(unpacker, on_end=on_end)
def write_raw_doc(out, doc):
doc.write(out)
class RawDocWriter(object):
def __init__(self, out):
self.out = out
def write(self, doc):
doc.write(self.out)
def import_string(name):
path, base = name.rsplit('.', 1)
return getattr(__import__(path, globals=None, fromlist=[base]), base)
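# Illustrative sketch (not part of the module): RawDoc packs its 'doc' and 'instances'
# payloads lazily, so a packed and an unpacked view can be converted back and forth.
# The field values below are made up purely for demonstration.
if __name__ == '__main__':
    raw = RawDoc(version=1, klasses=[], stores=[['tokens']],
                 doc={'name': 'example'}, instances=[[{'norm': 'hi'}]], packed=False)
    packed_doc = raw.doc_packed            # msgpack bytes, computed on first access
    reread = RawDoc(1, [], [['tokens']], packed_doc, raw.instances_packed, packed=True)
    print(reread.doc)                      # round-trips to the original mapping
    # (key types -- str vs bytes -- depend on the installed msgpack version)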
| 28.507463 | 104 | 0.682199 |

ab1889ec2380dc545e61c96a1fc22dae995ab888 | 3,036 | py | Python | mt/config/get_config.py | Summer-2077/nlp-machine_translation | 1c69657badb21ef4672c18c712264dac79766703 | ["Apache-2.0"] | 2 | 2021-04-05T05:57:24.000Z | 2021-12-13T14:28:45.000Z | mt/config/get_config.py | Summer-2077/nlp-machine_translation | 1c69657badb21ef4672c18c712264dac79766703 | ["Apache-2.0"] | null | null | null | mt/config/get_config.py | Summer-2077/nlp-machine_translation | 1c69657badb21ef4672c18c712264dac79766703 | ["Apache-2.0"] | null | null | null |
import os
import json
json_path = os.path.join(os.path.dirname(__file__), 'config.json')  # path to the configuration file
def get_config_json(config_file='main.json'):
with open(config_file, 'r') as file:
return json.load(file)
conf = get_config_json(json_path)
# assign each configuration variable
num_validate_sentences = conf["num_validate_sentences"]  # number of sentences used for validation
path_to_val_file = conf["path_to_val_file"]  # path to the validation text file
validation_data = conf["validation_data"]  # whether to validate on the given text dataset
checkpoint_name = conf['checkpoint_name']  # checkpoint name
validation_freq = conf['validation_freq']  # validation frequency, i.e. validate every N training epochs
checkpoints_save_freq = conf['checkpoints_save_freq']  # checkpoint save frequency
max_checkpoints_num = conf['max_checkpoints_num']  # maximum number of checkpoints to keep
source_lang = conf['source_lang']  # source language
target_lang = conf['target_lang']  # target language
reverse = conf['reverse']  # whether to reverse the corpus language pair
en_tokenize_type = conf['en_tokenize_type']  # English tokenization type, options: BPE/TOKENIZE
zh_tokenize_type = conf['zh_tokenize_type']  # Chinese tokenization type, options: TOKENIZE
tokenizer_path_prefix = conf["tokenizer_path_prefix"]  # prefix of the path where the vocabulary is saved
encoded_sequences_path_prefix = conf['encoded_sequences_path_prefix']  # prefix of the path where encoded sentences are saved
result_save_dir = conf['result_save_dir']  # directory where training-metric plots are saved
path_to_train_file = conf['path_to_train_file']  # path to the training text file
path_to_eval_file = conf['path_to_eval_file']  # path to the text file used for metric evaluation
num_eval = conf['num_eval']  # number of sentence pairs used to compute metrics
checkpoint_path = os.path.join(conf["checkpoint_path_dir"], conf['source_lang']+'_'+conf['target_lang'])  # checkpoint path
BUFFER_SIZE = conf['BUFFER_SIZE']
BATCH_SIZE = conf['BATCH_SIZE']
train_size = conf['train_size']  # proportion of test data within the training data
num_sentences = conf["num_sentences"]  # number of sentence pairs used for training
num_layers = conf["num_layers"]  # number of layers in the encoder and the decoder
d_model = conf["d_model"]  # embedding dimension
dff = conf["dff"]  # dimension of the first dense layer of the point-wise feed forward network
num_heads = conf["num_heads"]  # number of attention heads in multi-head attention
dropout_rate = conf["dropout_rate"]
EPOCHS = conf["EPOCHS"]  # number of training epochs
max_target_length = conf['max_target_length']  # maximum length of a generated target sentence
target_vocab_size = conf["target_vocab_size"]  # target_vocab_size for English tokenization
start_word = conf["start_word"]  # sentence start token
end_word = conf["end_word"]  # sentence end token
BEAM_SIZE = conf["BEAM_SIZE"]  # BEAM_SIZE
checkpoint_ensembling = conf["checkpoint_ensembling"]  # whether to use checkpoint ensembling
lm_path_to_train_file = conf["language_model"]["path_to_train_file_lm"]  # path to the language-model training text
lm_language = conf["language_model"]["language"]
lm_tokenize_type = conf["language_model"]["tokenize_type"]
lm_EPOCHS = conf["language_model"]["EPOCHS"]
lm_num_sentences = conf["language_model"]["num_sentences"]
lm_BATCH_SIZE = conf["language_model"]["BATCH_SIZE"]
lm_train_size = conf["language_model"]["train_size"]
lm_checkpoint_path = os.path.join(conf["checkpoint_path_dir"], 'lm')
lm_d_embedding = conf["language_model"]["d_embedding"]
lm_d_rnn = conf["language_model"]["d_rnn"]
lm_max_checkpoints_num = conf["language_model"]["max_checkpoints_num"]
lm_checkpoints_save_freq = conf["language_model"]["checkpoints_save_freq"]
lm_validation_freq = conf["language_model"]["validation_freq"]
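# Illustrative only: a minimal stand-in for config.json covering a handful of the
# keys read above, with made-up values; the real file must define every key used here.
_example_conf = {
    "BUFFER_SIZE": 20000,
    "BATCH_SIZE": 64,
    "EPOCHS": 10,
    "source_lang": "en",
    "target_lang": "zh",
    "checkpoint_path_dir": "./checkpoints",
}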
| 46.707692 | 114 | 0.772727 |