Dataset schema:

| column | type | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |

hexsha: a0eb0a74426c9decbb532e805c646eb7db9b5ec6 | size: 527 | ext: py | lang: Python
max_stars: sovtoken/sovtoken/transactions.py @ dastardlychimp/token-plugin | head: 8755bce1424e794285ea6e70bf9bdd05518667c3 | licenses: ["Apache-2.0"] | count: 9 | 2018-10-26T04:59:51.000Z to 2021-06-10T13:30:51.000Z
max_issues: sovtoken/sovtoken/transactions.py @ dastardlychimp/token-plugin | head: 8755bce1424e794285ea6e70bf9bdd05518667c3 | licenses: ["Apache-2.0"] | count: 29 | 2018-09-11T17:07:00.000Z to 2021-04-16T20:22:19.000Z
max_forks: sovtoken/sovtoken/transactions.py @ dastardlychimp/token-plugin | head: 8755bce1424e794285ea6e70bf9bdd05518667c3 | licenses: ["Apache-2.0"] | count: 26 | 2018-09-11T16:49:40.000Z to 2021-06-06T10:43:55.000Z
content:
from enum import Enum, unique
# DO NOT CHANGE ONCE CODE IS DEPLOYED ON THE LEDGER
PREFIX = '1000'
@unique
class TokenTransactions(Enum):
# These numeric constants CANNOT be changed once they have been used,
# because that would break backwards compatibility with the ledger
# Also the numeric constants CANNOT collide with other transactions hence a
# prefix is used
MINT_PUBLIC = PREFIX + '0'
XFER_PUBLIC = PREFIX + '1'
GET_UTXO = PREFIX + '2'
def __str__(self):
return self.name
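
# --- editor's note (illustrative, not part of the original file): with
# PREFIX = '1000' the codes are plain string concatenations, so
#   TokenTransactions.MINT_PUBLIC.value -> '10000'
#   TokenTransactions.XFER_PUBLIC.value -> '10001'
#   TokenTransactions.GET_UTXO.value    -> '10002'
# while str(TokenTransactions.GET_UTXO) -> 'GET_UTXO' via __str__.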

avg_line_length: 27.736842 | max_line_length: 79 | alphanum_fraction: 0.705882

hexsha: 2096cad32edfaeb8a1640e60926c437b2e785c7f | size: 991 | ext: py | lang: Python
max_stars: hackerearth/Algorithms/Prison Break/solution.py @ ATrain951/01.python-com_Qproject | head: c164dd093954d006538020bdf2e59e716b24d67c | licenses: ["MIT"] | count: 4 | 2020-07-24T01:59:50.000Z to 2021-07-24T15:14:08.000Z
max_issues: hackerearth/Algorithms/Prison Break/solution.py @ ATrain951/01.python-com_Qproject | head: c164dd093954d006538020bdf2e59e716b24d67c | licenses: ["MIT"] | count: null | dates: null
max_forks: hackerearth/Algorithms/Prison Break/solution.py @ ATrain951/01.python-com_Qproject | head: c164dd093954d006538020bdf2e59e716b24d67c | licenses: ["MIT"] | count: null | dates: null
content:
"""
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
t = int(input())
moves = ((-1, 0), (1, 0), (0, -1), (0, 1))
def escape(x, y, size, adjacency, seen):
if x == size - 1 and y == size - 1:
global count
count += 1
return
seen[x][y] = True
for mx, my in moves:
nx = x + mx
ny = y + my
        if 0 <= nx < size and 0 <= ny < size and not seen[nx][ny] and not adjacency[nx][ny]:
escape(nx, ny, size, adjacency, seen)
seen[x][y] = False
return
for _ in range(t):
n = int(input())
prison = []
for _ in range(n):
prison.append(list(map(int, input().strip().split())))
visited = [[False] * n for _ in range(n)]
count = 0
escape(0, 0, n, prison, visited)
print(count)
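
# --- editor's note (illustrative, not part of the original file): the DFS above
# counts every simple path from (0, 0) to (size-1, size-1), moving in the four
# cardinal directions and treating non-zero cells of the grid as walls;
# seen[x][y] is reset on backtrack so a cell may be reused by other paths.
# For the all-open 2x2 grid [[0, 0], [0, 0]] it would print 2.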

avg_line_length: 25.410256 | max_line_length: 94 | alphanum_fraction: 0.534813

hexsha: 5cccbd5939f1f42a237cf4074b32614211365952 | size: 4,034 | ext: py | lang: Python
max_stars: alipay/aop/api/request/AlipayCommerceIotDapplyOrderdeviceQueryRequest.py @ antopen/alipay-sdk-python-all | head: 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | licenses: ["Apache-2.0"] | count: 213 | 2018-08-27T16:49:32.000Z to 2021-12-29T04:34:12.000Z
max_issues: alipay/aop/api/request/AlipayCommerceIotDapplyOrderdeviceQueryRequest.py @ antopen/alipay-sdk-python-all | head: 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | licenses: ["Apache-2.0"] | count: 29 | 2018-09-29T06:43:00.000Z to 2021-09-02T03:27:32.000Z
max_forks: alipay/aop/api/request/AlipayCommerceIotDapplyOrderdeviceQueryRequest.py @ antopen/alipay-sdk-python-all | head: 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | licenses: ["Apache-2.0"] | count: 59 | 2018-08-27T16:59:26.000Z to 2022-03-25T10:08:15.000Z
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayCommerceIotDapplyOrderdeviceQueryModel import AlipayCommerceIotDapplyOrderdeviceQueryModel
class AlipayCommerceIotDapplyOrderdeviceQueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayCommerceIotDapplyOrderdeviceQueryModel):
self._biz_content = value
else:
self._biz_content = AlipayCommerceIotDapplyOrderdeviceQueryModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.commerce.iot.dapply.orderdevice.query'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
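
# --- editor's note (illustrative sketch, not part of the original file; the
# variable names below are hypothetical):
#   model = AlipayCommerceIotDapplyOrderdeviceQueryModel()
#   request = AlipayCommerceIotDapplyOrderdeviceQueryRequest(biz_model=model)
#   request.add_other_text_param('app_auth_token', '<token>')  # stored in udf_params
#   params = request.get_params()  # P_METHOD, P_VERSION, JSON-serialized biz_content, ...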

avg_line_length: 27.82069 | max_line_length: 148 | alphanum_fraction: 0.649975

hexsha: 8615633d8748e83e4a1b042f733ca2708e33b4eb | size: 159 | ext: py | lang: Python
max_stars: test_ukz/__init__.py @ clauderichard/Ultrakazoid | head: 619f1afd1fd55afb06e7d27b2bc30eee9929f660 | licenses: ["MIT"] | count: null | dates: null
max_issues: test_ukz/__init__.py @ clauderichard/Ultrakazoid | head: 619f1afd1fd55afb06e7d27b2bc30eee9929f660 | licenses: ["MIT"] | count: null | dates: null
max_forks: test_ukz/__init__.py @ clauderichard/Ultrakazoid | head: 619f1afd1fd55afb06e7d27b2bc30eee9929f660 | licenses: ["MIT"] | count: null | dates: null
content:
from .test_melody import *
from .test_ukzlang import *
from .test_uklr import *
from .test_util import *
from .test_midi import *
from .test_scripts import *

avg_line_length: 19.875 | max_line_length: 27 | alphanum_fraction: 0.767296

hexsha: b16033fcda01a71fe03f60484bc2bb59deaeae30 | size: 4,174 | ext: py | lang: Python
max_stars: keras_textclassification/m11_SelfAttention/train.py @ luoyudong593/Keras-TextClassification | head: b3e6966b5dbc7f425522074e2043fbff0614de84 | licenses: ["MIT"] | count: 1,339 | 2019-06-13T15:34:46.000Z to 2022-03-31T11:24:09.000Z
max_issues: keras_textclassification/m11_SelfAttention/train.py @ zhangshixing-chn/Keras-TextClassification | head: 640e3f44f90d9d8046546f7e1a93a29ebe5c8d30 | licenses: ["MIT"] | count: 75 | 2019-06-25T06:38:27.000Z to 2022-03-25T06:48:19.000Z
max_forks: keras_textclassification/m11_SelfAttention/train.py @ zhangshixing-chn/Keras-TextClassification | head: 640e3f44f90d9d8046546f7e1a93a29ebe5c8d30 | licenses: ["MIT"] | count: 400 | 2019-06-17T03:00:48.000Z to 2022-03-23T07:00:53.000Z
content:
# -*- coding: UTF-8 -*-
# !/usr/bin/python
# @time     :2019/6/3 10:51
# @author   :Mo
# @function :train of SelfAttention with baidu-qa-2019 in question title
# Linux compatibility
import pathlib
import sys
import os
project_path = str(pathlib.Path(os.path.abspath(__file__)).parent.parent.parent)
sys.path.append(project_path)
# paths
from keras_textclassification.conf.path_config import path_model, path_fineture, path_model_dir, path_hyper_parameters
# paths of the training and validation data
from keras_textclassification.conf.path_config import path_baidu_qa_2019_train, path_baidu_qa_2019_valid, path_root
# data preprocessing; deleting the files under a directory
from keras_textclassification.data_preprocess.text_preprocess import PreprocessText, PreprocessSim, delete_file
# model graph
from keras_textclassification.m11_SelfAttention.graph import SelfAttentionGraph as Graph
# timing
import time


def train(hyper_parameters=None, rate=1.0):
    """
    Training function.
    :param hyper_parameters: json, hyperparameters
    :param rate: float, fraction of the corpus sampled for training
    :return: None
    """
    if not hyper_parameters:
        hyper_parameters = {
            'len_max': 1376,  # maximum sentence length, fixed; 20-50 recommended
            'embed_size': 300,  # char/word embedding dimension
            'vocab_size': 20000,  # arbitrary here; the code overwrites it
            'trainable': True,  # whether the embedding is static or dynamic, i.e. whether it can be fine-tuned
            'level_type': 'char',  # granularity, the smallest unit, char/word; use 'char' or 'word'
            'embedding_type': 'random',  # embedding type; may also be 'xlnet', 'random', 'bert', 'albert' or 'word2vec'
            'gpu_memory_fraction': 0.76,  # GPU memory fraction
            'model': {'label': 23,  # number of classes
                      'batch_size': 8,  # batch size; in principle the bigger the better, especially with imbalanced samples; this setting matters a lot
                      'dropout': 0.5,  # dropout probability
                      'decay_step': 1000,  # learning-rate decay step; decay once every N steps
                      'decay_rate': 0.999,  # learning-rate decay factor (multiplicative)
                      'epochs': 20,  # maximum number of training epochs
                      'patience': 3,  # early-stopping patience; 2-3 is enough
                      'lr': 1e-3,  # learning rate; strongly affects training; tune it if accuracy will not improve
                      'l2': 1e-6,  # L2 regularization
                      'activate_classify': 'softmax',  # final layer, i.e. the classification activation
                      'loss': 'categorical_crossentropy',  # loss function
                      'metrics': 'accuracy',  # criterion used to save a better model
                      'is_training': True,  # training or testing mode
                      'model_path': path_model,  # model path; saved whenever the loss decreases; save_best_only=True, save_weights_only=True
                      'path_fineture': path_fineture,  # path of the trainable embedding, e.g. char vectors, word vectors, bert vectors
                      'path_hyper_parameters': path_hyper_parameters,  # path of the model (including embedding) hyperparameters
                      },
            'embedding': {'layer_indexes': [12],  # which bert layers to take
                          # 'corpus_path': '',  # path of the pretrained embedding data; defaults to the path in conf if unset; keras-bert can load Google bert, Baidu ernie (needs conversion, https://github.com/ArthurRizar/tensorflow_ernie) and HIT bert-wwm (tf framework, https://github.com/ymcui/Chinese-BERT-wwm)
                          },
            'data': {'train_data': path_baidu_qa_2019_train,  # training data
                     'val_data': path_baidu_qa_2019_valid  # validation data
                     },
        }
    # delete any previously saved model, fine-tuned embedding, etc.
    delete_file(path_model_dir)
    time_start = time.time()
    # graph initialization
    graph = Graph(hyper_parameters)
    print("graph init ok!")
    ra_ed = graph.word_embedding
    # data preprocessing
    pt = PreprocessSim(path_model_dir)
    x_train, y_train = pt.preprocess_label_ques_to_idx(hyper_parameters['embedding_type'],
                                                       hyper_parameters['data']['train_data'],
                                                       ra_ed, rate=rate, shuffle=True)
    x_val, y_val = pt.preprocess_label_ques_to_idx(hyper_parameters['embedding_type'],
                                                   hyper_parameters['data']['val_data'],
                                                   ra_ed, rate=rate, shuffle=True)
    print("data preprocess ok!")
    print(len(y_train))
    # training
    graph.fit(x_train, y_train, x_val, y_val)
    print("time cost: " + str(time.time() - time_start))


if __name__ == "__main__":
    train(rate=1)
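
# --- editor's note (illustrative, not part of the original file): rate is the
# corpus sampling fraction, so a quick smoke test might call train(rate=0.01)
# to fit on about 1% of baidu-qa-2019 before a full run with train(rate=1).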

avg_line_length: 45.369565 | max_line_length: 235 | alphanum_fraction: 0.574509

hexsha: 73f72f03f4912ce619d063edafe3478144319f76 | size: 1,666 | ext: py | lang: Python
max_stars: src/python/nimbusml/examples/FastLinearBinaryClassifier.py @ montehoover/NimbusML | head: f6be39ce9359786976429bab0ccd837e849b4ba5 | licenses: ["MIT"] | count: 134 | 2018-11-01T22:15:24.000Z to 2019-05-04T11:30:08.000Z
max_issues: src/python/nimbusml/examples/FastLinearBinaryClassifier.py @ montehoover/NimbusML | head: f6be39ce9359786976429bab0ccd837e849b4ba5 | licenses: ["MIT"] | count: 226 | 2019-05-07T19:00:44.000Z to 2021-01-06T07:59:48.000Z
max_forks: src/python/nimbusml/examples/FastLinearBinaryClassifier.py @ montehoover/NimbusML | head: f6be39ce9359786976429bab0ccd837e849b4ba5 | licenses: ["MIT"] | count: 43 | 2019-05-15T20:19:42.000Z to 2022-03-30T10:26:07.000Z
content:
###############################################################################
# FastLinearBinaryClassifier
from nimbusml import Pipeline, FileDataStream
from nimbusml.datasets import get_dataset
from nimbusml.feature_extraction.categorical import OneHotVectorizer
from nimbusml.linear_model import FastLinearBinaryClassifier
# data input (as a FileDataStream)
path = get_dataset('infert').as_filepath()
data = FileDataStream.read_csv(path)
print(data.head())
# age case education induced parity ... row_num spontaneous ...
# 0 26 1 0-5yrs 1 6 ... 1 2 ...
# 1 42 1 0-5yrs 1 1 ... 2 0 ...
# 2 39 1 0-5yrs 2 6 ... 3 0 ...
# 3 34 1 0-5yrs 2 4 ... 4 0 ...
# 4 35 1 6-11yrs 1 3 ... 5 1 ...
# define the training pipeline
pipeline = Pipeline([
OneHotVectorizer(columns={'edu': 'education'}),
FastLinearBinaryClassifier(feature=['age', 'edu', 'induced'], label='case')
])
# train, predict, and evaluate
metrics, predictions = pipeline.fit(data).test(data, output_scores=True)
# print predictions
print(predictions.head())
# PredictedLabel Probability Score
# 0 0 0.360707 -0.572296
# 1 0 0.372283 -0.522437
# 2 0 0.376590 -0.504046
# 3 0 0.372939 -0.519627
# 4 0 0.329059 -0.712444
# print evaluation metrics
print(metrics)
# AUC Accuracy Positive precision Positive recall ...
# 0 0.496495 0.665323 0 0 ...
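
# --- editor's note (reading of the example above, not part of the original
# file): pipeline.fit(data).test(data, output_scores=True) returns the
# (metrics, predictions) pair printed here; output_scores=True is what adds
# the raw Score column next to PredictedLabel and Probability.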

avg_line_length: 40.634146 | max_line_length: 79 | alphanum_fraction: 0.532413

hexsha: e4c71b09c2cbd57ef22c208cba3b98a5c7d7dcc7 | size: 884 | ext: py | lang: Python
max_stars: Django/learningTemplates/learningTemplates/urls.py @ carlosmertens/Django-Full-Stack | head: e9713618936d6a90e098056cd6168d427dc91bd3 | licenses: ["MIT"] | count: null | dates: null
max_issues: Django/learningTemplates/learningTemplates/urls.py @ carlosmertens/Django-Full-Stack | head: e9713618936d6a90e098056cd6168d427dc91bd3 | licenses: ["MIT"] | count: null | dates: null
max_forks: Django/learningTemplates/learningTemplates/urls.py @ carlosmertens/Django-Full-Stack | head: e9713618936d6a90e098056cd6168d427dc91bd3 | licenses: ["MIT"] | count: null | dates: null
content:
"""learningTemplates URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from basicApp import views
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.index, name='index'),
path('basicApp/', include('basicApp.urls'))
]
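
# --- editor's note (illustrative, not part of the original file): with this
# list a request to '' resolves to basicApp's views.index, 'admin/' to the
# Django admin site, and anything under 'basicApp/' is delegated to the
# patterns in basicApp/urls.py via include().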

avg_line_length: 35.36 | max_line_length: 77 | alphanum_fraction: 0.707014

hexsha: f3b6364dee2d7edc7bc8b71cc01426ad16c420dd | size: 128,745 | ext: py | lang: Python
max_stars: blender/2.79/scripts/addons/space_view3d_spacebar_menu.py @ uzairakbar/bpy2.79 | head: 3a3e0004ac6783c4e4b89d939e4432de99026a85 | licenses: ["MIT"] | count: 2 | 2019-11-27T09:05:42.000Z to 2020-02-20T01:25:23.000Z
max_issues: blender/2.79/scripts/addons/space_view3d_spacebar_menu.py @ uzairakbar/bpy2.79 | head: 3a3e0004ac6783c4e4b89d939e4432de99026a85 | licenses: ["MIT"] | count: null | dates: null
max_forks: blender/2.79/scripts/addons/space_view3d_spacebar_menu.py @ uzairakbar/bpy2.79 | head: 3a3e0004ac6783c4e4b89d939e4432de99026a85 | licenses: ["MIT"] | count: 4 | 2020-02-19T20:02:26.000Z to 2022-02-11T18:47:56.000Z
content:
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Contributed to by: meta-androcto, JayDez, sim88, sam, lijenstina, mkb, wisaac, CoDEmanX #
bl_info = {
"name": "Dynamic Context Menu",
"author": "meta-androcto",
"version": (1, 8, 5),
"blender": (2, 77, 0),
"location": "View3D > Spacebar",
"description": "Object Mode Context Sensitive Spacebar Menu",
"warning": "",
"wiki_url": "https://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/3D_interaction/Dynamic_Spacebar_Menu",
"category": "3D View",
}
import bpy
from bpy.types import (
Operator,
Menu,
AddonPreferences,
)
from bpy.props import (
BoolProperty,
StringProperty,
)
from bl_ui.properties_paint_common import UnifiedPaintPanel
# Dynamic Context Sensitive Menu #
# Main Menu based on Object Type & 3d View Editor Mode #
class VIEW3D_MT_Space_Dynamic_Menu(Menu):
bl_label = "Dynamic Context Menu"
def draw(self, context):
layout = self.layout
settings = context.tool_settings
layout.operator_context = 'INVOKE_REGION_WIN'
obj = context.object
# No Object Selected #
if not context.active_object:
layout.operator_context = 'INVOKE_REGION_WIN'
layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_AddMenu", icon='OBJECT_DATAMODE')
layout.menu("VIEW3D_MT_View_Directions", icon='ZOOM_ALL')
layout.menu("VIEW3D_MT_View_Navigation", icon='ROTATE')
layout.menu("VIEW3D_MT_View_Toggle", icon='SPLITSCREEN')
layout.operator("view3d.snap_cursor_to_center",
text="Cursor to Center")
layout.operator("view3d.snap_cursor_to_grid",
text="Cursor to Grid")
layout.menu("VIEW3D_MT_UndoS", icon='ARROW_LEFTRIGHT')
UseSeparator(self, context)
layout.operator("view3d.toolshelf", icon='MENU_PANEL')
layout.operator("view3d.properties", icon='MENU_PANEL')
if context.gpencil_data and context.gpencil_data.use_stroke_edit_mode:
layout.menu("VIEW3D_MT_Edit_Gpencil", icon='GREASEPENCIL')
# Mesh Object Mode #
if obj and obj.type == 'MESH' and obj.mode in {'OBJECT'}:
layout.operator_context = 'INVOKE_REGION_WIN'
layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_View_Menu", icon='ZOOM_ALL')
layout.menu("VIEW3D_MT_Select_Object", icon='RESTRICT_SELECT_OFF')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_AddMenu", icon='OBJECT_DATAMODE')
layout.menu("VIEW3D_MT_Object", icon='VIEW3D')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_TransformMenu", icon='MANIPUL')
layout.menu("VIEW3D_MT_MirrorMenu", icon='MOD_MIRROR')
layout.menu("VIEW3D_MT_CursorMenu", icon='CURSOR')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_ParentMenu", icon='ROTACTIVE')
layout.menu("VIEW3D_MT_GroupMenu", icon='GROUP')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_object_specials", text="Specials", icon='SOLO_OFF')
if context.gpencil_data and context.gpencil_data.use_stroke_edit_mode:
layout.menu("VIEW3D_MT_Edit_Gpencil", icon='GREASEPENCIL')
layout.menu("VIEW3D_MT_Camera_Options", icon='OUTLINER_OB_CAMERA')
layout.operator_menu_enum("object.modifier_add", "type", icon='MODIFIER')
layout.operator_menu_enum("object.constraint_add",
"type", text="Add Constraint", icon='CONSTRAINT')
UseSeparator(self, context)
layout.operator("object.delete", text="Delete Object", icon='X')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_UndoS", icon='ARROW_LEFTRIGHT')
layout.menu("VIEW3D_MT_Object_Interactive_Mode", icon='EDIT')
UseSeparator(self, context)
layout.operator("view3d.toolshelf", icon='MENU_PANEL')
layout.operator("view3d.properties", icon='MENU_PANEL')
# Mesh Edit Mode #
if obj and obj.type == 'MESH' and obj.mode in {'EDIT'}:
layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_View_Menu", icon='ZOOM_ALL')
layout.menu("VIEW3D_MT_Select_Edit_Mesh", icon='RESTRICT_SELECT_OFF')
layout.menu("VIEW3D_MT_Edit_Multi", icon='VERTEXSEL')
UseSeparator(self, context)
layout.menu("INFO_MT_mesh_add", text="Add Mesh", icon='OUTLINER_OB_MESH')
layout.menu("VIEW3D_MT_Edit_Mesh", text="Mesh", icon='MESH_DATA')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_TransformMenuEdit", icon='MANIPUL')
layout.menu("VIEW3D_MT_MirrorMenu", icon='MOD_MIRROR')
layout.menu("VIEW3D_MT_EditCursorMenu", icon='CURSOR')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_UV_Map", icon='MOD_UVPROJECT')
layout.menu("VIEW3D_MT_edit_mesh_specials", icon='SOLO_OFF')
layout.menu("VIEW3D_MT_edit_mesh_extrude", icon='ORTHO')
UseSeparator(self, context)
layout.operator_menu_enum("object.modifier_add", "type", icon='MODIFIER')
layout.operator_menu_enum("object.constraint_add",
"type", text="Add Constraint", icon='CONSTRAINT')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_edit_mesh_delete", icon='X')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_UndoS", icon='ARROW_LEFTRIGHT')
layout.menu("VIEW3D_MT_Object_Interactive_Mode", icon='EDIT')
UseSeparator(self, context)
layout.operator("view3d.toolshelf", icon='MENU_PANEL')
layout.operator("view3d.properties", icon='MENU_PANEL')
# Sculpt Mode #
if obj and obj.type == 'MESH' and obj.mode in {'SCULPT'}:
layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_View_Menu", icon='ZOOM_ALL')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_Sculpts", icon='SCULPTMODE_HLT')
layout.menu("VIEW3D_MT_Brush_Selection", text="Sculpt Tool", icon='BRUSH_SCULPT_DRAW')
layout.menu("VIEW3D_MT_Brush_Settings", icon='BRUSH_DATA')
layout.menu("VIEW3D_MT_Hide_Masks", icon='RESTRICT_VIEW_OFF')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_Sculpt_Specials", icon='SOLO_OFF')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_UndoS", icon='ARROW_LEFTRIGHT')
layout.menu("VIEW3D_MT_Object_Interactive_Mode", icon='EDIT')
UseSeparator(self, context)
layout.operator("view3d.toolshelf", icon='MENU_PANEL')
layout.operator("view3d.properties", icon='MENU_PANEL')
# Vertex Paint #
if obj and obj.type == 'MESH' and obj.mode in {'VERTEX_PAINT'}:
layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_View_Menu", icon='ZOOM_ALL')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_Brush_Settings", icon='BRUSH_DATA')
layout.menu("VIEW3D_MT_Brush_Selection",
text="Vertex Paint Tool", icon='BRUSH_VERTEXDRAW')
layout.menu("VIEW3D_MT_Vertex_Colors", icon='GROUP_VCOL')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_UndoS", icon='ARROW_LEFTRIGHT')
layout.menu("VIEW3D_MT_Object_Interactive_Mode", icon='EDIT')
UseSeparator(self, context)
layout.operator("view3d.toolshelf", icon='MENU_PANEL')
layout.operator("view3d.properties", icon='MENU_PANEL')
# Weight Paint Menu #
if obj and obj.type == 'MESH' and obj.mode in {'WEIGHT_PAINT'}:
layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_View_Menu", icon='ZOOM_ALL')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_Paint_Weights", icon='WPAINT_HLT')
layout.menu("VIEW3D_MT_Brush_Settings", icon='BRUSH_DATA')
layout.menu("VIEW3D_MT_Brush_Selection",
text="Weight Paint Tool", icon='BRUSH_TEXMASK')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_UndoS", icon='ARROW_LEFTRIGHT')
layout.menu("VIEW3D_MT_Object_Interactive_Mode", icon='EDIT')
UseSeparator(self, context)
layout.operator("view3d.toolshelf", icon='MENU_PANEL')
layout.operator("view3d.properties", icon='MENU_PANEL')
# Texture Paint #
if obj and obj.type == 'MESH' and obj.mode in {'TEXTURE_PAINT'}:
layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_View_Menu", icon='ZOOM_ALL')
layout.menu("VIEW3D_MT_Brush_Settings", icon='BRUSH_DATA')
layout.menu("VIEW3D_MT_Brush_Selection",
text="Texture Paint Tool", icon='SCULPTMODE_HLT')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_UndoS", icon='ARROW_LEFTRIGHT')
layout.menu("VIEW3D_MT_Object_Interactive_Mode", icon='EDIT')
UseSeparator(self, context)
layout.operator("view3d.toolshelf", icon='MENU_PANEL')
layout.operator("view3d.properties", icon='MENU_PANEL')
# Curve Object Mode #
if obj and obj.type == 'CURVE' and obj.mode in {'OBJECT'}:
layout.operator_context = 'INVOKE_REGION_WIN'
layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_View_Menu", icon='ZOOM_ALL')
layout.menu("VIEW3D_MT_Select_Object", icon='RESTRICT_SELECT_OFF')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_AddMenu", icon='OBJECT_DATAMODE')
layout.menu("VIEW3D_MT_Object", icon='VIEW3D')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_TransformMenu", icon='MANIPUL')
layout.menu("VIEW3D_MT_MirrorMenu", icon='MOD_MIRROR')
layout.menu("VIEW3D_MT_CursorMenu", icon='CURSOR')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_ParentMenu", icon='ROTACTIVE')
layout.menu("VIEW3D_MT_GroupMenu", icon='GROUP')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_object_specials", text="Specials", icon='SOLO_OFF')
layout.menu("VIEW3D_MT_Camera_Options", icon='OUTLINER_OB_CAMERA')
UseSeparator(self, context)
layout.operator_menu_enum("object.modifier_add", "type", icon='MODIFIER')
layout.operator_menu_enum("object.constraint_add",
"type", text="Add Constraint", icon='CONSTRAINT')
UseSeparator(self, context)
layout.operator("object.delete", text="Delete Object", icon='X')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_UndoS", icon='ARROW_LEFTRIGHT')
layout.menu("VIEW3D_MT_Object_Interactive_Other", icon='OBJECT_DATA')
UseSeparator(self, context)
layout.operator("view3d.toolshelf", icon='MENU_PANEL')
layout.operator("view3d.properties", icon='MENU_PANEL')
# Edit Curve #
if obj and obj.type == 'CURVE' and obj.mode in {'EDIT'}:
layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_View_Menu", icon='ZOOM_ALL')
layout.menu("VIEW3D_MT_Select_Edit_Curve",
icon='RESTRICT_SELECT_OFF')
UseSeparator(self, context)
layout.menu("INFO_MT_curve_add", text="Add Curve",
icon='OUTLINER_OB_CURVE')
layout.menu("VIEW3D_MT_Edit_Curve", icon='CURVE_DATA')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_TransformMenu", icon='MANIPUL')
layout.menu("VIEW3D_MT_MirrorMenu", icon='MOD_MIRROR')
layout.menu("VIEW3D_MT_CursorMenu", icon='CURSOR')
layout.menu("VIEW3D_MT_EditCurveCtrlpoints",
icon='CURVE_BEZCURVE')
layout.menu("VIEW3D_MT_EditCurveSpecials",
icon='SOLO_OFF')
UseSeparator(self, context)
layout.operator("curve.delete", text="Delete Object",
icon='X')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_UndoS", icon='ARROW_LEFTRIGHT')
layout.menu("VIEW3D_MT_Object_Interactive_Other", icon='OBJECT_DATA')
UseSeparator(self, context)
layout.operator("view3d.toolshelf", icon='MENU_PANEL')
layout.operator("view3d.properties", icon='MENU_PANEL')
# Surface Object Mode #
if obj and obj.type == 'SURFACE' and obj.mode in {'OBJECT'}:
layout.operator_context = 'INVOKE_REGION_WIN'
layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_View_Menu", icon='ZOOM_ALL')
layout.menu("VIEW3D_MT_Select_Object", icon='RESTRICT_SELECT_OFF')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_AddMenu", icon='OBJECT_DATAMODE')
layout.menu("VIEW3D_MT_Object", icon='VIEW3D')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_TransformMenu", icon='MANIPUL')
layout.menu("VIEW3D_MT_MirrorMenu", icon='MOD_MIRROR')
layout.menu("VIEW3D_MT_CursorMenu", icon='CURSOR')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_ParentMenu", icon='ROTACTIVE')
layout.menu("VIEW3D_MT_GroupMenu", icon='GROUP')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_object_specials", text="Specials", icon='SOLO_OFF')
layout.menu("VIEW3D_MT_Camera_Options", icon='OUTLINER_OB_CAMERA')
layout.operator_menu_enum("object.modifier_add", "type", icon='MODIFIER')
layout.operator_menu_enum("object.constraint_add",
"type", text="Add Constraint", icon='CONSTRAINT')
UseSeparator(self, context)
layout.operator("object.delete", text="Delete Object", icon='X')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_UndoS", icon='ARROW_LEFTRIGHT')
layout.menu("VIEW3D_MT_Object_Interactive_Other", icon='OBJECT_DATA')
UseSeparator(self, context)
layout.operator("view3d.toolshelf", icon='MENU_PANEL')
layout.operator("view3d.properties", icon='MENU_PANEL')
# Edit Surface #
if obj and obj.type == 'SURFACE' and obj.mode in {'EDIT'}:
layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_View_Menu", icon='ZOOM_ALL')
layout.menu("VIEW3D_MT_Select_Edit_Surface", icon='RESTRICT_SELECT_OFF')
UseSeparator(self, context)
layout.menu("INFO_MT_surface_add", text="Add Surface",
icon='OUTLINER_OB_SURFACE')
layout.menu("VIEW3D_MT_TransformMenu", icon='MANIPUL')
layout.menu("VIEW3D_MT_MirrorMenu", icon='MOD_MIRROR')
layout.menu("VIEW3D_MT_CursorMenu", icon='CURSOR')
UseSeparator(self, context)
layout.prop_menu_enum(settings, "proportional_edit",
icon="PROP_CON")
layout.prop_menu_enum(settings, "proportional_edit_falloff",
icon="SMOOTHCURVE")
layout.menu("VIEW3D_MT_EditCurveSpecials",
icon='SOLO_OFF')
UseSeparator(self, context)
layout.operator("curve.delete", text="Delete Object",
icon='CANCEL')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_UndoS", icon='ARROW_LEFTRIGHT')
layout.menu("VIEW3D_MT_Object_Interactive_Other", icon='OBJECT_DATA')
UseSeparator(self, context)
layout.operator("view3d.toolshelf", icon='MENU_PANEL')
layout.operator("view3d.properties", icon='MENU_PANEL')
# Metaball Object Mode #
if obj and obj.type == 'META' and obj.mode in {'OBJECT'}:
layout.operator_context = 'INVOKE_REGION_WIN'
layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_View_Menu", icon='ZOOM_ALL')
layout.menu("VIEW3D_MT_Select_Object", icon='RESTRICT_SELECT_OFF')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_AddMenu", icon='OBJECT_DATAMODE')
layout.menu("VIEW3D_MT_Object", icon='VIEW3D')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_TransformMenu", icon='MANIPUL')
layout.menu("VIEW3D_MT_MirrorMenu", icon='MOD_MIRROR')
layout.menu("VIEW3D_MT_CursorMenu", icon='CURSOR')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_ParentMenu", icon='ROTACTIVE')
layout.menu("VIEW3D_MT_GroupMenu", icon='GROUP')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_object_specials", text="Specials", icon='SOLO_OFF')
layout.menu("VIEW3D_MT_Camera_Options", icon='OUTLINER_OB_CAMERA')
UseSeparator(self, context)
layout.operator_menu_enum("object.constraint_add",
"type", text="Add Constraint", icon='CONSTRAINT')
layout.operator("object.delete", text="Delete Object", icon='X')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_UndoS", icon='ARROW_LEFTRIGHT')
layout.menu("VIEW3D_MT_Object_Interactive_Other", icon='OBJECT_DATA')
UseSeparator(self, context)
layout.operator("view3d.toolshelf", icon='MENU_PANEL')
layout.operator("view3d.properties", icon='MENU_PANEL')
# Edit Metaball #
if obj and obj.type == 'META' and obj.mode in {'EDIT'}:
layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_View_Menu", icon='ZOOM_ALL')
layout.menu("VIEW3D_MT_SelectMetaball", icon='RESTRICT_SELECT_OFF')
UseSeparator(self, context)
layout.operator_menu_enum("object.metaball_add", "type",
text="Add Metaball",
icon='OUTLINER_OB_META')
layout.menu("VIEW3D_MT_TransformMenu", icon='MANIPUL')
layout.menu("VIEW3D_MT_MirrorMenu", icon='MOD_MIRROR')
layout.menu("VIEW3D_MT_CursorMenu", icon='CURSOR')
UseSeparator(self, context)
layout.prop_menu_enum(settings, "proportional_edit",
icon="PROP_CON")
layout.prop_menu_enum(settings, "proportional_edit_falloff",
icon="SMOOTHCURVE")
UseSeparator(self, context)
layout.operator("mball.delete_metaelems", text="Delete Object",
icon='CANCEL')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_UndoS", icon='ARROW_LEFTRIGHT')
layout.menu("VIEW3D_MT_Object_Interactive_Other", icon='OBJECT_DATA')
UseSeparator(self, context)
layout.operator("view3d.toolshelf", icon='MENU_PANEL')
layout.operator("view3d.properties", icon='MENU_PANEL')
# Text Object Mode #
if obj and obj.type == 'FONT' and obj.mode in {'OBJECT'}:
layout.operator_context = 'INVOKE_REGION_WIN'
layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_View_Menu", icon='ZOOM_ALL')
layout.menu("VIEW3D_MT_Select_Object", icon='RESTRICT_SELECT_OFF')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_AddMenu", icon='OBJECT_DATAMODE')
layout.menu("VIEW3D_MT_Object", icon='VIEW3D')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_TransformMenu", icon='MANIPUL')
layout.menu("VIEW3D_MT_MirrorMenu", icon='MOD_MIRROR')
layout.menu("VIEW3D_MT_CursorMenu", icon='CURSOR')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_ParentMenu", icon='ROTACTIVE')
layout.menu("VIEW3D_MT_GroupMenu", icon='GROUP')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_object_specials", text="Specials", icon='SOLO_OFF')
layout.menu("VIEW3D_MT_Camera_Options", icon='OUTLINER_OB_CAMERA')
UseSeparator(self, context)
layout.operator_menu_enum("object.modifier_add", "type", icon='MODIFIER')
layout.operator_menu_enum("object.constraint_add",
"type", text="Add Constraint", icon='CONSTRAINT')
UseSeparator(self, context)
layout.operator("object.delete", text="Delete Object", icon='X')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_UndoS", icon='ARROW_LEFTRIGHT')
# New Entry For Switching to Editmode
layout.operator("view3d.interactive_mode_text", icon='VIEW3D')
UseSeparator(self, context)
layout.operator("view3d.toolshelf", icon='MENU_PANEL')
layout.operator("view3d.properties", icon='MENU_PANEL')
# Text Edit Mode #
# To Do: Space is already reserved for the typing tool
if obj and obj.type == 'FONT' and obj.mode in {'EDIT'}:
layout.operator_context = 'INVOKE_REGION_WIN'
layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_View_Menu", icon='ZOOM_ALL')
layout.menu("VIEW3D_MT_select_edit_text", icon='VIEW3D')
layout.menu("VIEW3D_MT_edit_font", icon='RESTRICT_SELECT_OFF')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_UndoS", icon='ARROW_LEFTRIGHT')
layout.operator("object.editmode_toggle", text="Enter Object Mode",
icon='OBJECT_DATA')
UseSeparator(self, context)
layout.operator("view3d.toolshelf", icon='MENU_PANEL')
layout.operator("view3d.properties", icon='MENU_PANEL')
# Camera Object Mode #
if obj and obj.type == 'CAMERA' and obj.mode in {'OBJECT'}:
layout.operator_context = 'INVOKE_REGION_WIN'
layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_View_Menu", icon='ZOOM_ALL')
layout.menu("VIEW3D_MT_Select_Object", icon='RESTRICT_SELECT_OFF')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_AddMenu", icon='OBJECT_DATAMODE')
layout.menu("VIEW3D_MT_Object", icon='VIEW3D')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_TransformMenu", icon='MANIPUL')
layout.menu("VIEW3D_MT_CursorMenuLite", icon='CURSOR')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_ParentMenu", icon='ROTACTIVE')
layout.menu("VIEW3D_MT_GroupMenu", icon='GROUP')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_object_specials", text="Specials", icon='SOLO_OFF')
layout.menu("VIEW3D_MT_Camera_Options", icon='OUTLINER_OB_CAMERA')
UseSeparator(self, context)
layout.operator_menu_enum("object.constraint_add",
"type", text="Add Constraint", icon='CONSTRAINT')
UseSeparator(self, context)
layout.operator("object.delete", text="Delete Object", icon='X')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_UndoS", icon='ARROW_LEFTRIGHT')
UseSeparator(self, context)
layout.operator("view3d.toolshelf", icon='MENU_PANEL')
layout.operator("view3d.properties", icon='MENU_PANEL')
# Lamp Object Mode #
if obj and obj.type == 'LAMP' and obj.mode in {'OBJECT'}:
layout.operator_context = 'INVOKE_REGION_WIN'
layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_View_Menu", icon='ZOOM_ALL')
layout.menu("VIEW3D_MT_Select_Object", icon='RESTRICT_SELECT_OFF')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_AddMenu", icon='OBJECT_DATAMODE')
layout.menu("VIEW3D_MT_Object", icon='VIEW3D')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_TransformMenuLite", icon='MANIPUL')
layout.menu("VIEW3D_MT_CursorMenuLite", icon='CURSOR')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_ParentMenu", icon='ROTACTIVE')
layout.menu("VIEW3D_MT_GroupMenu", icon='GROUP')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_object_specials", text="Specials", icon='SOLO_OFF')
layout.menu("VIEW3D_MT_Camera_Options", icon='OUTLINER_OB_CAMERA')
UseSeparator(self, context)
layout.operator_menu_enum("object.constraint_add",
"type", text="Add Constraint", icon='CONSTRAINT')
UseSeparator(self, context)
layout.operator("object.delete", text="Delete Object", icon='X')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_UndoS", icon='ARROW_LEFTRIGHT')
UseSeparator(self, context)
layout.operator("view3d.toolshelf", icon='MENU_PANEL')
layout.operator("view3d.properties", icon='MENU_PANEL')
# Armature Object Mode #
if obj and obj.type == 'ARMATURE' and obj.mode in {'OBJECT'}:
layout.operator_context = 'INVOKE_REGION_WIN'
layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_View_Menu", icon='ZOOM_ALL')
layout.menu("VIEW3D_MT_Select_Object", icon='RESTRICT_SELECT_OFF')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_AddMenu", icon='OBJECT_DATAMODE')
layout.menu("VIEW3D_MT_Object", icon='VIEW3D')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_TransformMenuArmature", icon='MANIPUL')
layout.menu("VIEW3D_MT_MirrorMenu", icon='MOD_MIRROR')
layout.menu("VIEW3D_MT_CursorMenuLite", icon='CURSOR')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_ParentMenu", icon='ROTACTIVE')
layout.menu("VIEW3D_MT_GroupMenu", icon='GROUP')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_object_specials", text="Specials", icon='SOLO_OFF')
layout.menu("VIEW3D_MT_Camera_Options", icon='OUTLINER_OB_CAMERA')
UseSeparator(self, context)
layout.operator_menu_enum("object.constraint_add",
"type", text="Add Constraint", icon='CONSTRAINT')
UseSeparator(self, context)
layout.operator("object.delete", text="Delete Object", icon='X')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_UndoS", icon='ARROW_LEFTRIGHT')
layout.menu("VIEW3D_MT_Object_Interactive_Armature", icon='VIEW3D')
UseSeparator(self, context)
layout.operator("view3d.toolshelf", icon='MENU_PANEL')
layout.operator("view3d.properties", icon='MENU_PANEL')
# Armature Edit #
if obj and obj.type == 'ARMATURE' and obj.mode in {'EDIT'}:
layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_View_Menu", icon='ZOOM_ALL')
layout.menu("VIEW3D_MT_Select_Edit_Armature",
icon='RESTRICT_SELECT_OFF')
UseSeparator(self, context)
layout.menu("INFO_MT_armature_add", text="Add Armature",
icon='OUTLINER_OB_ARMATURE')
layout.menu("VIEW3D_MT_Edit_Armature", text="Armature",
icon='OUTLINER_DATA_ARMATURE')
layout.menu("VIEW3D_MT_EditArmatureTK",
icon='ARMATURE_DATA')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_TransformMenuArmatureEdit", icon='MANIPUL')
layout.menu("VIEW3D_MT_MirrorMenu", icon='MOD_MIRROR')
layout.menu("VIEW3D_MT_CursorMenuLite", icon='CURSOR')
layout.menu("VIEW3D_MT_ParentMenu", icon='ROTACTIVE')
layout.menu("VIEW3D_MT_armature_specials", icon='SOLO_OFF')
layout.menu("VIEW3D_MT_edit_armature_roll",
icon='BONE_DATA')
UseSeparator(self, context)
layout.operator("armature.delete", text="Delete Object",
icon='X')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_UndoS", icon='ARROW_LEFTRIGHT')
layout.menu("VIEW3D_MT_Object_Interactive_Armature", icon='VIEW3D')
UseSeparator(self, context)
layout.operator("view3d.toolshelf", icon='MENU_PANEL')
layout.operator("view3d.properties", icon='MENU_PANEL')
# Armature Pose #
if obj and obj.type == 'ARMATURE' and obj.mode in {'POSE'}:
arm = context.active_object.data
layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_View_Menu", icon='ZOOM_ALL')
layout.menu("VIEW3D_MT_Select_Pose", icon='RESTRICT_SELECT_OFF')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_Pose", icon='OUTLINER_DATA_POSE')
layout.menu("VIEW3D_MT_TransformMenuArmaturePose", icon='MANIPUL')
layout.menu("VIEW3D_MT_pose_transform", icon='EMPTY_DATA')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_CursorMenuLite", icon='CURSOR')
layout.menu("VIEW3D_MT_PoseCopy", icon='FILE')
if arm.draw_type in {'BBONE', 'ENVELOPE'}:
layout.operator("transform.transform",
text="Scale Envelope Distance").mode = 'BONE_SIZE'
layout.menu("VIEW3D_MT_pose_apply", icon='AUTO')
layout.operator("pose.relax", icon='ARMATURE_DATA')
layout.menu("VIEW3D_MT_KeyframeMenu", icon='KEY_HLT')
layout.menu("VIEW3D_MT_pose_specials", icon='SOLO_OFF')
layout.menu("VIEW3D_MT_pose_group", icon='GROUP_BONE')
UseSeparator(self, context)
layout.operator_menu_enum("pose.constraint_add",
"type", text="Add Constraint", icon='CONSTRAINT_BONE')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_UndoS", icon='ARROW_LEFTRIGHT')
layout.menu("VIEW3D_MT_Object_Interactive_Armature", icon='VIEW3D')
UseSeparator(self, context)
layout.operator("view3d.toolshelf", icon='MENU_PANEL')
layout.operator("view3d.properties", icon='MENU_PANEL')
# Lattice Object Mode #
if obj and obj.type == 'LATTICE' and obj.mode in {'OBJECT'}:
layout.operator_context = 'INVOKE_REGION_WIN'
layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_View_Menu", icon='ZOOM_ALL')
layout.menu("VIEW3D_MT_Select_Object", icon='RESTRICT_SELECT_OFF')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_AddMenu", icon='OBJECT_DATAMODE')
layout.menu("VIEW3D_MT_Object", icon='VIEW3D')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_TransformMenu", icon='MANIPUL')
layout.menu("VIEW3D_MT_MirrorMenu", icon='MOD_MIRROR')
layout.menu("VIEW3D_MT_CursorMenu", icon='CURSOR')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_ParentMenu", icon='ROTACTIVE')
layout.menu("VIEW3D_MT_GroupMenu", icon='GROUP')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_object_specials", text="Specials", icon='SOLO_OFF')
layout.menu("VIEW3D_MT_Camera_Options", icon='OUTLINER_OB_CAMERA')
UseSeparator(self, context)
layout.operator_menu_enum("object.modifier_add", "type", icon='MODIFIER')
layout.operator_menu_enum("object.constraint_add",
"type", text="Add Constraint", icon='CONSTRAINT')
UseSeparator(self, context)
layout.operator("object.delete", text="Delete Object", icon='X')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_UndoS", icon='ARROW_LEFTRIGHT')
layout.menu("VIEW3D_MT_Object_Interactive_Other", icon='OBJECT_DATA')
UseSeparator(self, context)
layout.operator("view3d.toolshelf", icon='MENU_PANEL')
layout.operator("view3d.properties", icon='MENU_PANEL')
# Edit Lattice #
if obj and obj.type == 'LATTICE' and obj.mode in {'EDIT'}:
layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_View_Menu", icon='ZOOM_ALL')
layout.menu("VIEW3D_MT_Select_Edit_Lattice",
icon='RESTRICT_SELECT_OFF')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_TransformMenu", icon='MANIPUL')
layout.menu("VIEW3D_MT_MirrorMenu", icon='MOD_MIRROR')
layout.menu("VIEW3D_MT_CursorMenu", icon='CURSOR')
UseSeparator(self, context)
layout.prop_menu_enum(settings, "proportional_edit",
icon="PROP_CON")
layout.prop_menu_enum(settings, "proportional_edit_falloff",
icon="SMOOTHCURVE")
UseSeparator(self, context)
layout.operator("lattice.make_regular")
UseSeparator(self, context)
layout.menu("VIEW3D_MT_UndoS", icon='ARROW_LEFTRIGHT')
layout.menu("VIEW3D_MT_Object_Interactive_Other", icon='OBJECT_DATA')
UseSeparator(self, context)
layout.operator("view3d.toolshelf", icon='MENU_PANEL')
layout.operator("view3d.properties", icon='MENU_PANEL')
# Empty Object Mode #
if obj and obj.type == 'EMPTY' and obj.mode in {'OBJECT'}:
layout.operator_context = 'INVOKE_REGION_WIN'
layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_View_Menu", icon='ZOOM_ALL')
layout.menu("VIEW3D_MT_Select_Object", icon='RESTRICT_SELECT_OFF')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_AddMenu", icon='OBJECT_DATAMODE')
layout.menu("VIEW3D_MT_Object", icon='VIEW3D')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_TransformMenuLite", icon='MANIPUL')
layout.menu("VIEW3D_MT_MirrorMenu", icon='MOD_MIRROR')
layout.menu("VIEW3D_MT_CursorMenuLite", icon='CURSOR')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_ParentMenu", icon='ROTACTIVE')
layout.menu("VIEW3D_MT_GroupMenu", icon='GROUP')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_object_specials", text="Specials", icon='SOLO_OFF')
layout.menu("VIEW3D_MT_Camera_Options", icon='OUTLINER_OB_CAMERA')
UseSeparator(self, context)
layout.operator_menu_enum("object.constraint_add",
"type", text="Add Constraint", icon='CONSTRAINT')
UseSeparator(self, context)
layout.operator("object.delete", text="Delete Object", icon='X')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_UndoS", icon='ARROW_LEFTRIGHT')
UseSeparator(self, context)
layout.operator("view3d.toolshelf", icon='MENU_PANEL')
layout.operator("view3d.properties", icon='MENU_PANEL')
# Speaker Object Mode #
if obj and obj.type == 'SPEAKER' and obj.mode in {'OBJECT'}:
layout.operator_context = 'INVOKE_REGION_WIN'
layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_View_Menu", icon='ZOOM_ALL')
layout.menu("VIEW3D_MT_Select_Object", icon='RESTRICT_SELECT_OFF')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_AddMenu", icon='OBJECT_DATAMODE')
layout.menu("VIEW3D_MT_Object", icon='VIEW3D')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_TransformMenuLite", icon='MANIPUL')
layout.menu("VIEW3D_MT_CursorMenuLite", icon='CURSOR')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_ParentMenu", icon='ROTACTIVE')
layout.menu("VIEW3D_MT_GroupMenu", icon='GROUP')
UseSeparator(self, context)
layout.operator_menu_enum("object.constraint_add",
"type", text="Add Constraint", icon='CONSTRAINT')
UseSeparator(self, context)
layout.operator("object.delete", text="Delete Object", icon='X')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_UndoS", icon='ARROW_LEFTRIGHT')
UseSeparator(self, context)
layout.operator("view3d.toolshelf", icon='MENU_PANEL')
layout.operator("view3d.properties", icon='MENU_PANEL')
# Particle Menu #
if obj and context.mode == 'PARTICLE':
layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_View_Menu", icon='ZOOM_ALL')
layout.menu("VIEW3D_MT_Select_Particle",
icon='RESTRICT_SELECT_OFF')
layout.menu("VIEW3D_MT_Selection_Mode_Particle",
text="Select and Display Mode", icon='PARTICLE_PATH')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_TransformMenu", icon='MANIPUL')
layout.menu("VIEW3D_MT_MirrorMenu", icon='MOD_MIRROR')
layout.menu("VIEW3D_MT_CursorMenuLite", icon='CURSOR')
UseSeparator(self, context)
layout.prop_menu_enum(settings, "proportional_edit",
icon="PROP_CON")
layout.prop_menu_enum(settings, "proportional_edit_falloff",
icon="SMOOTHCURVE")
UseSeparator(self, context)
layout.menu("VIEW3D_MT_particle", icon='PARTICLEMODE')
layout.menu("VIEW3D_MT_particle_specials", text="Hair Specials", icon='HAIR')
UseSeparator(self, context)
layout.operator("object.delete", text="Delete Object", icon='X')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_UndoS", icon='ARROW_LEFTRIGHT')
layout.menu("VIEW3D_MT_Object_Interactive_Mode", icon='VIEW3D')
UseSeparator(self, context)
layout.operator("view3d.toolshelf", icon='MENU_PANEL')
layout.operator("view3d.properties", icon='MENU_PANEL')
# Object Menus #
# ********** Object Menu **********
class VIEW3D_MT_Object(Menu):
bl_context = "objectmode"
bl_label = "Object"
def draw(self, context):
layout = self.layout
view = context.space_data
is_local_view = (view.local_view is not None)
layout.operator("object.delete", text="Delete...").use_global = False
UseSeparator(self, context)
layout.menu("VIEW3D_MT_object_parent")
layout.menu("VIEW3D_MT_Duplicate")
layout.operator("object.join")
if is_local_view:
layout.operator_context = 'EXEC_REGION_WIN'
layout.operator("object.move_to_layer", text="Move out of Local View")
layout.operator_context = 'INVOKE_REGION_WIN'
else:
layout.operator("object.move_to_layer", text="Move to Layer...")
layout.menu("VIEW3D_MT_make_links", text="Make Links...")
layout.menu("VIEW3D_MT_Object_Data_Link")
UseSeparator(self, context)
layout.menu("VIEW3D_MT_AutoSmooth", icon='ALIASED')
UseSeparator(self, context)
layout.menu("VIEW3D_MT_object_constraints")
layout.menu("VIEW3D_MT_object_track")
layout.menu("VIEW3D_MT_object_animation")
UseSeparator(self, context)
layout.menu("VIEW3D_MT_object_game")
layout.menu("VIEW3D_MT_object_showhide")
UseSeparator(self, context)
layout.operator_menu_enum("object.convert", "target")
# ********** Object Add **********
class VIEW3D_MT_AddMenu(Menu):
bl_label = "Add Object"
def draw(self, context):
layout = self.layout
layout.operator_context = 'INVOKE_REGION_WIN'
layout.menu("INFO_MT_mesh_add", text="Add Mesh",
icon='OUTLINER_OB_MESH')
layout.menu("INFO_MT_curve_add", text="Add Curve",
icon='OUTLINER_OB_CURVE')
layout.menu("INFO_MT_surface_add", text="Add Surface",
icon='OUTLINER_OB_SURFACE')
layout.operator_menu_enum("object.metaball_add", "type",
icon='OUTLINER_OB_META')
layout.operator("object.text_add", text="Add Text",
icon='OUTLINER_OB_FONT')
UseSeparator(self, context)
layout.menu("INFO_MT_armature_add", text="Add Armature",
icon='OUTLINER_OB_ARMATURE')
layout.operator("object.add", text="Lattice",
icon='OUTLINER_OB_LATTICE').type = 'LATTICE'
layout.operator_menu_enum("object.empty_add", "type", text="Empty", icon='OUTLINER_OB_EMPTY')
UseSeparator(self, context)
layout.operator("object.speaker_add", text="Speaker", icon='OUTLINER_OB_SPEAKER')
UseSeparator(self, context)
layout.operator("object.camera_add", text="Camera",
icon='OUTLINER_OB_CAMERA')
layout.operator_menu_enum("object.lamp_add", "type",
icon="OUTLINER_OB_LAMP")
UseSeparator(self, context)
layout.operator_menu_enum("object.effector_add", "type",
text="Force Field",
icon='FORCE_FORCE')
layout.menu("VIEW3D_MT_object_quick_effects", text="Quick Effects", icon='PARTICLES')
UseSeparator(self, context)
layout.operator_menu_enum("object.group_instance_add", "group",
text="Group Instance",
icon='GROUP_VERTEX')
# ********** Object Manipulator **********
class VIEW3D_MT_ManipulatorMenu1(Menu):
bl_label = "Manipulator"
def draw(self, context):
layout = self.layout
layout.operator_context = 'INVOKE_REGION_WIN'
props = layout.operator("view3d.enable_manipulator", text='Translate', icon='MAN_TRANS')
props.translate = True
props = layout.operator("view3d.enable_manipulator", text='Rotate', icon='MAN_ROT')
props.rotate = True
props = layout.operator("view3d.enable_manipulator", text='Scale', icon='MAN_SCALE')
props.scale = True
UseSeparator(self, context)
props = layout.operator("view3d.enable_manipulator", text='Combo', icon='MAN_SCALE')
props.scale = True
props.rotate = True
props.translate = True
props = layout.operator("view3d.enable_manipulator", text='Hide', icon='MAN_SCALE')
props.scale = False
props.rotate = False
props.translate = False
# ********** Object Mirror **********
class VIEW3D_MT_MirrorMenu(Menu):
bl_label = "Mirror"
def draw(self, context):
layout = self.layout
layout.operator("transform.mirror", text="Interactive Mirror")
UseSeparator(self, context)
layout.operator_context = 'INVOKE_REGION_WIN'
props = layout.operator("transform.mirror", text="X Global")
props.constraint_axis = (True, False, False)
props.constraint_orientation = 'GLOBAL'
props = layout.operator("transform.mirror", text="Y Global")
props.constraint_axis = (False, True, False)
props.constraint_orientation = 'GLOBAL'
props = layout.operator("transform.mirror", text="Z Global")
props.constraint_axis = (False, False, True)
props.constraint_orientation = 'GLOBAL'
if context.edit_object:
UseSeparator(self, context)
props = layout.operator("transform.mirror", text="X Local")
props.constraint_axis = (True, False, False)
props.constraint_orientation = 'LOCAL'
props = layout.operator("transform.mirror", text="Y Local")
props.constraint_axis = (False, True, False)
props.constraint_orientation = 'LOCAL'
props = layout.operator("transform.mirror", text="Z Local")
props.constraint_axis = (False, False, True)
props.constraint_orientation = 'LOCAL'
UseSeparator(self, context)
layout.operator("object.vertex_group_mirror")
# ********** Object Snap Cursor **********
class VIEW3D_MT_Pivot(Menu):
bl_label = "Pivot"
def draw(self, context):
layout = self.layout
layout.prop(context.space_data, "pivot_point", expand=True)
if context.active_object.mode == 'OBJECT':
UseSeparator(self, context)
layout.prop(context.space_data, "use_pivot_point_align", text="Center Points")
class VIEW3D_Snap_Context(Menu):
bl_label = "Snapping"
def draw(self, context):
layout = self.layout
toolsettings = context.tool_settings
layout.prop(toolsettings, "snap_element", expand=True)
layout.prop(toolsettings, "use_snap")
class VIEW3D_Snap_Origin(Menu):
bl_label = "Snap "
def draw(self, context):
layout = self.layout
layout.operator_context = 'EXEC_AREA'
layout.operator("object.origin_set",
text="Geometry to Origin").type = 'GEOMETRY_ORIGIN'
UseSeparator(self, context)
layout.operator("object.origin_set",
text="Origin to Geometry").type = 'ORIGIN_GEOMETRY'
layout.operator("object.origin_set",
text="Origin to 3D Cursor").type = 'ORIGIN_CURSOR'
layout.operator("object.origin_set",
text="Origin to Center of Mass").type = 'ORIGIN_CENTER_OF_MASS'
class VIEW3D_MT_CursorMenu(Menu):
bl_label = "Snap Cursor"
def draw(self, context):
layout = self.layout
layout.operator_context = 'INVOKE_REGION_WIN'
layout.menu("VIEW3D_Snap_Origin")
layout.menu("VIEW3D_Snap_Context")
UseSeparator(self, context)
layout.operator("view3d.snap_cursor_to_selected",
text="Cursor to Selected")
layout.operator("view3d.snap_cursor_to_center",
text="Cursor to Center")
layout.operator("view3d.snap_cursor_to_grid",
text="Cursor to Grid")
layout.operator("view3d.snap_cursor_to_active",
text="Cursor to Active")
UseSeparator(self, context)
layout.operator("view3d.snap_selected_to_cursor", text="Selection to Cursor").use_offset = False
layout.operator("view3d.snap_selected_to_cursor", text="Selection to Cursor (Offset)").use_offset = True
layout.operator("view3d.snap_selected_to_grid",
text="Selection to Grid")
layout.operator("view3d.snap_cursor_selected_to_center",
text="Selection and Cursor to Center")
UseSeparator(self, context)
layout.menu("VIEW3D_MT_Pivot")
layout.operator("view3d.pivot_cursor",
text="Set Cursor as Pivot Point")
layout.operator("view3d.revert_pivot",
text="Revert Pivot Point")
class VIEW3D_MT_CursorMenuLite(Menu):
bl_label = "Snap Cursor"
def draw(self, context):
layout = self.layout
layout.operator_context = 'INVOKE_REGION_WIN'
layout.menu("VIEW3D_Snap_Origin")
UseSeparator(self, context)
layout.operator("view3d.snap_cursor_to_selected",
text="Cursor to Selected")
layout.operator("view3d.snap_cursor_to_center",
text="Cursor to Center")
layout.operator("view3d.snap_cursor_to_grid",
text="Cursor to Grid")
layout.operator("view3d.snap_cursor_to_active",
text="Cursor to Active")
UseSeparator(self, context)
layout.operator("view3d.snap_selected_to_cursor", text="Selection to Cursor").use_offset = False
layout.operator("view3d.snap_selected_to_cursor", text="Selection to Cursor (Offset)").use_offset = True
layout.operator("view3d.snap_selected_to_grid",
text="Selection to Grid")
layout.operator("view3d.snap_cursor_selected_to_center",
text="Selection and Cursor to Center")
UseSeparator(self, context)
layout.menu("VIEW3D_MT_Pivot")
layout.operator("view3d.pivot_cursor",
text="Set Cursor as Pivot Point")
layout.operator("view3d.revert_pivot",
text="Revert Pivot Point")
# ********** Object Interactive Mode **********
class InteractiveMode(Menu):
bl_idname = "VIEW3D_MT_Object_Interactive_Mode"
bl_label = "Interactive Mode"
bl_description = "Menu of objects' interactive modes (Window Types)"
def draw(self, context):
layout = self.layout
obj = context.active_object
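# Only offer the Particle Edit entry when the active object actually has a particle system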
psys = hasattr(obj, "particle_systems")
psys_items = len(obj.particle_systems.items()) > 0 if psys else False
layout.operator(SetObjectMode.bl_idname, text="Object", icon="OBJECT_DATAMODE").mode = "OBJECT"
layout.operator(SetObjectMode.bl_idname, text="Edit", icon="EDITMODE_HLT").mode = "EDIT"
layout.operator(SetObjectMode.bl_idname, text="Sculpt", icon="SCULPTMODE_HLT").mode = "SCULPT"
layout.operator(SetObjectMode.bl_idname, text="Vertex Paint", icon="VPAINT_HLT").mode = "VERTEX_PAINT"
layout.operator(SetObjectMode.bl_idname, text="Weight Paint", icon="WPAINT_HLT").mode = "WEIGHT_PAINT"
layout.operator(SetObjectMode.bl_idname, text="Texture Paint", icon="TPAINT_HLT").mode = "TEXTURE_PAINT"
if obj and psys_items:
layout.operator(SetObjectMode.bl_idname, text="Particle Edit",
icon="PARTICLEMODE").mode = "PARTICLE_EDIT"
if context.gpencil_data:
layout.operator("view3d.interactive_mode_grease_pencil", icon="GREASEPENCIL")
# ********** Object Armature Interactive Mode **********
class InteractiveModeArmature(Menu):
bl_idname = "VIEW3D_MT_Object_Interactive_Armature"
bl_label = "Interactive Mode"
bl_description = "Menu of objects interactive mode"
def draw(self, context):
layout = self.layout
layout.operator(SetObjectMode.bl_idname, text="Object", icon="OBJECT_DATAMODE").mode = "OBJECT"
layout.operator(SetObjectMode.bl_idname, text="Edit", icon="EDITMODE_HLT").mode = "EDIT"
layout.operator(SetObjectMode.bl_idname, text="Pose", icon="POSE_HLT").mode = "POSE"
if context.gpencil_data:
layout.operator("view3d.interactive_mode_grease_pencil", icon="GREASEPENCIL")
# ********** Interactive Mode Other **********
class InteractiveModeOther(Menu):
bl_idname = "VIEW3D_MT_Object_Interactive_Other"
bl_label = "Interactive Mode"
bl_description = "Menu of objects interactive mode"
def draw(self, context):
layout = self.layout
layout.operator("object.editmode_toggle", text="Edit/Object Toggle",
icon='OBJECT_DATA')
if context.gpencil_data:
layout.operator("view3d.interactive_mode_grease_pencil", icon="GREASEPENCIL")
# ********** Grease Pencil Interactive Mode **********
class VIEW3D_OT_Interactive_Mode_Grease_Pencil(Operator):
bl_idname = "view3d.interactive_mode_grease_pencil"
bl_label = "Edit Strokes"
bl_description = "Toggle Edit Strokes for Grease Pencil"
@classmethod
def poll(cls, context):
return (context.gpencil_data is not None)
def execute(self, context):
try:
bpy.ops.gpencil.editmode_toggle()
except:
self.report({'WARNING'}, "It is not possible to enter into the interactive mode")
return {'FINISHED'}
class VIEW3D_MT_Edit_Gpencil(Menu):
bl_label = "GPencil"
def draw(self, context):
toolsettings = context.tool_settings
layout = self.layout
layout.operator("gpencil.brush_paint", text="Sculpt Strokes").wait_for_input = True
layout.prop_menu_enum(toolsettings.gpencil_sculpt, "tool", text="Sculpt Brush")
UseSeparator(self, context)
layout.menu("VIEW3D_MT_edit_gpencil_transform")
layout.operator("transform.mirror", text="Mirror")
layout.menu("GPENCIL_MT_snap")
UseSeparator(self, context)
layout.menu("VIEW3D_MT_object_animation") # NOTE: provides keyingset access...
UseSeparator(self, context)
layout.menu("VIEW3D_MT_edit_gpencil_delete")
layout.operator("gpencil.duplicate_move", text="Duplicate")
UseSeparator(self, context)
layout.menu("VIEW3D_MT_select_gpencil")
UseSeparator(self, context)
layout.operator("gpencil.copy", text="Copy")
layout.operator("gpencil.paste", text="Paste")
UseSeparator(self, context)
layout.prop_menu_enum(toolsettings, "proportional_edit")
layout.prop_menu_enum(toolsettings, "proportional_edit_falloff")
UseSeparator(self, context)
layout.operator("gpencil.reveal")
layout.operator("gpencil.hide", text="Show Active Layer Only").unselected = True
layout.operator("gpencil.hide", text="Hide Active Layer").unselected = False
UseSeparator(self, context)
layout.operator_menu_enum("gpencil.move_to_layer", "layer", text="Move to Layer")
layout.operator_menu_enum("gpencil.convert", "type", text="Convert to Geometry...")
# ********** Text Interactive Mode **********
class VIEW3D_OT_Interactive_Mode_Text(Operator):
bl_idname = "view3d.interactive_mode_text"
bl_label = "Enter Edit Mode"
bl_description = "Toggle object's editmode"
@classmethod
def poll(cls, context):
return (context.active_object is not None)
def execute(self, context):
bpy.ops.object.editmode_toggle()
self.report({'INFO'}, "Spacebar shortcut won't work in the Text Edit mode")
return {'FINISHED'}
# ********** Object Parent **********
class VIEW3D_MT_ParentMenu(Menu):
bl_label = "Parent"
def draw(self, context):
layout = self.layout
layout.operator("object.parent_set", text="Set")
layout.operator("object.parent_clear", text="Clear")
# ********** Object Group **********
class VIEW3D_MT_GroupMenu(Menu):
bl_label = "Group"
def draw(self, context):
layout = self.layout
layout.operator("group.create")
layout.operator("group.objects_add_active")
UseSeparator(self, context)
layout.operator("group.objects_remove")
layout.operator("group.objects_remove_all")
layout.operator("group.objects_remove_active")
# ********** Object Camera Options **********
class VIEW3D_MT_Camera_Options(Menu):
bl_label = "Camera"
def draw(self, context):
layout = self.layout
layout.operator_context = 'EXEC_REGION_WIN'
layout.operator("object.camera_add", text="Add Camera", icon='OUTLINER_OB_CAMERA')
self.layout.operator("view3d.object_as_camera", text="Object As Camera", icon='OUTLINER_OB_CAMERA')
self.layout.operator("view3d.viewnumpad", text="View Active Camera",
icon='OUTLINER_OB_CAMERA').type = 'CAMERA'
class VIEW3D_MT_Object_Data_Link(Menu):
bl_label = "Object Data"
def draw(self, context):
layout = self.layout
layout.operator_menu_enum("object.make_local", "type", text="Make Local...")
layout.menu("VIEW3D_MT_make_single_user")
layout.operator("object.proxy_make", text="Make Proxy...")
layout.operator("object.make_dupli_face")
UseSeparator(self, context)
layout.operator("object.data_transfer")
layout.operator("object.datalayout_transfer")
class VIEW3D_MT_Duplicate(Menu):
bl_label = "Duplicate"
def draw(self, context):
layout = self.layout
layout.operator("object.duplicate_move")
layout.operator("object.duplicate_move_linked")
class VIEW3D_MT_KeyframeMenu(Menu):
bl_label = "Keyframe"
def draw(self, context):
layout = self.layout
layout.operator("anim.keyframe_insert_menu",
text="Insert Keyframe...")
layout.operator("anim.keyframe_delete_v3d",
text="Delete Keyframe...")
layout.operator("anim.keying_set_active_set",
text="Change Keying Set...")
class VIEW3D_MT_UndoS(Menu):
bl_label = "Undo/Redo"
def draw(self, context):
layout = self.layout
layout.operator("ed.undo")
layout.operator("ed.redo")
UseSeparator(self, context)
layout.operator("ed.undo_history")
# ********** Normals / Auto Smooth Menu **********
# Thanks to marvin.k.breuer for the Autosmooth part of the menu
class VIEW3D_MT_AutoSmooth(Menu):
bl_label = "Normals / Auto Smooth"
def draw(self, context):
layout = self.layout
obj = context.object
obj_data = context.active_object.data
# moved the VIEW3D_MT_edit_mesh_normals contents here under an Edit mode check
if obj and obj.type == 'MESH' and obj.mode in {'EDIT'}:
layout.operator("mesh.normals_make_consistent",
text="Recalculate Outside").inside = False
layout.operator("mesh.normals_make_consistent",
text="Recalculate Inside").inside = True
layout.operator("mesh.flip_normals")
UseSeparator(self, context)
layout.prop(obj_data, "show_double_sided", text="Normals: Double Sided")
UseSeparator(self, context)
layout.prop(obj_data, "use_auto_smooth", text="Normals: Auto Smooth")
# Auto Smooth Angle - two tab spaces to align it with the rest of the menu
layout.prop(obj_data, "auto_smooth_angle",
text=" Auto Smooth Angle")
# Edit Mode Menu's #
# ********** Edit Mesh **********
class VIEW3D_MT_Edit_Mesh(Menu):
bl_label = "Mesh"
def draw(self, context):
layout = self.layout
toolsettings = context.tool_settings
view = context.space_data
layout.menu("VIEW3D_MT_edit_mesh_vertices", icon='VERTEXSEL')
layout.menu("VIEW3D_MT_edit_mesh_edges", icon='EDGESEL')
layout.menu("VIEW3D_MT_edit_mesh_faces", icon='FACESEL')
UseSeparator(self, context)
layout.operator("mesh.duplicate_move")
UseSeparator(self, context)
layout.menu("VIEW3D_MT_edit_mesh_clean", icon='AUTO')
layout.prop(view, "use_occlude_geometry")
UseSeparator(self, context)
layout.menu("VIEW3D_MT_AutoSmooth", icon='META_DATA')
layout.operator("mesh.loopcut_slide",
text="Loopcut", icon='UV_EDGESEL')
UseSeparator(self, context)
layout.operator("mesh.symmetrize")
layout.operator("mesh.symmetry_snap")
UseSeparator(self, context)
layout.operator("mesh.bisect")
layout.operator_menu_enum("mesh.sort_elements", "type", text="Sort Elements...")
UseSeparator(self, context)
layout.prop_menu_enum(toolsettings, "proportional_edit")
layout.prop_menu_enum(toolsettings, "proportional_edit_falloff")
UseSeparator(self, context)
layout.prop(toolsettings, "use_mesh_automerge")
# Double Threshold - two tab spaces to align it with the rest of the menu
layout.prop(toolsettings, "double_threshold", text=" Double Threshold")
UseSeparator(self, context)
layout.menu("VIEW3D_MT_edit_mesh_showhide")
# ********** Edit Multiselect **********
class VIEW3D_MT_Edit_Multi(Menu):
bl_label = "Multi Select"
def draw(self, context):
layout = self.layout
layout.operator_context = 'INVOKE_REGION_WIN'
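# mesh_select_mode is a (vertex, edge, face) boolean triple; wm.context_set_value
# evaluates the value string below and assigns it to the given context data path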
prop = layout.operator("wm.context_set_value", text="Vertex Select",
icon='VERTEXSEL')
prop.value = "(True, False, False)"
prop.data_path = "tool_settings.mesh_select_mode"
prop = layout.operator("wm.context_set_value", text="Edge Select",
icon='EDGESEL')
prop.value = "(False, True, False)"
prop.data_path = "tool_settings.mesh_select_mode"
prop = layout.operator("wm.context_set_value", text="Face Select",
icon='FACESEL')
prop.value = "(False, False, True)"
prop.data_path = "tool_settings.mesh_select_mode"
UseSeparator(self, context)
prop = layout.operator("wm.context_set_value",
text="Vertex & Edge Select",
icon='EDITMODE_HLT')
prop.value = "(True, True, False)"
prop.data_path = "tool_settings.mesh_select_mode"
prop = layout.operator("wm.context_set_value",
text="Vertex & Face Select",
icon='ORTHO')
prop.value = "(True, False, True)"
prop.data_path = "tool_settings.mesh_select_mode"
prop = layout.operator("wm.context_set_value",
text="Edge & Face Select",
icon='SNAP_FACE')
prop.value = "(False, True, True)"
prop.data_path = "tool_settings.mesh_select_mode"
UseSeparator(self, context)
prop = layout.operator("wm.context_set_value",
text="Vertex & Edge & Face Select",
icon='SNAP_VOLUME')
prop.value = "(True, True, True)"
prop.data_path = "tool_settings.mesh_select_mode"
# ********** Edit Mesh Edge **********
class VIEW3D_MT_EditM_Edge(Menu):
bl_label = "Edges"
def draw(self, context):
layout = self.layout
layout.operator_context = 'INVOKE_REGION_WIN'
layout.operator("mesh.mark_seam")
layout.operator("mesh.mark_seam", text="Clear Seam").clear = True
UseSeparator(self, context)
layout.operator("mesh.mark_sharp")
layout.operator("mesh.mark_sharp", text="Clear Sharp").clear = True
layout.operator("mesh.extrude_move_along_normals", text="Extrude")
UseSeparator(self, context)
layout.operator("mesh.edge_rotate",
text="Rotate Edge CW").direction = 'CW'
layout.operator("mesh.edge_rotate",
text="Rotate Edge CCW").direction = 'CCW'
UseSeparator(self, context)
layout.operator("TFM_OT_edge_slide", text="Edge Slide")
layout.operator("mesh.loop_multi_select", text="Edge Loop")
layout.operator("mesh.loop_multi_select", text="Edge Ring").ring = True
layout.operator("mesh.loop_to_region")
layout.operator("mesh.region_to_loop")
# ********** Edit Mesh Cursor **********
class VIEW3D_MT_EditCursorMenu(Menu):
bl_label = "Snap Cursor"
def draw(self, context):
layout = self.layout
layout.operator_context = 'INVOKE_REGION_WIN'
layout.operator("object.setorigintoselected",
text="Origin to Selected V/F/E")
UseSeparator(self, context)
layout.menu("VIEW3D_Snap_Origin")
layout.menu("VIEW3D_Snap_Context")
UseSeparator(self, context)
layout.operator("view3d.snap_cursor_to_selected",
text="Cursor to Selected")
layout.operator("view3d.snap_cursor_to_center",
text="Cursor to Center")
layout.operator("view3d.snap_cursor_to_grid",
text="Cursor to Grid")
layout.operator("view3d.snap_cursor_to_active",
text="Cursor to Active")
layout.operator("view3d.snap_cursor_to_edge_intersection",
text="Cursor to Edge Intersection")
UseSeparator(self, context)
layout.operator("view3d.snap_selected_to_cursor", text="Selection to Cursor").use_offset = False
layout.operator("view3d.snap_selected_to_cursor", text="Selection to Cursor (Offset)").use_offset = True
layout.operator("view3d.snap_selected_to_grid",
text="Selection to Grid")
UseSeparator(self, context)
layout.menu("VIEW3D_MT_Pivot")
layout.operator("view3d.pivot_cursor",
text="Set Cursor as Pivot Point")
layout.operator("view3d.revert_pivot",
text="Revert Pivot Point")
# ********** Edit Mesh UV **********
class VIEW3D_MT_UV_Map(Menu):
bl_label = "UV Mapping"
def draw(self, context):
layout = self.layout
layout.operator("uv.unwrap")
UseSeparator(self, context)
layout.operator_context = 'INVOKE_DEFAULT'
layout.operator("uv.smart_project")
layout.operator("uv.lightmap_pack")
layout.operator("uv.follow_active_quads")
layout.operator_context = 'EXEC_REGION_WIN'
layout.operator("uv.cube_project")
layout.operator("uv.cylinder_project")
layout.operator("uv.sphere_project")
layout.operator_context = 'INVOKE_REGION_WIN'
UseSeparator(self, context)
layout.operator("uv.project_from_view").scale_to_bounds = False
layout.operator("uv.project_from_view", text="Project from View (Bounds)").scale_to_bounds = True
UseSeparator(self, context)
layout.operator("uv.reset")
# ********** Edit Curve **********
class VIEW3D_MT_Edit_Curve(Menu):
bl_label = "Curve"
def draw(self, context):
layout = self.layout
toolsettings = context.tool_settings
layout.operator("curve.extrude_move")
layout.operator("curve.spin")
layout.operator("curve.duplicate_move")
layout.operator("curve.split")
layout.operator("curve.separate")
layout.operator("curve.make_segment")
layout.operator("curve.cyclic_toggle")
UseSeparator(self, context)
layout.operator("curve.delete", text="Delete...")
UseSeparator(self, context)
layout.menu("VIEW3D_MT_edit_curve_segments")
layout.prop_menu_enum(toolsettings, "proportional_edit",
icon="PROP_CON")
layout.prop_menu_enum(toolsettings, "proportional_edit_falloff",
icon="SMOOTHCURVE")
layout.menu("VIEW3D_MT_edit_curve_showhide")
class VIEW3D_MT_EditCurveCtrlpoints(Menu):
bl_label = "Control Points"
def draw(self, context):
layout = self.layout
edit_object = context.edit_object
if edit_object.type == 'CURVE':
layout.operator("transform.transform").mode = 'TILT'
layout.operator("curve.tilt_clear")
layout.operator("curve.separate")
layout.operator_menu_enum("curve.handle_type_set", "type")
layout.menu("VIEW3D_MT_hook")
class VIEW3D_MT_EditCurveSegments(Menu):
bl_label = "Curve Segments"
def draw(self, context):
layout = self.layout
layout.operator("curve.subdivide")
layout.operator("curve.switch_direction")
class VIEW3D_MT_EditCurveSpecials(Menu):
bl_label = "Specials"
def draw(self, context):
layout = self.layout
layout.operator("curve.subdivide")
UseSeparator(self, context)
layout.operator("curve.switch_direction")
layout.operator("curve.spline_weight_set")
layout.operator("curve.radius_set")
UseSeparator(self, context)
layout.operator("curve.smooth")
layout.operator("curve.smooth_weight")
layout.operator("curve.smooth_radius")
layout.operator("curve.smooth_tilt")
# Brushes Menu's #
# Thanks to CoDEmanX for the code
class VIEW3D_MT_Brush_Selection(Menu):
bl_label = "Brush Tool"
def draw(self, context):
layout = self.layout
settings = UnifiedPaintPanel.paint_settings(context)
# check if brush exists (for instance, in paint mode before adding a slot)
if hasattr(settings, 'brush'):
brush = settings.brush
else:
brush = None
if not brush:
layout.label(text="No Brushes currently available", icon="INFO")
return
if not context.particle_edit_object:
if UseBrushesLists():
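# Compact list layout: show every brush valid for the current paint mode, three columns wide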
flow = layout.column_flow(columns=3)
for brsh in bpy.data.brushes:
if (context.sculpt_object and brsh.use_paint_sculpt):
props = flow.operator("wm.context_set_id", text=brsh.name,
icon_value=layout.icon(brsh))
props.data_path = "tool_settings.sculpt.brush"
props.value = brsh.name
elif (context.image_paint_object and brsh.use_paint_image):
props = flow.operator("wm.context_set_id", text=brsh.name,
icon_value=layout.icon(brsh))
props.data_path = "tool_settings.image_paint.brush"
props.value = brsh.name
elif (context.vertex_paint_object and brsh.use_paint_vertex):
props = flow.operator("wm.context_set_id", text=brsh.name,
icon_value=layout.icon(brsh))
props.data_path = "tool_settings.vertex_paint.brush"
props.value = brsh.name
elif (context.weight_paint_object and brsh.use_paint_weight):
props = flow.operator("wm.context_set_id", text=brsh.name,
icon_value=layout.icon(brsh))
props.data_path = "tool_settings.weight_paint.brush"
props.value = brsh.name
else:
layout.template_ID_preview(settings, "brush", new="brush.add", rows=3, cols=8)
class VIEW3D_MT_Brush_Settings(Menu):
bl_label = "Brush Settings"
def draw(self, context):
layout = self.layout
settings = UnifiedPaintPanel.paint_settings(context)
brush = getattr(settings, "brush", None)
ups = context.tool_settings.unified_paint_settings
layout.prop(ups, "use_unified_size", text="Unified Size")
layout.prop(ups, "use_unified_strength", text="Unified Strength")
if context.image_paint_object or context.vertex_paint_object:
layout.prop(ups, "use_unified_color", text="Unified Color")
UseSeparator(self, context)
if not brush:
layout.label(text="No Brushes currently available", icon="INFO")
return
layout.menu("VIEW3D_MT_brush_paint_modes")
if context.sculpt_object:
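# Sculpt-only brush options: curve preset, stroke method (hidden for Grab),
# add/subtract direction for some tools, and the persistent base for the Layer brush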
sculpt_tool = brush.sculpt_tool
UseSeparator(self, context)
layout.operator_menu_enum("brush.curve_preset", "shape", text="Curve Preset")
UseSeparator(self, context)
if sculpt_tool != 'GRAB':
layout.prop_menu_enum(brush, "stroke_method")
if sculpt_tool in {'DRAW', 'PINCH', 'INFLATE', 'LAYER', 'CLAY'}:
layout.prop_menu_enum(brush, "direction")
if sculpt_tool == 'LAYER':
layout.prop(brush, "use_persistent")
layout.operator("sculpt.set_persistent_base")
# Sculpt Menu's #
class VIEW3D_MT_Sculpts(Menu):
bl_label = "Sculpt"
def draw(self, context):
layout = self.layout
toolsettings = context.tool_settings
sculpt = toolsettings.sculpt
layout.prop(sculpt, "use_symmetry_x")
layout.prop(sculpt, "use_symmetry_y")
layout.prop(sculpt, "use_symmetry_z")
UseSeparator(self, context)
layout.prop(sculpt, "lock_x")
layout.prop(sculpt, "lock_y")
layout.prop(sculpt, "lock_z")
UseSeparator(self, context)
layout.prop(sculpt, "use_threaded", text="Threaded Sculpt")
layout.prop(sculpt, "show_low_resolution")
layout.prop(sculpt, "use_deform_only")
UseSeparator(self, context)
layout.prop(sculpt, "show_brush")
layout.prop(sculpt, "show_diffuse_color")
class VIEW3D_MT_Hide_Masks(Menu):
bl_label = "Hide/Mask"
def draw(self, context):
layout = self.layout
props = layout.operator("paint.mask_lasso_gesture", text="Lasso Mask")
UseSeparator(self, context)
props = layout.operator("view3d.select_border", text="Box Mask", icon="BORDER_RECT")
props = layout.operator("paint.hide_show", text="Box Hide")
props.action = 'HIDE'
props.area = 'INSIDE'
props = layout.operator("paint.hide_show", text="Box Show")
props.action = 'SHOW'
props.area = 'INSIDE'
UseSeparator(self, context)
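# Mask flood fill writes one value to every vertex: 1 masks everything, 0 clears, INVERT flips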
props = layout.operator("paint.mask_flood_fill", text="Fill Mask", icon="BORDER_RECT")
props.mode = 'VALUE'
props.value = 1
props = layout.operator("paint.mask_flood_fill", text="Clear Mask")
props.mode = 'VALUE'
props.value = 0
layout.operator("paint.mask_flood_fill", text="Invert Mask").mode = 'INVERT'
UseSeparator(self, context)
props = layout.operator("paint.hide_show", text="Show All", icon="RESTRICT_VIEW_OFF")
props.action = 'SHOW'
props.area = 'ALL'
props = layout.operator("paint.hide_show", text="Hide Masked", icon="RESTRICT_VIEW_ON")
props.area = 'MASKED'
props.action = 'HIDE'
# Sculpt Specials Menu (Thanks to marvin.k.breuer) #
class VIEW3D_MT_Sculpt_Specials(Menu):
bl_label = "Sculpt Specials"
def draw(self, context):
layout = self.layout
settings = context.tool_settings
if context.sculpt_object.use_dynamic_topology_sculpting:
layout.operator("sculpt.dynamic_topology_toggle",
icon='X', text="Disable Dyntopo")
UseSeparator(self, context)
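# CONSTANT detail uses a fixed world-space resolution, so expose the value
# plus an eyedropper to sample it from the mesh; other methods use detail_size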
if (settings.sculpt.detail_type_method == 'CONSTANT'):
layout.prop(settings.sculpt, "constant_detail", text="Const.")
layout.operator("sculpt.sample_detail_size", text="", icon='EYEDROPPER')
else:
layout.prop(settings.sculpt, "detail_size", text="Detail")
UseSeparator(self, context)
layout.operator("sculpt.symmetrize", icon='ARROW_LEFTRIGHT')
layout.prop(settings.sculpt, "symmetrize_direction", "")
UseSeparator(self, context)
layout.operator("sculpt.optimize")
if (settings.sculpt.detail_type_method == 'CONSTANT'):
layout.operator("sculpt.detail_flood_fill")
UseSeparator(self, context)
layout.prop(settings.sculpt, "detail_refine_method", text="")
layout.prop(settings.sculpt, "detail_type_method", text="")
UseSeparator(self, context)
layout.prop(settings.sculpt, "use_smooth_shading", "Smooth")
else:
layout.operator("sculpt.dynamic_topology_toggle",
icon='SCULPT_DYNTOPO', text="Enable Dyntopo")
# Display Wire (Thanks to marvin.k.breuer) #
class VIEW3D_OT_Display_Wire_All(Operator):
bl_label = "Wire on All Objects"
bl_idname = "view3d.display_wire_all"
bl_description = "Enable/Disable Display Wire on All Objects"
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
is_error = False
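# Toggle wire display per object, keeping show_all_edges in sync so the full wireframe is drawn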
for obj in bpy.data.objects:
try:
if obj.show_wire:
obj.show_all_edges = False
obj.show_wire = False
else:
obj.show_all_edges = True
obj.show_wire = True
except:
is_error = True
pass
if is_error:
self.report({'WARNING'},
"Wire on All Objects could not be completed for some objects")
return {'FINISHED'}
# Vertex Color Menu #
class VIEW3D_MT_Vertex_Colors(Menu):
bl_label = "Vertex Colors"
def draw(self, context):
layout = self.layout
layout.operator("paint.vertex_color_set")
UseSeparator(self, context)
layout.operator("paint.vertex_color_smooth")
layout.operator("paint.vertex_color_dirt")
# Weight Paint Menu #
class VIEW3D_MT_Paint_Weights(Menu):
bl_label = "Weights"
def draw(self, context):
layout = self.layout
layout.operator("paint.weight_from_bones",
text="Assign Automatic From Bones").type = 'AUTOMATIC'
layout.operator("paint.weight_from_bones",
text="Assign From Bone Envelopes").type = 'ENVELOPES'
UseSeparator(self, context)
layout.operator("object.vertex_group_normalize_all", text="Normalize All")
layout.operator("object.vertex_group_normalize", text="Normalize")
UseSeparator(self, context)
layout.operator("object.vertex_group_mirror", text="Mirror")
layout.operator("object.vertex_group_invert", text="Invert")
UseSeparator(self, context)
layout.operator("object.vertex_group_clean", text="Clean")
layout.operator("object.vertex_group_quantize", text="Quantize")
UseSeparator(self, context)
layout.operator("object.vertex_group_levels", text="Levels")
layout.operator("object.vertex_group_smooth", text="Smooth")
UseSeparator(self, context)
props = layout.operator("object.data_transfer", text="Transfer Weights")
props.use_reverse_transfer = True
props.data_type = 'VGROUP_WEIGHTS'
UseSeparator(self, context)
layout.operator("object.vertex_group_limit_total", text="Limit Total")
layout.operator("object.vertex_group_fix", text="Fix Deforms")
UseSeparator(self, context)
layout.operator("paint.weight_set")
# Armature Menu's #
class VIEW3D_MT_Edit_Armature(Menu):
bl_label = "Armature"
def draw(self, context):
layout = self.layout
toolsettings = context.tool_settings
layout.prop_menu_enum(toolsettings, "proportional_edit", icon="PROP_CON")
layout.prop_menu_enum(toolsettings, "proportional_edit_falloff", icon="SMOOTHCURVE")
UseSeparator(self, context)
layout.menu("VIEW3D_MT_bone_options_toggle", text="Bone Settings")
layout.operator("armature.merge")
layout.operator("armature.fill")
layout.operator("armature.split")
layout.operator("armature.separate")
layout.operator("armature.switch_direction", text="Switch Direction")
layout.operator_context = 'EXEC_AREA'
layout.operator("armature.symmetrize")
UseSeparator(self, context)
layout.operator("armature.delete")
UseSeparator(self, context)
layout.operator_context = 'INVOKE_DEFAULT'
layout.operator("armature.armature_layers")
layout.operator("armature.bone_layers")
class VIEW3D_MT_EditArmatureTK(Menu):
bl_label = "Armature Tools"
def draw(self, context):
layout = self.layout
layout.operator("armature.subdivide", text="Subdivide")
layout.operator("armature.extrude_move")
layout.operator("armature.extrude_forked")
layout.operator("armature.duplicate_move")
UseSeparator(self, context)
layout.menu("VIEW3D_MT_edit_armature_delete")
UseSeparator(self, context)
layout.operator("transform.transform",
text="Scale Envelope Distance").mode = 'BONE_SIZE'
layout.operator("transform.transform",
text="Scale B-Bone Width").mode = 'BONE_SIZE'
# Armature Pose Menu's #
class VIEW3D_MT_Pose(Menu):
bl_label = "Pose"
def draw(self, context):
layout = self.layout
layout.menu("VIEW3D_MT_object_animation")
layout.menu("VIEW3D_MT_pose_slide")
layout.menu("VIEW3D_MT_pose_propagate")
layout.menu("VIEW3D_MT_pose_library")
layout.menu("VIEW3D_MT_pose_motion")
UseSeparator(self, context)
layout.menu("VIEW3D_MT_pose_group")
layout.menu("VIEW3D_MT_object_parent")
UseSeparator(self, context)
layout.menu("VIEW3D_MT_pose_ik")
layout.menu("VIEW3D_MT_pose_constraints")
layout.menu("VIEW3D_MT_PoseNames")
layout.operator("pose.quaternions_flip")
layout.operator_context = 'INVOKE_AREA'
UseSeparator(self, context)
layout.operator("armature.armature_layers", text="Change Armature Layers...")
layout.operator("pose.bone_layers", text="Change Bone Layers...")
UseSeparator(self, context)
layout.menu("VIEW3D_MT_pose_showhide")
layout.menu("VIEW3D_MT_bone_options_toggle", text="Bone Settings")
# Transform Menu's #
class VIEW3D_MT_TransformMenu(Menu):
bl_label = "Transform"
def draw(self, context):
layout = self.layout
layout.menu("VIEW3D_MT_ManipulatorMenu1")
UseSeparator(self, context)
layout.operator("transform.translate", text="Grab/Move")
layout.operator("transform.rotate", text="Rotate")
layout.operator("transform.resize", text="Scale")
UseSeparator(self, context)
layout.menu("VIEW3D_MT_object_clear")
layout.menu("VIEW3D_MT_object_apply")
UseSeparator(self, context)
layout.operator("transform.translate", text="Move Texture Space").texture_space = True
layout.operator("transform.resize", text="Scale Texture Space").texture_space = True
UseSeparator(self, context)
layout.operator("object.randomize_transform")
layout.operator("transform.tosphere", text="To Sphere")
layout.operator("transform.shear", text="Shear")
layout.operator("transform.bend", text="Bend")
layout.operator("transform.push_pull", text="Push/Pull")
UseSeparator(self, context)
layout.operator("object.align")
layout.operator_context = 'EXEC_REGION_WIN'
layout.operator("transform.transform",
text="Align to Transform Orientation").mode = 'ALIGN'
# ********** Edit Mesh Transform **********
class VIEW3D_MT_TransformMenuEdit(Menu):
bl_label = "Transform"
def draw(self, context):
layout = self.layout
layout.menu("VIEW3D_MT_ManipulatorMenu1")
UseSeparator(self, context)
layout.operator("transform.translate", text="Grab/Move")
layout.operator("transform.rotate", text="Rotate")
layout.operator("transform.resize", text="Scale")
UseSeparator(self, context)
layout.operator("transform.tosphere", text="To Sphere")
layout.operator("transform.shear", text="Shear")
layout.operator("transform.bend", text="Bend")
layout.operator("transform.push_pull", text="Push/Pull")
layout.operator("transform.vertex_warp", text="Warp")
layout.operator("transform.vertex_random", text="Randomize")
UseSeparator(self, context)
layout.operator("transform.translate", text="Move Texture Space").texture_space = True
layout.operator("transform.resize", text="Scale Texture Space").texture_space = True
UseSeparator(self, context)
layout.operator_context = 'EXEC_REGION_WIN'
layout.operator("transform.transform",
text="Align to Transform Orientation").mode = 'ALIGN'
layout.operator_context = 'EXEC_AREA'
layout.operator("object.origin_set",
text="Geometry to Origin").type = 'GEOMETRY_ORIGIN'
# ********** Transform Lite/Short **********
class VIEW3D_MT_TransformMenuLite(Menu):
bl_label = "Transform"
def draw(self, context):
layout = self.layout
layout.menu("VIEW3D_MT_ManipulatorMenu1")
UseSeparator(self, context)
layout.operator("transform.translate", text="Grab/Move")
layout.operator("transform.rotate", text="Rotate")
layout.operator("transform.resize", text="Scale")
UseSeparator(self, context)
layout.menu("VIEW3D_MT_object_clear")
layout.menu("VIEW3D_MT_object_apply")
UseSeparator(self, context)
layout.operator("transform.transform",
text="Align to Transform Orientation").mode = 'ALIGN'
# ********** Transform Camera **********
class VIEW3D_MT_TransformMenuCamera(Menu):
bl_label = "Transform"
def draw(self, context):
layout = self.layout
layout.menu("VIEW3D_MT_ManipulatorMenu1")
layout.menu("VIEW3D_MT_object_clear")
layout.menu("VIEW3D_MT_object_apply")
layout.operator("transform.translate", text="Grab/Move")
layout.operator("transform.rotate", text="Rotate")
layout.operator("transform.resize", text="Scale")
layout.operator("object.align")
layout.operator_context = 'EXEC_REGION_WIN'
UseSeparator(self, context)
layout.operator("transform.transform",
text="Align to Transform Orientation").mode = 'ALIGN'
# ********** Transform Armature **********
class VIEW3D_MT_TransformMenuArmature(Menu):
bl_label = "Transform"
def draw(self, context):
layout = self.layout
layout.menu("VIEW3D_MT_ManipulatorMenu1")
UseSeparator(self, context)
layout.operator("transform.translate", text="Grab/Move")
layout.operator("transform.rotate", text="Rotate")
layout.operator("transform.resize", text="Scale")
UseSeparator(self, context)
layout.operator("armature.align")
layout.operator("object.align")
layout.operator_context = 'EXEC_AREA'
UseSeparator(self, context)
layout.operator("object.origin_set",
text="Geometry to Origin").type = 'GEOMETRY_ORIGIN'
layout.operator("object.origin_set",
text="Origin to Geometry").type = 'ORIGIN_GEOMETRY'
layout.operator("object.origin_set",
text="Origin to 3D Cursor").type = 'ORIGIN_CURSOR'
layout.operator("object.origin_set",
text="Origin to Center of Mass").type = 'ORIGIN_CENTER_OF_MASS'
# ********** Transform Armature Edit **********
class VIEW3D_MT_TransformMenuArmatureEdit(Menu):
bl_label = "Transform"
def draw(self, context):
layout = self.layout
layout.menu("VIEW3D_MT_ManipulatorMenu1")
UseSeparator(self, context)
layout.operator("transform.translate", text="Grab/Move")
layout.operator("transform.rotate", text="Rotate")
layout.operator("transform.resize", text="Scale")
UseSeparator(self, context)
layout.operator("transform.tosphere", text="To Sphere")
layout.operator("transform.shear", text="Shear")
layout.operator("transform.bend", text="Bend")
layout.operator("transform.push_pull", text="Push/Pull")
layout.operator("transform.vertex_warp", text="Warp")
UseSeparator(self, context)
layout.operator("transform.vertex_random", text="Randomize")
layout.operator("armature.align")
layout.operator_context = 'EXEC_AREA'
# ********** Transform Armature Pose **********
class VIEW3D_MT_TransformMenuArmaturePose(Menu):
bl_label = "Transform"
def draw(self, context):
layout = self.layout
layout.menu("VIEW3D_MT_ManipulatorMenu1")
layout.operator("transform.translate", text="Grab/Move")
layout.operator("transform.rotate", text="Rotate")
layout.operator("transform.resize", text="Scale")
UseSeparator(self, context)
layout.operator("pose.transforms_clear", text="Clear All")
layout.operator("pose.loc_clear", text="Location")
layout.operator("pose.rot_clear", text="Rotation")
layout.operator("pose.scale_clear", text="Scale")
UseSeparator(self, context)
layout.operator("pose.user_transforms_clear", text="Reset unkeyed")
obj = context.object
if obj.type == 'ARMATURE' and obj.mode in {'EDIT', 'POSE'}:
if obj.data.draw_type == 'BBONE':
layout.operator("transform.transform", text="Scale BBone").mode = 'BONE_SIZE'
elif obj.data.draw_type == 'ENVELOPE':
layout.operator("transform.transform", text="Scale Envelope Distance").mode = 'BONE_SIZE'
layout.operator("transform.transform", text="Scale Radius").mode = 'BONE_ENVELOPE'
# View Menu's #
class VIEW3D_MT_View_Directions(Menu):
bl_label = "Directions"
def draw(self, context):
layout = self.layout
layout.operator("view3d.viewnumpad", text="Camera").type = 'CAMERA'
UseSeparator(self, context)
layout.operator("view3d.viewnumpad", text="Top").type = 'TOP'
layout.operator("view3d.viewnumpad", text="Bottom").type = 'BOTTOM'
UseSeparator(self, context)
layout.operator("view3d.viewnumpad", text="Front").type = 'FRONT'
layout.operator("view3d.viewnumpad", text="Back").type = 'BACK'
UseSeparator(self, context)
layout.operator("view3d.viewnumpad", text="Right").type = 'RIGHT'
layout.operator("view3d.viewnumpad", text="Left").type = 'LEFT'
class VIEW3D_MT_View_Border(Menu):
bl_label = "Set Border"
def draw(self, context):
layout = self.layout
layout.operator_context = 'INVOKE_REGION_WIN'
layout.operator("view3d.clip_border", text="Clipping Border...")
layout.operator("view3d.zoom_border", text="Zoom Border...")
layout.operator("view3d.render_border", text="Render Border...").camera_only = False
class VIEW3D_MT_View_Toggle(Menu):
bl_label = "View Toggle"
def draw(self, context):
layout = self.layout
layout.operator_context = 'INVOKE_REGION_WIN'
layout.operator("screen.area_dupli")
UseSeparator(self, context)
layout.operator("screen.region_quadview")
layout.operator("screen.screen_full_area", text="Toggle Maximize Area")
layout.operator("screen.screen_full_area").use_hide_panels = True
class VIEW3D_MT_View_Menu(Menu):
bl_label = "View"
def draw(self, context):
layout = self.layout
layout.menu("VIEW3D_MT_Shade")
UseSeparator(self, context)
layout.menu("VIEW3D_MT_view_cameras", text="Cameras")
layout.menu("VIEW3D_MT_View_Directions")
layout.menu("VIEW3D_MT_View_Navigation")
UseSeparator(self, context)
layout.menu("VIEW3D_MT_View_Align")
layout.menu("VIEW3D_MT_View_Toggle")
layout.operator("view3d.view_persportho")
layout.operator("view3d.localview", text="View Global/Local")
layout.operator("view3d.view_selected").use_all_regions = False
layout.operator("view3d.view_all").center = False
UseSeparator(self, context)
layout.menu("VIEW3D_MT_View_Border")
layout.operator("view3d.layers", text="Show All Layers").nr = 0
UseSeparator(self, context)
# New menu entry for Animation player
layout.menu("VIEW3D_MT_Animation_Player",
text="Playback Animation", icon='PLAY')
class VIEW3D_MT_View_Navigation(Menu):
bl_label = "Navigation"
def draw(self, context):
from math import pi
layout = self.layout
layout.operator_enum("view3d.view_orbit", "type")
props = layout.operator("view3d.view_orbit", "Orbit Opposite")
props.type = 'ORBITRIGHT'
props.angle = pi
UseSeparator(self, context)
layout.operator("view3d.view_roll", text="Roll Left").type = 'LEFT'
layout.operator("view3d.view_roll", text="Roll Right").type = 'RIGHT'
UseSeparator(self, context)
layout.operator_enum("view3d.view_pan", "type")
UseSeparator(self, context)
layout.operator("view3d.zoom", text="Zoom In").delta = 1
layout.operator("view3d.zoom", text="Zoom Out").delta = -1
UseSeparator(self, context)
layout.operator("view3d.zoom_camera_1_to_1", text="Zoom Camera 1:1")
UseSeparator(self, context)
layout.operator("view3d.fly")
layout.operator("view3d.walk")
class VIEW3D_MT_View_Align(Menu):
bl_label = "Align View"
def draw(self, context):
layout = self.layout
layout.operator("view3d.view_all", text="Center Cursor and View All").center = True
layout.operator("view3d.view_center_cursor")
UseSeparator(self, context)
layout.operator("view3d.camera_to_view", text="Align Active Camera to View")
layout.operator("view3d.camera_to_view_selected", text="Align Active Camera to Selected")
UseSeparator(self, context)
layout.operator("view3d.view_selected")
layout.operator("view3d.view_lock_to_active")
layout.operator("view3d.view_lock_clear")
class VIEW3D_MT_View_Align_Selected(Menu):
bl_label = "Align View to Active"
def draw(self, context):
layout = self.layout
props = layout.operator("view3d.viewnumpad", text="Top")
props.align_active = True
props.type = 'TOP'
props = layout.operator("view3d.viewnumpad", text="Bottom")
props.align_active = True
props.type = 'BOTTOM'
props = layout.operator("view3d.viewnumpad", text="Front")
props.align_active = True
props.type = 'FRONT'
props = layout.operator("view3d.viewnumpad", text="Back")
props.align_active = True
props.type = 'BACK'
props = layout.operator("view3d.viewnumpad", text="Right")
props.align_active = True
props.type = 'RIGHT'
props = layout.operator("view3d.viewnumpad", text="Left")
props.align_active = True
props.type = 'LEFT'
class VIEW3D_MT_View_Cameras(Menu):
bl_label = "Cameras"
def draw(self, context):
layout = self.layout
layout.operator("view3d.object_as_camera")
layout.operator("view3d.viewnumpad", text="Active Camera").type = 'CAMERA'
# Matcap and AO, Wire all and X-Ray entries thanks to marvin.k.breuer
class VIEW3D_MT_Shade(Menu):
bl_label = "Shade"
def draw(self, context):
layout = self.layout
layout.prop(context.space_data, "viewport_shade", expand=True)
UseSeparator(self, context)
if context.active_object:
if(context.mode == 'EDIT_MESH'):
layout.operator("MESH_OT_faces_shade_smooth")
layout.operator("MESH_OT_faces_shade_flat")
else:
layout.operator("OBJECT_OT_shade_smooth")
layout.operator("OBJECT_OT_shade_flat")
UseSeparator(self, context)
layout.operator("view3d.display_wire_all", text="Wire all", icon='WIRE')
layout.prop(context.object, "show_x_ray", text="X-Ray", icon="META_CUBE")
UseSeparator(self, context)
layout.prop(context.space_data.fx_settings, "use_ssao",
text="Ambient Occlusion", icon="GROUP")
layout.prop(context.space_data, "use_matcap", icon="MATCAP_01")
if context.space_data.use_matcap:
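# Shrink the matcap icon grid so the preview fits inside the menu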
col = layout.column(align=True)
col.scale_y = 0.3
col.scale_x = 0.5
col.template_icon_view(context.space_data, "matcap_icon")
# Animation Player (Thanks to marvin.k.breuer) #
class VIEW3D_MT_Animation_Player(Menu):
bl_label = "Animation Player"
def draw(self, context):
layout = self.layout
layout.operator("screen.frame_jump", text="Jump REW", icon='REW').end = False
layout.operator("screen.keyframe_jump", text="Previous FR", icon='PREV_KEYFRAME').next = False
UseSeparator(self, context)
layout.operator("screen.animation_play", text="Reverse", icon='PLAY_REVERSE').reverse = True
layout.operator("screen.animation_play", text="PLAY", icon='PLAY')
layout.operator("screen.animation_play", text="Stop", icon='PAUSE')
UseSeparator(self, context)
layout.operator("screen.keyframe_jump", text="Next FR", icon='NEXT_KEYFRAME').next = True
layout.operator("screen.frame_jump", text="Jump FF", icon='FF').end = True
# Select Menu's #
# Object Select #
class VIEW3D_MT_Select_Object(Menu):
bl_label = "Select"
def draw(self, context):
layout = self.layout
layout.operator_context = 'INVOKE_REGION_WIN'
layout.operator("view3d.select_border")
layout.operator("view3d.select_circle")
UseSeparator(self, context)
layout.operator("object.select_all").action = 'TOGGLE'
layout.operator("object.select_all", text="Inverse").action = 'INVERT'
layout.operator("object.select_random", text="Random")
layout.operator("object.select_mirror", text="Mirror")
UseSeparator(self, context)
layout.operator("object.select_by_layer", text="Select All by Layer")
layout.operator_menu_enum("object.select_by_type", "type",
text="Select All by Type...")
layout.operator_menu_enum("object.select_grouped", "type",
text="Grouped")
layout.operator_menu_enum("object.select_linked", "type",
text="Linked")
layout.operator("object.select_camera", text="Select Camera")
UseSeparator(self, context)
layout.menu("VIEW3D_MT_Select_Object_More_Less", text="More/Less")
layout.operator("object.select_pattern", text="Select Pattern...")
class VIEW3D_MT_Select_Object_More_Less(Menu):
bl_label = "Select More/Less"
def draw(self, context):
layout = self.layout
layout.operator("object.select_more", text="More")
layout.operator("object.select_less", text="Less")
UseSeparator(self, context)
props = layout.operator("object.select_hierarchy", text="Parent")
props.extend = False
props.direction = 'PARENT'
props = layout.operator("object.select_hierarchy", text="Child")
props.extend = False
props.direction = 'CHILD'
UseSeparator(self, context)
props = layout.operator("object.select_hierarchy", text="Extend Parent")
props.extend = True
props.direction = 'PARENT'
props = layout.operator("object.select_hierarchy", text="Extend Child")
props.extend = True
props.direction = 'CHILD'
# Edit Select #
class VIEW3D_MT_Select_Edit_Mesh(Menu):
bl_label = "Select"
def draw(self, context):
layout = self.layout
layout.operator("view3d.select_border")
layout.operator("view3d.select_circle")
UseSeparator(self, context)
layout.operator("mesh.select_all").action = 'TOGGLE'
layout.operator("mesh.select_all", text="Inverse").action = 'INVERT'
layout.operator("mesh.select_linked", text="Linked")
layout.operator("mesh.faces_select_linked_flat",
text="Linked Flat Faces")
layout.operator("mesh.select_random", text="Random")
layout.operator("mesh.select_nth", text="Every N Number of Verts")
UseSeparator(self, context)
layout.menu("VIEW3D_MT_Edit_Mesh_Select_Trait")
layout.menu("VIEW3D_MT_Edit_Mesh_Select_Similar")
layout.menu("VIEW3D_MT_Edit_Mesh_Select_More_Less")
UseSeparator(self, context)
layout.operator("mesh.select_mirror", text="Mirror")
layout.operator("mesh.edges_select_sharp", text="Sharp Edges")
layout.operator("mesh.select_axis", text="Side of Active")
layout.operator("mesh.shortest_path_select", text="Shortest Path")
UseSeparator(self, context)
layout.operator("mesh.loop_multi_select", text="Edge Loops").ring = False
layout.operator("mesh.loop_multi_select", text="Edge Rings").ring = True
layout.operator("mesh.loop_to_region")
layout.operator("mesh.region_to_loop")
class VIEW3D_MT_Edit_Mesh_Select_Similar(Menu):
bl_label = "Select Similar"
def draw(self, context):
layout = self.layout
layout.operator_enum("mesh.select_similar", "type")
layout.operator("mesh.select_similar_region", text="Face Regions")
class VIEW3D_MT_Edit_Mesh_Select_Trait(Menu):
bl_label = "Select All by Trait"
def draw(self, context):
layout = self.layout
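# select_non_manifold works on verts/edges, so hide it while Face select mode
# (index 2 of the mesh_select_mode triple) is active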
if context.scene.tool_settings.mesh_select_mode[2] is False:
layout.operator("mesh.select_non_manifold", text="Non Manifold")
layout.operator("mesh.select_loose", text="Loose Geometry")
layout.operator("mesh.select_interior_faces", text="Interior Faces")
layout.operator("mesh.select_face_by_sides", text="By Number of Verts")
layout.operator("mesh.select_ungrouped", text="Ungrouped Verts")
class VIEW3D_MT_Edit_Mesh_Select_More_Less(Menu):
bl_label = "Select More/Less"
def draw(self, context):
layout = self.layout
layout.operator("mesh.select_more", text="More")
layout.operator("mesh.select_less", text="Less")
UseSeparator(self, context)
layout.operator("mesh.select_next_item", text="Next Active")
layout.operator("mesh.select_prev_item", text="Previous Active")
# Edit Curve Select #
class VIEW3D_MT_Select_Edit_Curve(Menu):
bl_label = "Select"
def draw(self, context):
layout = self.layout
layout.operator("view3d.select_border")
layout.operator("view3d.select_circle")
UseSeparator(self, context)
layout.operator("curve.select_all").action = 'TOGGLE'
layout.operator("curve.select_all", text="Inverse").action = 'INVERT'
layout.operator("curve.select_nth")
UseSeparator(self, context)
layout.operator("curve.select_random")
layout.operator("curve.select_linked", text="Select Linked")
layout.operator("curve.select_similar", text="Select Similar")
layout.operator("curve.de_select_first")
layout.operator("curve.de_select_last")
layout.operator("curve.select_next")
layout.operator("curve.select_previous")
UseSeparator(self, context)
layout.operator("curve.select_more")
layout.operator("curve.select_less")
# Armature Select #
class VIEW3D_MT_SelectArmatureMenu(Menu):
bl_label = "Select"
def draw(self, context):
layout = self.layout
layout.operator("view3d.select_border")
layout.operator("armature.select_all")
layout.operator("armature.select_inverse", text="Inverse")
layout.operator("armature.select_hierarchy",
text="Parent").direction = 'PARENT'
layout.operator("armature.select_hierarchy",
text="Child").direction = 'CHILD'
props = layout.operator("armature.select_hierarchy",
text="Extend Parent")
props.extend = True
props.direction = 'PARENT'
props = layout.operator("armature.select_hierarchy",
text="Extend Child")
props.extend = True
props.direction = 'CHILD'
layout.operator("object.select_pattern", text="Select Pattern...")
class VIEW3D_MT_Select_Edit_Armature(Menu):
bl_label = "Select"
def draw(self, context):
layout = self.layout
layout.operator("view3d.select_border")
layout.operator("view3d.select_circle")
UseSeparator(self, context)
layout.operator("armature.select_all").action = 'TOGGLE'
layout.operator("armature.select_all", text="Inverse").action = 'INVERT'
layout.operator("armature.select_mirror", text="Mirror").extend = False
UseSeparator(self, context)
layout.operator("armature.select_more", text="More")
layout.operator("armature.select_less", text="Less")
UseSeparator(self, context)
props = layout.operator("armature.select_hierarchy", text="Parent")
props.extend = False
props.direction = 'PARENT'
props = layout.operator("armature.select_hierarchy", text="Child")
props.extend = False
props.direction = 'CHILD'
UseSeparator(self, context)
props = layout.operator("armature.select_hierarchy", text="Extend Parent")
props.extend = True
props.direction = 'PARENT'
props = layout.operator("armature.select_hierarchy", text="Extend Child")
props.extend = True
props.direction = 'CHILD'
layout.operator_menu_enum("armature.select_similar", "type", text="Similar")
layout.operator("object.select_pattern", text="Select Pattern...")
class VIEW3D_MT_Select_Pose(Menu):
bl_label = "Select"
def draw(self, context):
layout = self.layout
layout.operator("view3d.select_border")
layout.operator("view3d.select_circle")
UseSeparator(self, context)
layout.operator("pose.select_all").action = 'TOGGLE'
layout.operator("pose.select_all", text="Inverse").action = 'INVERT'
layout.operator("pose.select_mirror", text="Flip Active")
layout.operator("pose.select_constraint_target",
text="Constraint Target")
UseSeparator(self, context)
layout.operator("pose.select_linked", text="Linked")
layout.operator("pose.select_hierarchy",
text="Parent").direction = 'PARENT'
layout.operator("pose.select_hierarchy",
text="Child").direction = 'CHILD'
props = layout.operator("pose.select_hierarchy", text="Extend Parent")
props.extend = True
props.direction = 'PARENT'
props = layout.operator("pose.select_hierarchy", text="Extend Child")
props.extend = True
props.direction = 'CHILD'
layout.operator_menu_enum("pose.select_grouped", "type",
text="Grouped")
UseSeparator(self, context)
layout.operator("object.select_pattern", text="Select Pattern...")
layout.menu("VIEW3D_MT_select_pose_more_less")
class VIEW3D_MT_Select_Pose_More_Less(Menu):
bl_label = "Select More/Less"
def draw(self, context):
layout = self.layout
props = layout.operator("pose.select_hierarchy", text="Parent")
props.extend = False
props.direction = 'PARENT'
props = layout.operator("pose.select_hierarchy", text="Child")
props.extend = False
props.direction = 'CHILD'
props = layout.operator("pose.select_hierarchy", text="Extend Parent")
props.extend = True
props.direction = 'PARENT'
props = layout.operator("pose.select_hierarchy", text="Extend Child")
props.extend = True
props.direction = 'CHILD'
class VIEW3D_MT_PoseCopy(Menu):
bl_label = "Pose Copy"
def draw(self, context):
layout = self.layout
layout.operator("pose.copy")
layout.operator("pose.paste")
layout.operator("pose.paste",
text="Paste X-Flipped Pose").flipped = True
class VIEW3D_MT_PoseNames(Menu):
bl_label = "Pose Names"
def draw(self, context):
layout = self.layout
layout.operator_context = 'EXEC_AREA'
layout.operator("pose.autoside_names",
text="AutoName Left/Right").axis = 'XAXIS'
layout.operator("pose.autoside_names",
text="AutoName Front/Back").axis = 'YAXIS'
layout.operator("pose.autoside_names",
text="AutoName Top/Bottom").axis = 'ZAXIS'
layout.operator("pose.flip_names")
# Surface Select #
class VIEW3D_MT_Select_Edit_Surface(Menu):
bl_label = "Select"
def draw(self, context):
layout = self.layout
layout.operator("view3d.select_border")
layout.operator("view3d.select_circle")
UseSeparator(self, context)
layout.operator("curve.select_all").action = 'TOGGLE'
layout.operator("curve.select_all", text="Inverse").action = 'INVERT'
layout.operator("curve.select_random")
layout.operator("curve.select_nth")
layout.operator("curve.select_linked", text="Select Linked")
layout.operator("curve.select_similar", text="Select Similar")
layout.operator("curve.select_row")
UseSeparator(self, context)
layout.operator("curve.select_more")
layout.operator("curve.select_less")
# Metaball Select #
class VIEW3D_MT_SelectMetaball(Menu):
bl_label = "Select"
def draw(self, context):
layout = self.layout
layout.operator("view3d.select_border")
layout.operator("view3d.select_circle")
UseSeparator(self, context)
layout.operator("mball.select_all").action = 'TOGGLE'
layout.operator("mball.select_all").action = 'INVERT'
layout.operator("mball.select_random_metaelems")
class VIEW3D_MT_Select_Edit_Metaball(Menu):
bl_label = "Select"
def draw(self, context):
layout = self.layout
layout.operator("view3d.select_border")
layout.operator("view3d.select_circle")
layout.operator("mball.select_all").action = 'TOGGLE'
layout.operator("mball.select_all", text="Inverse").action = 'INVERT'
layout.operator("mball.select_random_metaelems")
layout.operator_menu_enum("mball.select_similar", "type", text="Similar")
# Particle Select #
class VIEW3D_MT_Selection_Mode_Particle(Menu):
bl_label = "Particle Select and Display Mode"
def draw(self, context):
layout = self.layout
toolsettings = context.tool_settings
layout.prop(toolsettings.particle_edit, "select_mode", expand=True)
class VIEW3D_MT_Select_Particle(Menu):
bl_label = "Select"
def draw(self, context):
layout = self.layout
layout.operator("view3d.select_border")
layout.operator("view3d.select_circle")
UseSeparator(self, context)
layout.operator("particle.select_all").action = 'TOGGLE'
layout.operator("particle.select_linked")
layout.operator("particle.select_all", text="Inverse").action = 'INVERT'
UseSeparator(self, context)
layout.operator("particle.select_more")
layout.operator("particle.select_less")
UseSeparator(self, context)
layout.operator("particle.select_random")
UseSeparator(self, context)
layout.operator("particle.select_roots", text="Roots")
layout.operator("particle.select_tips", text="Tips")
# Lattice Edit Select #
class VIEW3D_MT_Select_Edit_Lattice(Menu):
bl_label = "Select"
def draw(self, context):
layout = self.layout
layout.operator("view3d.select_border")
layout.operator("view3d.select_circle")
UseSeparator(self, context)
layout.operator("lattice.select_mirror")
layout.operator("lattice.select_random")
layout.operator("lattice.select_all").action = 'TOGGLE'
layout.operator("lattice.select_all", text="Inverse").action = 'INVERT'
UseSeparator(self, context)
layout.operator("lattice.select_ungrouped", text="Ungrouped Verts")
# Grease Pencil Select #
class VIEW3D_MT_Select_Gpencil(Menu):
# To Do: used in the 3D View header; might work if mapped to the mouse
# Not in the class list yet
bl_label = "Select"
def draw(self, context):
layout = self.layout
layout.operator("gpencil.select_border")
layout.operator("gpencil.select_circle")
UseSeparator(self, context)
layout.operator("gpencil.select_all", text="(De)select All").action = 'TOGGLE'
layout.operator("gpencil.select_all", text="Inverse").action = 'INVERT'
layout.operator("gpencil.select_linked", text="Linked")
# layout.operator_menu_enum("gpencil.select_grouped", "type", text="Grouped")
layout.operator("gpencil.select_grouped", text="Grouped")
UseSeparator(self, context)
layout.operator("gpencil.select_more")
layout.operator("gpencil.select_less")
# Text Select #
class VIEW3D_MT_Select_Edit_Text(Menu):
# To Do: used in the 3D View header; might work if mapped to the mouse
# Not in the class list yet
bl_label = "Edit"
def draw(self, context):
layout = self.layout
layout.operator("font.text_copy", text="Copy")
layout.operator("font.text_cut", text="Cut")
layout.operator("font.text_paste", text="Paste")
layout.operator("font.text_paste_from_file")
layout.operator("font.select_all")
# Paint Mode Menus #
class VIEW3D_MT_Select_Paint_Mask(Menu):
bl_label = "Select"
def draw(self, context):
layout = self.layout
layout.operator("view3d.select_border")
layout.operator("view3d.select_circle")
layout.operator("paint.face_select_all").action = 'TOGGLE'
layout.operator("paint.face_select_all", text="Inverse").action = 'INVERT'
layout.operator("paint.face_select_linked", text="Linked")
class VIEW3D_MT_Select_Paint_Mask_Vertex(Menu):
bl_label = "Select"
def draw(self, context):
layout = self.layout
layout.operator("view3d.select_border")
layout.operator("view3d.select_circle")
layout.operator("paint.vert_select_all").action = 'TOGGLE'
layout.operator("paint.vert_select_all", text="Inverse").action = 'INVERT'
layout.operator("paint.vert_select_ungrouped", text="Ungrouped Verts")
class VIEW3D_MT_Angle_Control(Menu):
bl_label = "Angle Control"
@classmethod
def poll(cls, context):
settings = UnifiedPaintPanel.paint_settings(context)
if not settings:
return False
brush = settings.brush
tex_slot = brush.texture_slot
return tex_slot.has_texture_angle and tex_slot.has_texture_angle_source
def draw(self, context):
layout = self.layout
settings = UnifiedPaintPanel.paint_settings(context)
brush = settings.brush
sculpt = (context.sculpt_object is not None)
tex_slot = brush.texture_slot
layout.prop(tex_slot, "use_rake", text="Rake")
if brush.brush_capabilities.has_random_texture_angle and tex_slot.has_random_texture_angle:
if sculpt:
if brush.sculpt_capabilities.has_random_texture_angle:
layout.prop(tex_slot, "use_random", text="Random")
else:
layout.prop(tex_slot, "use_random", text="Random")
# Cursor Menu Operators #
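# These two operators back the "Set Cursor as Pivot Point" and
# "Revert Pivot Point" entries in the cursor menus above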
class VIEW3D_OT_Pivot_Cursor(Operator):
bl_idname = "view3d.pivot_cursor"
bl_label = "Cursor as Pivot Point"
bl_description = "Set Pivot Point back to Cursor"
@classmethod
def poll(cls, context):
space = context.space_data
return (hasattr(space, "pivot_point") and space.pivot_point != 'CURSOR')
def execute(self, context):
bpy.context.space_data.pivot_point = 'CURSOR'
return {'FINISHED'}
class VIEW3D_OT_Revert_Pivot(Operator):
bl_idname = "view3d.revert_pivot"
bl_label = "Revert Pivot Point to Median"
bl_description = "Set Pivot Point back to Median"
@classmethod
def poll(cls, context):
space = context.space_data
return (hasattr(space, "pivot_point") and space.pivot_point != 'MEDIAN_POINT')
def execute(self, context):
bpy.context.space_data.pivot_point = 'MEDIAN_POINT'
return{'FINISHED'}
# Cursor Edge Intersection Defs #
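# Note: this local abs() shadows Python's built-in of the same name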
def abs(val):
if val > 0:
return val
return -val
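# edgeIntersect: gather exactly two selected edges, find the closest points between
# their infinite lines with intersect_line_line (None when the lines are parallel),
# and move the 3D cursor to the midpoint of that shortest connecting segment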
def edgeIntersect(context, operator):
from mathutils.geometry import intersect_line_line
obj = context.active_object
if (obj.type != "MESH"):
operator.report({'ERROR'}, "Object must be a mesh")
return None
edges = []
mesh = obj.data
verts = mesh.vertices
is_editmode = (obj.mode == 'EDIT')
if is_editmode:
bpy.ops.object.mode_set(mode='OBJECT')
for e in mesh.edges:
if e.select:
edges.append(e)
if len(edges) > 2:
break
if is_editmode:
bpy.ops.object.mode_set(mode='EDIT')
if len(edges) != 2:
operator.report({'ERROR'},
"Operator requires exactly 2 edges to be selected")
return
line = intersect_line_line(verts[edges[0].vertices[0]].co,
verts[edges[0].vertices[1]].co,
verts[edges[1].vertices[0]].co,
verts[edges[1].vertices[1]].co)
if line is None:
operator.report({'ERROR'}, "Selected edges do not intersect")
return
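# Midpoint of the shortest segment between the two lines, converted
# from object space to world space before placing the cursor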
point = line[0].lerp(line[1], 0.5)
context.scene.cursor_location = obj.matrix_world * point
# Cursor Edge Intersection Operator #
class VIEW3D_OT_CursorToEdgeIntersection(Operator):
bl_idname = "view3d.snap_cursor_to_edge_intersection"
bl_label = "Cursor to Edge Intersection"
bl_description = "Finds the mid-point of the shortest distance between two edges"
@classmethod
def poll(cls, context):
obj = context.active_object
return (obj is not None and obj.type == 'MESH')
def execute(self, context):
        # Prevent unsupported execution in Local View modes
        space_data = bpy.context.space_data
        if True in space_data.layers_local_view:
            self.report({'INFO'}, 'Global Perspective modes only, unable to continue.')
return {'FINISHED'}
edgeIntersect(context, self)
return {'FINISHED'}
# Set Mode Operator #
class SetObjectMode(Operator):
bl_idname = "object.set_object_mode"
bl_label = "Set the object interactive mode"
bl_description = "I set the interactive mode of object"
bl_options = {'REGISTER'}
mode = StringProperty(
name="Interactive mode",
default="OBJECT"
)
def execute(self, context):
if (context.active_object):
try:
bpy.ops.object.mode_set(mode=self.mode)
except TypeError:
                msg = context.active_object.name + ": it is not possible to enter this interactive mode"
self.report(type={"WARNING"}, message=msg)
else:
self.report(type={"WARNING"}, message="There is no active object")
return {'FINISHED'}
# Origin To Selected Edit Mode #
def vfeOrigin(context):
try:
cursorPositionX = context.scene.cursor_location[0]
cursorPositionY = context.scene.cursor_location[1]
cursorPositionZ = context.scene.cursor_location[2]
bpy.ops.view3d.snap_cursor_to_selected()
bpy.ops.object.mode_set()
bpy.ops.object.origin_set(type='ORIGIN_CURSOR', center='MEDIAN')
bpy.ops.object.mode_set(mode='EDIT')
context.scene.cursor_location[0] = cursorPositionX
context.scene.cursor_location[1] = cursorPositionY
context.scene.cursor_location[2] = cursorPositionZ
return True
    except Exception:
        return False
class SetOriginToSelected(Operator):
bl_idname = "object.setorigintoselected"
bl_label = "Set Origin to Selected"
bl_description = "Set Origin to Selected"
@classmethod
def poll(cls, context):
return (context.area.type == "VIEW_3D" and context.active_object is not None)
def execute(self, context):
check = vfeOrigin(context)
if not check:
self.report({"ERROR"}, "Set Origin to Selected could not be performed")
return {'CANCELLED'}
return {'FINISHED'}
# Code thanks to Isaac Weaver (wisaac) D1963
class SnapCursSelToCenter(Operator):
bl_idname = "view3d.snap_cursor_selected_to_center"
bl_label = "Snap Cursor & Selection to Center"
bl_description = ("Snap 3D cursor and selected objects to the center \n"
"Works only in Object Mode")
@classmethod
def poll(cls, context):
return (context.area.type == "VIEW_3D" and context.mode == "OBJECT")
def execute(self, context):
context.space_data.cursor_location = (0, 0, 0)
for obj in context.selected_objects:
obj.location = (0, 0, 0)
return {'FINISHED'}
# Preferences utility functions
# Draw Separator #
def UseSeparator(operator, context):
useSep = bpy.context.user_preferences.addons[__name__].preferences.use_separators
if useSep:
operator.layout.separator()
# Use compact brushes menus #
def UseBrushesLists():
    # Separate function just for convenience
useLists = bpy.context.user_preferences.addons[__name__].preferences.use_brushes_lists
return bool(useLists)
# Addon Preferences #
class VIEW3D_MT_Space_Dynamic_Menu_Pref(AddonPreferences):
bl_idname = __name__
use_separators = BoolProperty(
name="Use Separators in the menus",
default=True,
description=("Use separators in the menus, a trade-off between \n"
"readability vs. using more space for displaying items")
)
use_brushes_lists = BoolProperty(
name="Use compact menus for brushes",
default=False,
description=("Use more compact menus instead \n"
"of thumbnails for displaying brushes")
)
def draw(self, context):
layout = self.layout
row = layout.row(align=True)
row.prop(self, "use_separators", toggle=True)
row.prop(self, "use_brushes_lists", toggle=True)
# List The Classes #
classes = (
VIEW3D_MT_Space_Dynamic_Menu,
VIEW3D_MT_AddMenu,
VIEW3D_MT_Object,
VIEW3D_MT_Edit_Mesh,
VIEW3D_MT_TransformMenu,
VIEW3D_MT_TransformMenuEdit,
VIEW3D_MT_TransformMenuArmature,
VIEW3D_MT_TransformMenuArmatureEdit,
VIEW3D_MT_TransformMenuArmaturePose,
VIEW3D_MT_TransformMenuLite,
VIEW3D_MT_TransformMenuCamera,
VIEW3D_MT_MirrorMenu,
VIEW3D_MT_ParentMenu,
VIEW3D_MT_GroupMenu,
VIEW3D_MT_Select_Object,
VIEW3D_MT_Select_Object_More_Less,
VIEW3D_MT_Select_Edit_Mesh,
VIEW3D_MT_Edit_Mesh_Select_Similar,
VIEW3D_MT_Edit_Mesh_Select_Trait,
VIEW3D_MT_Edit_Mesh_Select_More_Less,
VIEW3D_MT_Select_Edit_Curve,
VIEW3D_MT_SelectArmatureMenu,
VIEW3D_MT_Select_Pose,
VIEW3D_MT_Select_Pose_More_Less,
VIEW3D_MT_Pose,
VIEW3D_MT_PoseCopy,
VIEW3D_MT_PoseNames,
VIEW3D_MT_Select_Edit_Surface,
VIEW3D_MT_SelectMetaball,
VIEW3D_MT_Select_Edit_Metaball,
VIEW3D_MT_Select_Particle,
VIEW3D_MT_Select_Edit_Lattice,
VIEW3D_MT_Select_Edit_Armature,
VIEW3D_MT_Select_Paint_Mask,
VIEW3D_MT_Select_Paint_Mask_Vertex,
VIEW3D_MT_Angle_Control,
VIEW3D_MT_Edit_Multi,
VIEW3D_MT_EditM_Edge,
VIEW3D_MT_Edit_Curve,
VIEW3D_MT_EditCurveCtrlpoints,
VIEW3D_MT_EditCurveSegments,
VIEW3D_MT_EditCurveSpecials,
VIEW3D_MT_Edit_Armature,
VIEW3D_MT_EditArmatureTK,
VIEW3D_MT_KeyframeMenu,
VIEW3D_OT_Pivot_Cursor,
VIEW3D_OT_Revert_Pivot,
VIEW3D_MT_CursorMenu,
VIEW3D_MT_CursorMenuLite,
VIEW3D_MT_EditCursorMenu,
VIEW3D_OT_CursorToEdgeIntersection,
VIEW3D_MT_UndoS,
VIEW3D_MT_Camera_Options,
InteractiveMode,
InteractiveModeArmature,
SetObjectMode,
VIEW3D_MT_View_Directions,
VIEW3D_MT_View_Border,
VIEW3D_MT_View_Toggle,
VIEW3D_MT_View_Menu,
VIEW3D_MT_View_Navigation,
VIEW3D_MT_View_Align,
VIEW3D_MT_View_Align_Selected,
VIEW3D_MT_View_Cameras,
VIEW3D_MT_UV_Map,
VIEW3D_MT_Pivot,
VIEW3D_Snap_Context,
VIEW3D_Snap_Origin,
VIEW3D_MT_Shade,
VIEW3D_MT_ManipulatorMenu1,
SetOriginToSelected,
VIEW3D_MT_Object_Data_Link,
VIEW3D_MT_Duplicate,
VIEW3D_MT_Space_Dynamic_Menu_Pref,
VIEW3D_MT_Selection_Mode_Particle,
VIEW3D_MT_AutoSmooth,
VIEW3D_MT_Animation_Player,
VIEW3D_OT_Interactive_Mode_Text,
SnapCursSelToCenter,
VIEW3D_MT_Sculpt_Specials,
VIEW3D_MT_Brush_Settings,
VIEW3D_MT_Brush_Selection,
VIEW3D_MT_Sculpts,
VIEW3D_MT_Hide_Masks,
VIEW3D_OT_Display_Wire_All,
VIEW3D_MT_Vertex_Colors,
VIEW3D_MT_Paint_Weights,
VIEW3D_OT_Interactive_Mode_Grease_Pencil,
VIEW3D_MT_Edit_Gpencil,
InteractiveModeOther,
)
# Register Classes & Hotkeys #
def register():
for cls in classes:
bpy.utils.register_class(cls)
wm = bpy.context.window_manager
kc = wm.keyconfigs.addon
if kc:
km = kc.keymaps.new(name='3D View', space_type='VIEW_3D')
kmi = km.keymap_items.new('wm.call_menu', 'SPACE', 'PRESS')
kmi.properties.name = "VIEW3D_MT_Space_Dynamic_Menu"
# Unregister Classes & Hotkeys #
def unregister():
wm = bpy.context.window_manager
kc = wm.keyconfigs.addon
if kc:
km = kc.keymaps['3D View']
for kmi in km.keymap_items:
if kmi.idname == 'wm.call_menu':
if kmi.properties.name == "VIEW3D_MT_Space_Dynamic_Menu":
km.keymap_items.remove(kmi)
break
for cls in classes:
bpy.utils.unregister_class(cls)
if __name__ == "__main__":
register()
| 41.597738
| 112
| 0.636592
|
60f1e32ba548a15f3cf599ff255a2c3f4da1ec46
| 58,749
|
py
|
Python
|
tests/csrf_tests/tests.py
|
omerfarukabaci/django
|
b8c0b22f2f0f8ce664642332d6d872f300c662b4
|
[
"BSD-3-Clause",
"0BSD"
] | 2
|
2018-12-26T04:15:41.000Z
|
2021-12-08T19:51:22.000Z
|
tests/csrf_tests/tests.py
|
omerfarukabaci/django
|
b8c0b22f2f0f8ce664642332d6d872f300c662b4
|
[
"BSD-3-Clause",
"0BSD"
] | 1
|
2021-06-25T15:33:37.000Z
|
2021-06-25T15:33:37.000Z
|
tests/csrf_tests/tests.py
|
omerfarukabaci/django
|
b8c0b22f2f0f8ce664642332d6d872f300c662b4
|
[
"BSD-3-Clause",
"0BSD"
] | null | null | null |
import re
from django.conf import settings
from django.contrib.sessions.backends.cache import SessionStore
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpRequest, HttpResponse, UnreadablePostError
from django.middleware.csrf import (
CSRF_ALLOWED_CHARS, CSRF_SECRET_LENGTH, CSRF_SESSION_KEY,
CSRF_TOKEN_LENGTH, REASON_BAD_ORIGIN, REASON_CSRF_TOKEN_MISSING,
REASON_NO_CSRF_COOKIE, CsrfViewMiddleware, InvalidTokenFormat,
RejectRequest, _does_token_match, _mask_cipher_secret, _sanitize_token,
_unmask_cipher_token, get_token, rotate_token,
)
from django.test import SimpleTestCase, override_settings
from django.views.decorators.csrf import csrf_exempt, requires_csrf_token
from .views import (
ensure_csrf_cookie_view, ensured_and_protected_view,
non_token_view_using_request_processor, post_form_view, protected_view,
sandwiched_rotate_token_view, token_view,
)
# This is a test (unmasked) CSRF cookie / secret.
TEST_SECRET = 'lcccccccX2kcccccccY2jcccccccssIC'
# Two masked versions of TEST_SECRET for testing purposes.
MASKED_TEST_SECRET1 = '1bcdefghij2bcdefghij3bcdefghij4bcdefghij5bcdefghij6bcdefghijABCD'
MASKED_TEST_SECRET2 = '2JgchWvM1tpxT2lfz9aydoXW9yT1DN3NdLiejYxOOlzzV4nhBbYqmqZYbAV3V5Bf'
class CsrfFunctionTestMixin:
# This method depends on _unmask_cipher_token() being correct.
def assertMaskedSecretCorrect(self, masked_secret, secret):
"""Test that a string is a valid masked version of a secret."""
self.assertEqual(len(masked_secret), CSRF_TOKEN_LENGTH)
self.assertEqual(len(secret), CSRF_SECRET_LENGTH)
self.assertTrue(
set(masked_secret).issubset(set(CSRF_ALLOWED_CHARS)),
msg=f'invalid characters in {masked_secret!r}',
)
actual = _unmask_cipher_token(masked_secret)
self.assertEqual(actual, secret)
class CsrfFunctionTests(CsrfFunctionTestMixin, SimpleTestCase):
def test_unmask_cipher_token(self):
cases = [
(TEST_SECRET, MASKED_TEST_SECRET1),
(TEST_SECRET, MASKED_TEST_SECRET2),
(
32 * 'a',
'vFioG3XOLyGyGsPRFyB9iYUs341ufzIEvFioG3XOLyGyGsPRFyB9iYUs341ufzIE',
),
(32 * 'a', 64 * 'a'),
(32 * 'a', 64 * 'b'),
(32 * 'b', 32 * 'a' + 32 * 'b'),
(32 * 'b', 32 * 'b' + 32 * 'c'),
(32 * 'c', 32 * 'a' + 32 * 'c'),
]
for secret, masked_secret in cases:
with self.subTest(masked_secret=masked_secret):
actual = _unmask_cipher_token(masked_secret)
self.assertEqual(actual, secret)
def test_mask_cipher_secret(self):
cases = [
32 * 'a',
TEST_SECRET,
'da4SrUiHJYoJ0HYQ0vcgisoIuFOxx4ER',
]
for secret in cases:
with self.subTest(secret=secret):
masked = _mask_cipher_secret(secret)
self.assertMaskedSecretCorrect(masked, secret)
def test_get_token_csrf_cookie_set(self):
request = HttpRequest()
request.META['CSRF_COOKIE'] = MASKED_TEST_SECRET1
self.assertNotIn('CSRF_COOKIE_NEEDS_UPDATE', request.META)
token = get_token(request)
self.assertNotEqual(token, MASKED_TEST_SECRET1)
self.assertMaskedSecretCorrect(token, TEST_SECRET)
# The existing cookie is preserved.
self.assertEqual(request.META['CSRF_COOKIE'], MASKED_TEST_SECRET1)
self.assertIs(request.META['CSRF_COOKIE_NEEDS_UPDATE'], True)
def test_get_token_csrf_cookie_not_set(self):
request = HttpRequest()
self.assertNotIn('CSRF_COOKIE', request.META)
self.assertNotIn('CSRF_COOKIE_NEEDS_UPDATE', request.META)
token = get_token(request)
cookie = request.META['CSRF_COOKIE']
self.assertEqual(len(cookie), CSRF_TOKEN_LENGTH)
unmasked_cookie = _unmask_cipher_token(cookie)
self.assertMaskedSecretCorrect(token, unmasked_cookie)
self.assertIs(request.META['CSRF_COOKIE_NEEDS_UPDATE'], True)
def test_rotate_token(self):
request = HttpRequest()
request.META['CSRF_COOKIE'] = MASKED_TEST_SECRET1
self.assertNotIn('CSRF_COOKIE_NEEDS_UPDATE', request.META)
rotate_token(request)
# The underlying secret was changed.
cookie = request.META['CSRF_COOKIE']
self.assertEqual(len(cookie), CSRF_TOKEN_LENGTH)
unmasked_cookie = _unmask_cipher_token(cookie)
self.assertNotEqual(unmasked_cookie, TEST_SECRET)
self.assertIs(request.META['CSRF_COOKIE_NEEDS_UPDATE'], True)
def test_sanitize_token_masked(self):
# Tokens of length CSRF_TOKEN_LENGTH are preserved.
cases = [
(MASKED_TEST_SECRET1, MASKED_TEST_SECRET1),
(64 * 'a', 64 * 'a'),
]
for token, expected in cases:
with self.subTest(token=token):
actual = _sanitize_token(token)
self.assertEqual(actual, expected)
def test_sanitize_token_unmasked(self):
# A token of length CSRF_SECRET_LENGTH is masked.
actual = _sanitize_token(TEST_SECRET)
self.assertMaskedSecretCorrect(actual, TEST_SECRET)
def test_sanitize_token_invalid(self):
cases = [
(64 * '*', 'has invalid characters'),
(16 * 'a', 'has incorrect length'),
]
for token, expected_message in cases:
with self.subTest(token=token):
with self.assertRaisesMessage(InvalidTokenFormat, expected_message):
_sanitize_token(token)
def test_does_token_match(self):
cases = [
((MASKED_TEST_SECRET1, MASKED_TEST_SECRET2), True),
((MASKED_TEST_SECRET1, 64 * 'a'), False),
]
for (token1, token2), expected in cases:
with self.subTest(token1=token1, token2=token2):
actual = _does_token_match(token1, token2)
self.assertIs(actual, expected)
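# For reference: the masking these tests exercise is a reversible pairing over
# CSRF_ALLOWED_CHARS, so a mask/unmask round trip recovers the secret (a
# sketch using the private helpers imported above):
#
#   masked = _mask_cipher_secret(TEST_SECRET)        # CSRF_TOKEN_LENGTH chars
#   assert _unmask_cipher_token(masked) == TEST_SECRET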
class TestingSessionStore(SessionStore):
"""
A version of SessionStore that stores what cookie values are passed to
set_cookie() when CSRF_USE_SESSIONS=True.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# This is a list of the cookie values passed to set_cookie() over
# the course of the request-response.
self._cookies_set = []
def __setitem__(self, key, value):
super().__setitem__(key, value)
self._cookies_set.append(value)
class TestingHttpRequest(HttpRequest):
"""
    A version of HttpRequest that lets tests track and override attributes
    (such as whether the request is secure) more easily.
"""
def __init__(self):
super().__init__()
self.session = TestingSessionStore()
def is_secure(self):
return getattr(self, '_is_secure_override', False)
class PostErrorRequest(TestingHttpRequest):
"""
TestingHttpRequest that can raise errors when accessing POST data.
"""
post_error = None
def _get_post(self):
if self.post_error is not None:
raise self.post_error
return self._post
def _set_post(self, post):
self._post = post
POST = property(_get_post, _set_post)
class CsrfViewMiddlewareTestMixin(CsrfFunctionTestMixin):
"""
Shared methods and tests for session-based and cookie-based tokens.
"""
_csrf_id_cookie = MASKED_TEST_SECRET1
_csrf_id_token = MASKED_TEST_SECRET2
def _set_csrf_cookie(self, req, cookie):
raise NotImplementedError('This method must be implemented by a subclass.')
def _read_csrf_cookie(self, req, resp):
"""
Return the CSRF cookie as a string, or False if no cookie is present.
"""
raise NotImplementedError('This method must be implemented by a subclass.')
def _get_cookies_set(self, req, resp):
"""
Return a list of the cookie values passed to set_cookie() over the
course of the request-response.
"""
raise NotImplementedError('This method must be implemented by a subclass.')
def assertCookiesSet(self, req, resp, expected_secrets):
"""
Assert that set_cookie() was called with the given sequence of secrets.
"""
cookies_set = self._get_cookies_set(req, resp)
secrets_set = [_unmask_cipher_token(cookie) for cookie in cookies_set]
self.assertEqual(secrets_set, expected_secrets)
def _get_request(self, method=None, cookie=None, request_class=None):
if method is None:
method = 'GET'
if request_class is None:
request_class = TestingHttpRequest
req = request_class()
req.method = method
if cookie is not None:
self._set_csrf_cookie(req, cookie)
return req
def _get_csrf_cookie_request(
self, method=None, cookie=None, post_token=None, meta_token=None,
token_header=None, request_class=None,
):
"""
        The method argument defaults to "GET". The cookie argument defaults to
        this class's default test cookie. The post_token and meta_token
        arguments are included in the request's req.POST and req.META,
        respectively, when provided and non-None. The token_header argument
        is the req.META key to use for the token; it defaults to
        "HTTP_X_CSRFTOKEN".
"""
if cookie is None:
cookie = self._csrf_id_cookie
if token_header is None:
token_header = 'HTTP_X_CSRFTOKEN'
req = self._get_request(
method=method,
cookie=cookie,
request_class=request_class,
)
if post_token is not None:
req.POST['csrfmiddlewaretoken'] = post_token
if meta_token is not None:
req.META[token_header] = meta_token
return req
def _get_POST_csrf_cookie_request(
self, cookie=None, post_token=None, meta_token=None, token_header=None,
request_class=None,
):
return self._get_csrf_cookie_request(
method='POST', cookie=cookie, post_token=post_token,
meta_token=meta_token, token_header=token_header,
request_class=request_class,
)
def _get_POST_request_with_token(self, cookie=None, request_class=None):
"""The cookie argument defaults to this class's default test cookie."""
return self._get_POST_csrf_cookie_request(
cookie=cookie,
post_token=self._csrf_id_token,
request_class=request_class,
)
# This method depends on _unmask_cipher_token() being correct.
def _check_token_present(self, response, csrf_token=None):
if csrf_token is None:
csrf_secret = TEST_SECRET
else:
csrf_secret = _unmask_cipher_token(csrf_token)
text = str(response.content, response.charset)
match = re.search('name="csrfmiddlewaretoken" value="(.*?)"', text)
self.assertTrue(
match, f'Could not find a csrfmiddlewaretoken value in: {text}',
)
csrf_token = match[1]
self.assertMaskedSecretCorrect(csrf_token, csrf_secret)
def test_process_response_get_token_not_used(self):
"""
If get_token() is not called, the view middleware does not
add a cookie.
"""
# This is important to make pages cacheable. Pages which do call
# get_token(), assuming they use the token, are not cacheable because
        # the token is specific to the user.
req = self._get_request()
# non_token_view_using_request_processor does not call get_token(), but
# does use the csrf request processor. By using this, we are testing
# that the view processor is properly lazy and doesn't call get_token()
# until needed.
mw = CsrfViewMiddleware(non_token_view_using_request_processor)
mw.process_request(req)
mw.process_view(req, non_token_view_using_request_processor, (), {})
resp = mw(req)
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertIs(csrf_cookie, False)
def _check_bad_or_missing_cookie(self, cookie, expected):
"""Passing None for cookie includes no cookie."""
req = self._get_request(method='POST', cookie=cookie)
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
with self.assertLogs('django.security.csrf', 'WARNING') as cm:
resp = mw.process_view(req, post_form_view, (), {})
self.assertEqual(403, resp.status_code)
self.assertEqual(cm.records[0].getMessage(), 'Forbidden (%s): ' % expected)
def test_no_csrf_cookie(self):
"""
        If no CSRF cookie is present, the middleware rejects the incoming
request. This will stop login CSRF.
"""
self._check_bad_or_missing_cookie(None, REASON_NO_CSRF_COOKIE)
def _check_bad_or_missing_token(
self, expected, post_token=None, meta_token=None, token_header=None,
):
req = self._get_POST_csrf_cookie_request(
post_token=post_token,
meta_token=meta_token,
token_header=token_header,
)
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
with self.assertLogs('django.security.csrf', 'WARNING') as cm:
resp = mw.process_view(req, post_form_view, (), {})
self.assertEqual(403, resp.status_code)
self.assertEqual(cm.records[0].getMessage(), 'Forbidden (%s): ' % expected)
def test_csrf_cookie_bad_or_missing_token(self):
"""
If a CSRF cookie is present but the token is missing or invalid, the
middleware rejects the incoming request.
"""
cases = [
(None, None, REASON_CSRF_TOKEN_MISSING),
(16 * 'a', None, 'CSRF token from POST has incorrect length.'),
(64 * '*', None, 'CSRF token from POST has invalid characters.'),
(64 * 'a', None, 'CSRF token from POST incorrect.'),
(
None,
16 * 'a',
"CSRF token from the 'X-Csrftoken' HTTP header has incorrect length.",
),
(
None,
64 * '*',
"CSRF token from the 'X-Csrftoken' HTTP header has invalid characters.",
),
(
None,
64 * 'a',
"CSRF token from the 'X-Csrftoken' HTTP header incorrect.",
),
]
for post_token, meta_token, expected in cases:
with self.subTest(post_token=post_token, meta_token=meta_token):
self._check_bad_or_missing_token(
expected,
post_token=post_token,
meta_token=meta_token,
)
@override_settings(CSRF_HEADER_NAME='HTTP_X_CSRFTOKEN_CUSTOMIZED')
def test_csrf_cookie_bad_token_custom_header(self):
"""
If a CSRF cookie is present and an invalid token is passed via a
custom CSRF_HEADER_NAME, the middleware rejects the incoming request.
"""
expected = (
"CSRF token from the 'X-Csrftoken-Customized' HTTP header has "
"incorrect length."
)
self._check_bad_or_missing_token(
expected,
meta_token=16 * 'a',
token_header='HTTP_X_CSRFTOKEN_CUSTOMIZED',
)
def test_process_request_csrf_cookie_and_token(self):
"""
        If both a cookie and a token are present, the middleware lets the request through.
"""
req = self._get_POST_request_with_token()
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
def test_process_request_csrf_cookie_no_token_exempt_view(self):
"""
        If a CSRF cookie is present but no token, and the csrf_exempt decorator
        has been applied to the view, the middleware lets the request through.
"""
req = self._get_POST_csrf_cookie_request()
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, csrf_exempt(post_form_view), (), {})
self.assertIsNone(resp)
def test_csrf_token_in_header(self):
"""
The token may be passed in a header instead of in the form.
"""
req = self._get_POST_csrf_cookie_request(meta_token=self._csrf_id_token)
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
@override_settings(CSRF_HEADER_NAME='HTTP_X_CSRFTOKEN_CUSTOMIZED')
def test_csrf_token_in_header_with_customized_name(self):
"""
        settings.CSRF_HEADER_NAME can be used to customize the CSRF header name.
"""
req = self._get_POST_csrf_cookie_request(
meta_token=self._csrf_id_token,
token_header='HTTP_X_CSRFTOKEN_CUSTOMIZED',
)
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
def test_put_and_delete_rejected(self):
"""
        HTTP PUT and DELETE methods are also CSRF-protected.
"""
req = self._get_request(method='PUT')
mw = CsrfViewMiddleware(post_form_view)
with self.assertLogs('django.security.csrf', 'WARNING') as cm:
resp = mw.process_view(req, post_form_view, (), {})
self.assertEqual(403, resp.status_code)
self.assertEqual(cm.records[0].getMessage(), 'Forbidden (%s): ' % REASON_NO_CSRF_COOKIE)
req = self._get_request(method='DELETE')
with self.assertLogs('django.security.csrf', 'WARNING') as cm:
resp = mw.process_view(req, post_form_view, (), {})
self.assertEqual(403, resp.status_code)
self.assertEqual(cm.records[0].getMessage(), 'Forbidden (%s): ' % REASON_NO_CSRF_COOKIE)
def test_put_and_delete_allowed(self):
"""
HTTP PUT and DELETE can get through with X-CSRFToken and a cookie.
"""
req = self._get_csrf_cookie_request(method='PUT', meta_token=self._csrf_id_token)
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
req = self._get_csrf_cookie_request(method='DELETE', meta_token=self._csrf_id_token)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
def test_rotate_token_triggers_second_reset(self):
"""
If rotate_token() is called after the token is reset in
CsrfViewMiddleware's process_response() and before another call to
the same process_response(), the cookie is reset a second time.
"""
req = self._get_POST_request_with_token()
resp = sandwiched_rotate_token_view(req)
self.assertContains(resp, 'OK')
csrf_cookie = self._read_csrf_cookie(req, resp)
actual_secret = _unmask_cipher_token(csrf_cookie)
# set_cookie() was called a second time with a different secret.
self.assertCookiesSet(req, resp, [TEST_SECRET, actual_secret])
self.assertNotEqual(actual_secret, TEST_SECRET)
# Tests for the template tag method
def test_token_node_no_csrf_cookie(self):
"""
CsrfTokenNode works when no CSRF cookie is set.
"""
req = self._get_request()
resp = token_view(req)
token = get_token(req)
self.assertIsNotNone(token)
self._check_token_present(resp, token)
def test_token_node_empty_csrf_cookie(self):
"""
A new token is sent if the csrf_cookie is the empty string.
"""
req = self._get_request(cookie='')
mw = CsrfViewMiddleware(token_view)
mw.process_view(req, token_view, (), {})
resp = token_view(req)
token = get_token(req)
self.assertIsNotNone(token)
self._check_token_present(resp, token)
def test_token_node_with_csrf_cookie(self):
"""
CsrfTokenNode works when a CSRF cookie is set.
"""
req = self._get_csrf_cookie_request()
mw = CsrfViewMiddleware(token_view)
mw.process_request(req)
mw.process_view(req, token_view, (), {})
resp = token_view(req)
self._check_token_present(resp)
def test_get_token_for_exempt_view(self):
"""
get_token still works for a view decorated with 'csrf_exempt'.
"""
req = self._get_csrf_cookie_request()
mw = CsrfViewMiddleware(token_view)
mw.process_request(req)
mw.process_view(req, csrf_exempt(token_view), (), {})
resp = token_view(req)
self._check_token_present(resp)
def test_get_token_for_requires_csrf_token_view(self):
"""
get_token() works for a view decorated solely with requires_csrf_token.
"""
req = self._get_csrf_cookie_request()
resp = requires_csrf_token(token_view)(req)
self._check_token_present(resp)
def test_token_node_with_new_csrf_cookie(self):
"""
        CsrfTokenNode works when a CSRF cookie is created by the middleware
        (when one was not already present).
"""
req = self._get_request()
mw = CsrfViewMiddleware(token_view)
mw.process_view(req, token_view, (), {})
resp = mw(req)
csrf_cookie = self._read_csrf_cookie(req, resp)
self._check_token_present(resp, csrf_cookie)
def test_cookie_not_reset_on_accepted_request(self):
"""
        The CSRF token used in POSTs is changed on every request (although it
        stays equivalent). The CSRF cookie should not change on accepted
        requests. If it appears in the response, it should keep its value.
"""
req = self._get_POST_request_with_token()
mw = CsrfViewMiddleware(token_view)
mw.process_request(req)
mw.process_view(req, token_view, (), {})
resp = mw(req)
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertEqual(
csrf_cookie, self._csrf_id_cookie,
'CSRF cookie was changed on an accepted request',
)
@override_settings(DEBUG=True, ALLOWED_HOSTS=['www.example.com'])
def test_https_bad_referer(self):
"""
        A POST HTTPS request with a bad referer is rejected.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://www.evil.org/somepage'
req.META['SERVER_PORT'] = '443'
mw = CsrfViewMiddleware(post_form_view)
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(
response,
'Referer checking failed - https://www.evil.org/somepage does not '
'match any trusted origins.',
status_code=403,
)
def _check_referer_rejects(self, mw, req):
with self.assertRaises(RejectRequest):
mw._check_referer(req)
@override_settings(DEBUG=True)
def test_https_no_referer(self):
"""A POST HTTPS request with a missing referer is rejected."""
req = self._get_POST_request_with_token()
req._is_secure_override = True
mw = CsrfViewMiddleware(post_form_view)
self._check_referer_rejects(mw, req)
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(
response,
'Referer checking failed - no Referer.',
status_code=403,
)
def test_https_malformed_host(self):
"""
CsrfViewMiddleware generates a 403 response if it receives an HTTPS
request with a bad host.
"""
req = self._get_request(method='POST')
req._is_secure_override = True
req.META['HTTP_HOST'] = '@malformed'
req.META['HTTP_REFERER'] = 'https://www.evil.org/somepage'
req.META['SERVER_PORT'] = '443'
mw = CsrfViewMiddleware(token_view)
expected = (
'Referer checking failed - https://www.evil.org/somepage does not '
'match any trusted origins.'
)
with self.assertRaisesMessage(RejectRequest, expected):
mw._check_referer(req)
response = mw.process_view(req, token_view, (), {})
self.assertEqual(response.status_code, 403)
def test_origin_malformed_host(self):
req = self._get_request(method='POST')
req._is_secure_override = True
req.META['HTTP_HOST'] = '@malformed'
req.META['HTTP_ORIGIN'] = 'https://www.evil.org'
mw = CsrfViewMiddleware(token_view)
self._check_referer_rejects(mw, req)
response = mw.process_view(req, token_view, (), {})
self.assertEqual(response.status_code, 403)
@override_settings(DEBUG=True)
def test_https_malformed_referer(self):
"""
A POST HTTPS request with a bad referer is rejected.
"""
malformed_referer_msg = 'Referer checking failed - Referer is malformed.'
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_REFERER'] = 'http://http://www.example.com/'
mw = CsrfViewMiddleware(post_form_view)
self._check_referer_rejects(mw, req)
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(
response,
'Referer checking failed - Referer is insecure while host is secure.',
status_code=403,
)
# Empty
req.META['HTTP_REFERER'] = ''
self._check_referer_rejects(mw, req)
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(response, malformed_referer_msg, status_code=403)
# Non-ASCII
req.META['HTTP_REFERER'] = 'ØBöIß'
self._check_referer_rejects(mw, req)
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(response, malformed_referer_msg, status_code=403)
# missing scheme
# >>> urlparse('//example.com/')
# ParseResult(scheme='', netloc='example.com', path='/', params='', query='', fragment='')
req.META['HTTP_REFERER'] = '//example.com/'
self._check_referer_rejects(mw, req)
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(response, malformed_referer_msg, status_code=403)
# missing netloc
# >>> urlparse('https://')
# ParseResult(scheme='https', netloc='', path='', params='', query='', fragment='')
req.META['HTTP_REFERER'] = 'https://'
self._check_referer_rejects(mw, req)
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(response, malformed_referer_msg, status_code=403)
# Invalid URL
# >>> urlparse('https://[')
# ValueError: Invalid IPv6 URL
req.META['HTTP_REFERER'] = 'https://['
self._check_referer_rejects(mw, req)
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(response, malformed_referer_msg, status_code=403)
@override_settings(ALLOWED_HOSTS=['www.example.com'])
def test_https_good_referer(self):
"""
A POST HTTPS request with a good referer is accepted.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://www.example.com/somepage'
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
@override_settings(ALLOWED_HOSTS=['www.example.com'])
def test_https_good_referer_2(self):
"""
A POST HTTPS request with a good referer is accepted where the referer
contains no trailing slash.
"""
# See ticket #15617
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://www.example.com'
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
def _test_https_good_referer_behind_proxy(self):
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META.update({
'HTTP_HOST': '10.0.0.2',
'HTTP_REFERER': 'https://www.example.com/somepage',
'SERVER_PORT': '8080',
'HTTP_X_FORWARDED_HOST': 'www.example.com',
'HTTP_X_FORWARDED_PORT': '443',
})
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
@override_settings(CSRF_TRUSTED_ORIGINS=['https://dashboard.example.com'])
def test_https_good_referer_malformed_host(self):
"""
A POST HTTPS request is accepted if it receives a good referer with
a bad host.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = '@malformed'
req.META['HTTP_REFERER'] = 'https://dashboard.example.com/somepage'
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
@override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_TRUSTED_ORIGINS=['https://dashboard.example.com'])
def test_https_csrf_trusted_origin_allowed(self):
"""
A POST HTTPS request with a referer added to the CSRF_TRUSTED_ORIGINS
setting is accepted.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://dashboard.example.com'
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
@override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_TRUSTED_ORIGINS=['https://*.example.com'])
def test_https_csrf_wildcard_trusted_origin_allowed(self):
"""
A POST HTTPS request with a referer that matches a CSRF_TRUSTED_ORIGINS
wildcard is accepted.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://dashboard.example.com'
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
response = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(response)
def _test_https_good_referer_matches_cookie_domain(self):
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_REFERER'] = 'https://foo.example.com/'
req.META['SERVER_PORT'] = '443'
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
response = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(response)
def _test_https_good_referer_matches_cookie_domain_with_different_port(self):
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://foo.example.com:4443/'
req.META['SERVER_PORT'] = '4443'
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
response = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(response)
def test_ensures_csrf_cookie_no_logging(self):
"""
ensure_csrf_cookie() doesn't log warnings (#19436).
"""
with self.assertNoLogs('django.request', 'WARNING'):
req = self._get_request()
ensure_csrf_cookie_view(req)
def test_reading_post_data_raises_unreadable_post_error(self):
"""
An UnreadablePostError raised while reading the POST data should be
handled by the middleware.
"""
req = self._get_POST_request_with_token()
mw = CsrfViewMiddleware(post_form_view)
mw.process_request(req)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
req = self._get_POST_request_with_token(request_class=PostErrorRequest)
req.post_error = UnreadablePostError('Error reading input data.')
mw.process_request(req)
with self.assertLogs('django.security.csrf', 'WARNING') as cm:
resp = mw.process_view(req, post_form_view, (), {})
self.assertEqual(resp.status_code, 403)
self.assertEqual(
cm.records[0].getMessage(),
'Forbidden (%s): ' % REASON_CSRF_TOKEN_MISSING,
)
def test_reading_post_data_raises_os_error(self):
"""
An OSError raised while reading the POST data should not be handled by
the middleware.
"""
mw = CsrfViewMiddleware(post_form_view)
req = self._get_POST_request_with_token(request_class=PostErrorRequest)
req.post_error = OSError('Deleted directories/Missing permissions.')
mw.process_request(req)
with self.assertRaises(OSError):
mw.process_view(req, post_form_view, (), {})
@override_settings(ALLOWED_HOSTS=['www.example.com'])
def test_bad_origin_bad_domain(self):
"""A request with a bad origin is rejected."""
req = self._get_POST_request_with_token()
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_ORIGIN'] = 'https://www.evil.org'
mw = CsrfViewMiddleware(post_form_view)
self._check_referer_rejects(mw, req)
self.assertIs(mw._origin_verified(req), False)
with self.assertLogs('django.security.csrf', 'WARNING') as cm:
response = mw.process_view(req, post_form_view, (), {})
self.assertEqual(response.status_code, 403)
msg = REASON_BAD_ORIGIN % req.META['HTTP_ORIGIN']
self.assertEqual(cm.records[0].getMessage(), 'Forbidden (%s): ' % msg)
@override_settings(ALLOWED_HOSTS=['www.example.com'])
def test_bad_origin_null_origin(self):
"""A request with a null origin is rejected."""
req = self._get_POST_request_with_token()
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_ORIGIN'] = 'null'
mw = CsrfViewMiddleware(post_form_view)
self._check_referer_rejects(mw, req)
self.assertIs(mw._origin_verified(req), False)
with self.assertLogs('django.security.csrf', 'WARNING') as cm:
response = mw.process_view(req, post_form_view, (), {})
self.assertEqual(response.status_code, 403)
msg = REASON_BAD_ORIGIN % req.META['HTTP_ORIGIN']
self.assertEqual(cm.records[0].getMessage(), 'Forbidden (%s): ' % msg)
@override_settings(ALLOWED_HOSTS=['www.example.com'])
def test_bad_origin_bad_protocol(self):
"""A request with an origin with wrong protocol is rejected."""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_ORIGIN'] = 'http://example.com'
mw = CsrfViewMiddleware(post_form_view)
self._check_referer_rejects(mw, req)
self.assertIs(mw._origin_verified(req), False)
with self.assertLogs('django.security.csrf', 'WARNING') as cm:
response = mw.process_view(req, post_form_view, (), {})
self.assertEqual(response.status_code, 403)
msg = REASON_BAD_ORIGIN % req.META['HTTP_ORIGIN']
self.assertEqual(cm.records[0].getMessage(), 'Forbidden (%s): ' % msg)
@override_settings(
ALLOWED_HOSTS=['www.example.com'],
CSRF_TRUSTED_ORIGINS=[
'http://no-match.com',
'https://*.example.com',
'http://*.no-match.com',
'http://*.no-match-2.com',
],
)
def test_bad_origin_csrf_trusted_origin_bad_protocol(self):
"""
A request with an origin with the wrong protocol compared to
CSRF_TRUSTED_ORIGINS is rejected.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_ORIGIN'] = 'http://foo.example.com'
mw = CsrfViewMiddleware(post_form_view)
self._check_referer_rejects(mw, req)
self.assertIs(mw._origin_verified(req), False)
with self.assertLogs('django.security.csrf', 'WARNING') as cm:
response = mw.process_view(req, post_form_view, (), {})
self.assertEqual(response.status_code, 403)
msg = REASON_BAD_ORIGIN % req.META['HTTP_ORIGIN']
self.assertEqual(cm.records[0].getMessage(), 'Forbidden (%s): ' % msg)
self.assertEqual(mw.allowed_origins_exact, {'http://no-match.com'})
self.assertEqual(mw.allowed_origin_subdomains, {
'https': ['.example.com'],
'http': ['.no-match.com', '.no-match-2.com'],
})
@override_settings(ALLOWED_HOSTS=['www.example.com'])
def test_bad_origin_cannot_be_parsed(self):
"""
A POST request with an origin that can't be parsed by urlparse() is
rejected.
"""
req = self._get_POST_request_with_token()
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_ORIGIN'] = 'https://['
mw = CsrfViewMiddleware(post_form_view)
self._check_referer_rejects(mw, req)
self.assertIs(mw._origin_verified(req), False)
with self.assertLogs('django.security.csrf', 'WARNING') as cm:
response = mw.process_view(req, post_form_view, (), {})
self.assertEqual(response.status_code, 403)
msg = REASON_BAD_ORIGIN % req.META['HTTP_ORIGIN']
self.assertEqual(cm.records[0].getMessage(), 'Forbidden (%s): ' % msg)
@override_settings(ALLOWED_HOSTS=['www.example.com'])
def test_good_origin_insecure(self):
"""A POST HTTP request with a good origin is accepted."""
req = self._get_POST_request_with_token()
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_ORIGIN'] = 'http://www.example.com'
mw = CsrfViewMiddleware(post_form_view)
self.assertIs(mw._origin_verified(req), True)
response = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(response)
@override_settings(ALLOWED_HOSTS=['www.example.com'])
def test_good_origin_secure(self):
"""A POST HTTPS request with a good origin is accepted."""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_ORIGIN'] = 'https://www.example.com'
mw = CsrfViewMiddleware(post_form_view)
self.assertIs(mw._origin_verified(req), True)
response = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(response)
@override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_TRUSTED_ORIGINS=['https://dashboard.example.com'])
def test_good_origin_csrf_trusted_origin_allowed(self):
"""
A POST request with an origin added to the CSRF_TRUSTED_ORIGINS
setting is accepted.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_ORIGIN'] = 'https://dashboard.example.com'
mw = CsrfViewMiddleware(post_form_view)
self.assertIs(mw._origin_verified(req), True)
resp = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(resp)
self.assertEqual(mw.allowed_origins_exact, {'https://dashboard.example.com'})
self.assertEqual(mw.allowed_origin_subdomains, {})
@override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_TRUSTED_ORIGINS=['https://*.example.com'])
def test_good_origin_wildcard_csrf_trusted_origin_allowed(self):
"""
A POST request with an origin that matches a CSRF_TRUSTED_ORIGINS
wildcard is accepted.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_ORIGIN'] = 'https://foo.example.com'
mw = CsrfViewMiddleware(post_form_view)
self.assertIs(mw._origin_verified(req), True)
response = mw.process_view(req, post_form_view, (), {})
self.assertIsNone(response)
self.assertEqual(mw.allowed_origins_exact, set())
self.assertEqual(mw.allowed_origin_subdomains, {'https': ['.example.com']})
class CsrfViewMiddlewareTests(CsrfViewMiddlewareTestMixin, SimpleTestCase):
def _set_csrf_cookie(self, req, cookie):
req.COOKIES[settings.CSRF_COOKIE_NAME] = cookie
def _read_csrf_cookie(self, req, resp):
"""
Return the CSRF cookie as a string, or False if no cookie is present.
"""
if settings.CSRF_COOKIE_NAME not in resp.cookies:
return False
csrf_cookie = resp.cookies[settings.CSRF_COOKIE_NAME]
return csrf_cookie.value
def _get_cookies_set(self, req, resp):
return resp._cookies_set
def test_ensures_csrf_cookie_no_middleware(self):
"""
The ensure_csrf_cookie() decorator works without middleware.
"""
req = self._get_request()
resp = ensure_csrf_cookie_view(req)
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertTrue(csrf_cookie)
self.assertIn('Cookie', resp.get('Vary', ''))
def test_ensures_csrf_cookie_with_middleware(self):
"""
The ensure_csrf_cookie() decorator works with the CsrfViewMiddleware
enabled.
"""
req = self._get_request()
mw = CsrfViewMiddleware(ensure_csrf_cookie_view)
mw.process_view(req, ensure_csrf_cookie_view, (), {})
resp = mw(req)
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertTrue(csrf_cookie)
self.assertIn('Cookie', resp.get('Vary', ''))
def test_csrf_cookie_age(self):
"""
CSRF cookie age can be set using settings.CSRF_COOKIE_AGE.
"""
req = self._get_request()
MAX_AGE = 123
with self.settings(CSRF_COOKIE_NAME='csrfcookie',
CSRF_COOKIE_DOMAIN='.example.com',
CSRF_COOKIE_AGE=MAX_AGE,
CSRF_COOKIE_PATH='/test/',
CSRF_COOKIE_SECURE=True,
CSRF_COOKIE_HTTPONLY=True):
# token_view calls get_token() indirectly
mw = CsrfViewMiddleware(token_view)
mw.process_view(req, token_view, (), {})
resp = mw(req)
max_age = resp.cookies.get('csrfcookie').get('max-age')
self.assertEqual(max_age, MAX_AGE)
def test_csrf_cookie_age_none(self):
"""
        When CSRF_COOKIE_AGE is None, the CSRF cookie has no max age and is
        a session-based cookie.
"""
req = self._get_request()
MAX_AGE = None
with self.settings(CSRF_COOKIE_NAME='csrfcookie',
CSRF_COOKIE_DOMAIN='.example.com',
CSRF_COOKIE_AGE=MAX_AGE,
CSRF_COOKIE_PATH='/test/',
CSRF_COOKIE_SECURE=True,
CSRF_COOKIE_HTTPONLY=True):
# token_view calls get_token() indirectly
mw = CsrfViewMiddleware(token_view)
mw.process_view(req, token_view, (), {})
resp = mw(req)
max_age = resp.cookies.get('csrfcookie').get('max-age')
self.assertEqual(max_age, '')
def test_csrf_cookie_samesite(self):
req = self._get_request()
with self.settings(CSRF_COOKIE_NAME='csrfcookie', CSRF_COOKIE_SAMESITE='Strict'):
mw = CsrfViewMiddleware(token_view)
mw.process_view(req, token_view, (), {})
resp = mw(req)
self.assertEqual(resp.cookies['csrfcookie']['samesite'], 'Strict')
def test_bad_csrf_cookie_characters(self):
"""
If the CSRF cookie has invalid characters in a POST request, the
middleware rejects the incoming request.
"""
self._check_bad_or_missing_cookie(64 * '*', 'CSRF cookie has invalid characters.')
def test_bad_csrf_cookie_length(self):
"""
If the CSRF cookie has an incorrect length in a POST request, the
middleware rejects the incoming request.
"""
self._check_bad_or_missing_cookie(16 * 'a', 'CSRF cookie has incorrect length.')
def test_process_view_token_too_long(self):
"""
If the token is longer than expected, it is ignored and a new token is
created.
"""
req = self._get_request(cookie='x' * 100000)
mw = CsrfViewMiddleware(token_view)
mw.process_view(req, token_view, (), {})
resp = mw(req)
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertEqual(len(csrf_cookie), CSRF_TOKEN_LENGTH)
def test_process_view_token_invalid_chars(self):
"""
If the token contains non-alphanumeric characters, it is ignored and a
new token is created.
"""
token = ('!@#' + self._csrf_id_token)[:CSRF_TOKEN_LENGTH]
req = self._get_request(cookie=token)
mw = CsrfViewMiddleware(token_view)
mw.process_view(req, token_view, (), {})
resp = mw(req)
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertEqual(len(csrf_cookie), CSRF_TOKEN_LENGTH)
self.assertNotEqual(csrf_cookie, token)
def test_masked_unmasked_combinations(self):
"""
        All combinations of (1) masked and unmasked cookies, (2) masked and
        unmasked tokens, and (3) tokens provided via POST and via the
        X-CSRFToken header are allowed.
"""
cases = [
(TEST_SECRET, TEST_SECRET, None),
(TEST_SECRET, MASKED_TEST_SECRET2, None),
(TEST_SECRET, None, TEST_SECRET),
(TEST_SECRET, None, MASKED_TEST_SECRET2),
(MASKED_TEST_SECRET1, TEST_SECRET, None),
(MASKED_TEST_SECRET1, MASKED_TEST_SECRET2, None),
(MASKED_TEST_SECRET1, None, TEST_SECRET),
(MASKED_TEST_SECRET1, None, MASKED_TEST_SECRET2),
]
for args in cases:
with self.subTest(args=args):
cookie, post_token, meta_token = args
req = self._get_POST_csrf_cookie_request(
cookie=cookie, post_token=post_token, meta_token=meta_token,
)
mw = CsrfViewMiddleware(token_view)
mw.process_request(req)
resp = mw.process_view(req, token_view, (), {})
self.assertIsNone(resp)
def test_cookie_reset_only_once(self):
"""
A CSRF cookie that needs to be reset is reset only once when the view
is decorated with both ensure_csrf_cookie and csrf_protect.
"""
# Pass an unmasked cookie to trigger a cookie reset.
req = self._get_POST_request_with_token(cookie=TEST_SECRET)
resp = ensured_and_protected_view(req)
self.assertContains(resp, 'OK')
csrf_cookie = self._read_csrf_cookie(req, resp)
actual_secret = _unmask_cipher_token(csrf_cookie)
self.assertEqual(actual_secret, TEST_SECRET)
# set_cookie() was called only once and with the expected secret.
self.assertCookiesSet(req, resp, [TEST_SECRET])
def test_invalid_cookie_replaced_on_GET(self):
"""
A CSRF cookie with the wrong format is replaced during a GET request.
"""
req = self._get_request(cookie='badvalue')
resp = protected_view(req)
self.assertContains(resp, 'OK')
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertTrue(csrf_cookie, msg='No CSRF cookie was sent.')
self.assertEqual(len(csrf_cookie), CSRF_TOKEN_LENGTH)
def test_unmasked_secret_replaced_on_GET(self):
"""An unmasked CSRF cookie is replaced during a GET request."""
req = self._get_request(cookie=TEST_SECRET)
resp = protected_view(req)
self.assertContains(resp, 'OK')
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertTrue(csrf_cookie, msg='No CSRF cookie was sent.')
self.assertMaskedSecretCorrect(csrf_cookie, TEST_SECRET)
def test_masked_secret_not_replaced_on_GET(self):
"""A masked CSRF cookie is not replaced during a GET request."""
req = self._get_request(cookie=MASKED_TEST_SECRET1)
resp = protected_view(req)
self.assertContains(resp, 'OK')
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertFalse(csrf_cookie, msg='A CSRF cookie was sent.')
def test_masked_secret_accepted_and_not_replaced(self):
"""
The csrf cookie is left unchanged if originally masked.
"""
req = self._get_POST_request_with_token(cookie=MASKED_TEST_SECRET1)
mw = CsrfViewMiddleware(token_view)
mw.process_request(req)
resp = mw.process_view(req, token_view, (), {})
self.assertIsNone(resp)
resp = mw(req)
csrf_cookie = self._read_csrf_cookie(req, resp)
self.assertEqual(csrf_cookie, MASKED_TEST_SECRET1)
self._check_token_present(resp, csrf_cookie)
def test_bare_secret_accepted_and_replaced(self):
"""
The csrf cookie is reset (masked) if originally not masked.
"""
req = self._get_POST_request_with_token(cookie=TEST_SECRET)
mw = CsrfViewMiddleware(token_view)
mw.process_request(req)
resp = mw.process_view(req, token_view, (), {})
self.assertIsNone(resp)
resp = mw(req)
csrf_cookie = self._read_csrf_cookie(req, resp)
# This also checks that csrf_cookie now has length CSRF_TOKEN_LENGTH.
self.assertMaskedSecretCorrect(csrf_cookie, TEST_SECRET)
self._check_token_present(resp, csrf_cookie)
@override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_COOKIE_DOMAIN='.example.com', USE_X_FORWARDED_PORT=True)
def test_https_good_referer_behind_proxy(self):
"""
A POST HTTPS request is accepted when USE_X_FORWARDED_PORT=True.
"""
self._test_https_good_referer_behind_proxy()
@override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_COOKIE_DOMAIN='.example.com')
def test_https_good_referer_matches_cookie_domain(self):
"""
A POST HTTPS request with a good referer should be accepted from a
subdomain that's allowed by CSRF_COOKIE_DOMAIN.
"""
self._test_https_good_referer_matches_cookie_domain()
@override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_COOKIE_DOMAIN='.example.com')
def test_https_good_referer_matches_cookie_domain_with_different_port(self):
"""
A POST HTTPS request with a good referer should be accepted from a
subdomain that's allowed by CSRF_COOKIE_DOMAIN and a non-443 port.
"""
self._test_https_good_referer_matches_cookie_domain_with_different_port()
@override_settings(CSRF_COOKIE_DOMAIN='.example.com', DEBUG=True)
def test_https_reject_insecure_referer(self):
"""
A POST HTTPS request from an insecure referer should be rejected.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_REFERER'] = 'http://example.com/'
req.META['SERVER_PORT'] = '443'
mw = CsrfViewMiddleware(post_form_view)
self._check_referer_rejects(mw, req)
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(
response,
'Referer checking failed - Referer is insecure while host is secure.',
status_code=403,
)
@override_settings(CSRF_USE_SESSIONS=True, CSRF_COOKIE_DOMAIN=None)
class CsrfViewMiddlewareUseSessionsTests(CsrfViewMiddlewareTestMixin, SimpleTestCase):
"""
CSRF tests with CSRF_USE_SESSIONS=True.
"""
def _set_csrf_cookie(self, req, cookie):
req.session[CSRF_SESSION_KEY] = cookie
def _read_csrf_cookie(self, req, resp=None):
"""
Return the CSRF cookie as a string, or False if no cookie is present.
"""
if CSRF_SESSION_KEY not in req.session:
return False
return req.session[CSRF_SESSION_KEY]
def _get_cookies_set(self, req, resp):
return req.session._cookies_set
def test_no_session_on_request(self):
msg = (
'CSRF_USE_SESSIONS is enabled, but request.session is not set. '
'SessionMiddleware must appear before CsrfViewMiddleware in MIDDLEWARE.'
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
mw = CsrfViewMiddleware(lambda req: HttpResponse())
mw.process_request(HttpRequest())
def test_masked_unmasked_combinations(self):
"""
Masked and unmasked tokens are allowed both as POST and as the
X-CSRFToken header.
"""
cases = [
# Bare secrets are not allowed when CSRF_USE_SESSIONS=True.
(MASKED_TEST_SECRET1, TEST_SECRET, None),
(MASKED_TEST_SECRET1, MASKED_TEST_SECRET2, None),
(MASKED_TEST_SECRET1, None, TEST_SECRET),
(MASKED_TEST_SECRET1, None, MASKED_TEST_SECRET2),
]
for args in cases:
with self.subTest(args=args):
cookie, post_token, meta_token = args
req = self._get_POST_csrf_cookie_request(
cookie=cookie, post_token=post_token, meta_token=meta_token,
)
mw = CsrfViewMiddleware(token_view)
mw.process_request(req)
resp = mw.process_view(req, token_view, (), {})
self.assertIsNone(resp)
def test_process_response_get_token_used(self):
"""The ensure_csrf_cookie() decorator works without middleware."""
req = self._get_request()
ensure_csrf_cookie_view(req)
csrf_cookie = self._read_csrf_cookie(req)
self.assertTrue(csrf_cookie)
def test_session_modify(self):
"""The session isn't saved if the CSRF cookie is unchanged."""
req = self._get_request()
mw = CsrfViewMiddleware(ensure_csrf_cookie_view)
mw.process_view(req, ensure_csrf_cookie_view, (), {})
mw(req)
csrf_cookie = self._read_csrf_cookie(req)
self.assertTrue(csrf_cookie)
req.session.modified = False
mw.process_view(req, ensure_csrf_cookie_view, (), {})
mw(req)
self.assertFalse(req.session.modified)
def test_ensures_csrf_cookie_with_middleware(self):
"""
The ensure_csrf_cookie() decorator works with the CsrfViewMiddleware
enabled.
"""
req = self._get_request()
mw = CsrfViewMiddleware(ensure_csrf_cookie_view)
mw.process_view(req, ensure_csrf_cookie_view, (), {})
mw(req)
csrf_cookie = self._read_csrf_cookie(req)
self.assertTrue(csrf_cookie)
@override_settings(
ALLOWED_HOSTS=['www.example.com'],
SESSION_COOKIE_DOMAIN='.example.com',
USE_X_FORWARDED_PORT=True,
DEBUG=True,
)
def test_https_good_referer_behind_proxy(self):
"""
A POST HTTPS request is accepted when USE_X_FORWARDED_PORT=True.
"""
self._test_https_good_referer_behind_proxy()
@override_settings(ALLOWED_HOSTS=['www.example.com'], SESSION_COOKIE_DOMAIN='.example.com')
def test_https_good_referer_matches_cookie_domain(self):
"""
A POST HTTPS request with a good referer should be accepted from a
subdomain that's allowed by SESSION_COOKIE_DOMAIN.
"""
self._test_https_good_referer_matches_cookie_domain()
@override_settings(ALLOWED_HOSTS=['www.example.com'], SESSION_COOKIE_DOMAIN='.example.com')
def test_https_good_referer_matches_cookie_domain_with_different_port(self):
"""
A POST HTTPS request with a good referer should be accepted from a
subdomain that's allowed by SESSION_COOKIE_DOMAIN and a non-443 port.
"""
self._test_https_good_referer_matches_cookie_domain_with_different_port()
@override_settings(SESSION_COOKIE_DOMAIN='.example.com', DEBUG=True)
def test_https_reject_insecure_referer(self):
"""
A POST HTTPS request from an insecure referer should be rejected.
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_REFERER'] = 'http://example.com/'
req.META['SERVER_PORT'] = '443'
mw = CsrfViewMiddleware(post_form_view)
response = mw.process_view(req, post_form_view, (), {})
self.assertContains(
response,
'Referer checking failed - Referer is insecure while host is secure.',
status_code=403,
)
@override_settings(ROOT_URLCONF='csrf_tests.csrf_token_error_handler_urls', DEBUG=False)
class CsrfInErrorHandlingViewsTests(CsrfFunctionTestMixin, SimpleTestCase):
def test_csrf_token_on_404_stays_constant(self):
response = self.client.get('/does not exist/')
# The error handler returns status code 599.
self.assertEqual(response.status_code, 599)
token1 = response.content.decode('ascii')
response = self.client.get('/does not exist/')
self.assertEqual(response.status_code, 599)
token2 = response.content.decode('ascii')
secret2 = _unmask_cipher_token(token2)
self.assertMaskedSecretCorrect(token1, secret2)
| 41.665957
| 119
| 0.646615
|
0193ab249e1aa66dbcf7b0602f6ed4a677760ce3
| 592
|
py
|
Python
|
var/spack/repos/builtin/packages/py-hyperframe/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 3
|
2021-09-29T02:14:40.000Z
|
2022-01-27T20:50:36.000Z
|
var/spack/repos/builtin/packages/py-hyperframe/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8
|
2022-02-28T11:30:18.000Z
|
2022-03-23T19:34:56.000Z
|
var/spack/repos/builtin/packages/py-hyperframe/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyHyperframe(PythonPackage):
"""HTTP/2 framing layer for Python"""
homepage = "https://github.com/python-hyper/hyperframe/"
pypi = "hyperframe/hyperframe-6.0.0.tar.gz"
version('6.0.0', sha256='742d2a4bc3152a340a49d59f32e33ec420aa8e7054c1444ef5c7efff255842f1')
depends_on('py-setuptools', type='build')
depends_on('py-wheel', type='build')
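# Usage note (standard Spack workflow, not specific to this recipe): once this
# recipe is on the repo path, `spack install py-hyperframe` builds the package,
# and a version can be pinned with the usual spec syntax, e.g.
# `spack install py-hyperframe@6.0.0`.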
| 31.157895
| 95
| 0.738176
|
484d78ff4bb2ae0368770c07c63132f6c0c8b4ce
| 1,570
|
py
|
Python
|
src/bpp/migrations/0148_charakter_formalny_nadrzedny.py
|
iplweb/django-bpp
|
85f183a99d8d5027ae4772efac1e4a9f21675849
|
[
"BSD-3-Clause"
] | 1
|
2017-04-27T19:50:02.000Z
|
2017-04-27T19:50:02.000Z
|
src/bpp/migrations/0148_charakter_formalny_nadrzedny.py
|
mpasternak/django-bpp
|
434338821d5ad1aaee598f6327151aba0af66f5e
|
[
"BSD-3-Clause"
] | 41
|
2019-11-07T00:07:02.000Z
|
2022-02-27T22:09:39.000Z
|
src/bpp/migrations/0148_charakter_formalny_nadrzedny.py
|
iplweb/bpp
|
f027415cc3faf1ca79082bf7bacd4be35b1a6fdf
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-07-07 14:17
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
dependencies = [
('bpp', '0147_auto_20180707_1517'),
]
operations = [
migrations.AddField(
model_name='charakter_formalny',
name='level',
field=models.PositiveIntegerField(db_index=True, default=0, editable=False),
preserve_default=False,
),
migrations.AddField(
model_name='charakter_formalny',
name='lft',
field=models.PositiveIntegerField(db_index=True, default=0, editable=False),
preserve_default=False,
),
migrations.AddField(
model_name='charakter_formalny',
name='parent',
field=mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='bpp.Charakter_Formalny'),
),
migrations.AddField(
model_name='charakter_formalny',
name='rght',
field=models.PositiveIntegerField(db_index=True, default=0, editable=False),
preserve_default=False,
),
migrations.AddField(
model_name='charakter_formalny',
name='tree_id',
field=models.PositiveIntegerField(db_index=True, default=0, editable=False),
preserve_default=False,
),
]
| 33.404255
| 167
| 0.626115
|
cb091ac69c6f5df0b50614942361b45acb02937d
| 1,086
|
py
|
Python
|
3]. Competitive Programming/08]. LeetCode/1]. Problems/Python/0030)_Substring_with_Concatenation_of_All_words.py
|
Utqrsh04/The-Complete-FAANG-Preparation
|
a0a4a6ef8768d047f4c2d7b8553732364a26e08e
|
[
"MIT"
] | 6,969
|
2021-05-29T11:38:30.000Z
|
2022-03-31T19:31:49.000Z
|
3]. Competitive Programming/08]. LeetCode/1]. Problems/Python/0030)_Substring_with_Concatenation_of_All_words.py
|
thisisbillall/The-Complete-FAANG-Preparation
|
b0c761e2ceb08c92c3b62d7c00b6e8835653cb6e
|
[
"MIT"
] | 75
|
2021-06-15T07:59:43.000Z
|
2022-02-22T14:21:52.000Z
|
3]. Competitive Programming/08]. LeetCode/1]. Problems/Python/0030)_Substring_with_Concatenation_of_All_words.py
|
thisisbillall/The-Complete-FAANG-Preparation
|
b0c761e2ceb08c92c3b62d7c00b6e8835653cb6e
|
[
"MIT"
] | 1,524
|
2021-05-29T16:03:36.000Z
|
2022-03-31T17:46:13.000Z
|
# Imports added so this snippet runs outside the LeetCode harness, which
# normally injects List and Counter itself.
from typing import List
from collections import Counter
class Solution:
def findSubstring(self, s: str, words: List[str]) -> List[int]:
result = []
word_len = len(words[0])
for stripe in range(word_len):
i = stripe
to_match = len(words)
freq = Counter(words)
while i+to_match*word_len <= len(s):
word = s[i:i+word_len]
if word in freq:
freq[word] -= 1
if freq[word] == 0:
del freq[word]
to_match -= 1
i += word_len
if to_match == 0:
result.append(i - word_len*len(words))
elif to_match != len(words):
nb_matches = len(words) - to_match
first_word = s[i - nb_matches*word_len:i - (nb_matches-1)*word_len]
freq.setdefault(first_word, 0)
freq[first_word] += 1
to_match += 1
else:
i += word_len
return result
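# Hypothetical standalone driver (not part of the LeetCode template): a quick
# sanity check of the stripe-based sliding window above.
if __name__ == "__main__":
    # "barfoo" starts at index 0 and "foobar" at index 9.
    assert Solution().findSubstring("barfoothefoobarman", ["foo", "bar"]) == [0, 9]
    print("ok")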
| 37.448276
| 87
| 0.418048
|
f5ebfa9b75cbc52f8e4d000f92d0013a9fc5adc6
| 7,478
|
py
|
Python
|
tipping/src/tipping/api.py
|
tipresias/tipresias
|
8945acb8276f22f2159c24e5a1bd411e7920a79e
|
[
"MIT"
] | 12
|
2019-05-27T10:28:59.000Z
|
2022-02-01T23:49:50.000Z
|
tipping/src/tipping/api.py
|
tipresias/tipresias
|
8945acb8276f22f2159c24e5a1bd411e7920a79e
|
[
"MIT"
] | 275
|
2019-05-27T06:46:37.000Z
|
2022-03-18T03:58:33.000Z
|
tipping/src/tipping/api.py
|
tipresias/tipresias
|
8945acb8276f22f2159c24e5a1bd411e7920a79e
|
[
"MIT"
] | 6
|
2019-08-27T08:49:50.000Z
|
2021-10-04T12:28:37.000Z
|
"""External-facing API for fetching and updating application data."""
from typing import Optional
from datetime import datetime, timezone
from warnings import warn
import pandas as pd
from tipping import data_import, data_export
from tipping.helpers import pivot_team_matches_to_matches
from tipping.tipping import MonashSubmitter
from tipping import models
from tipping import settings
DEC = 12
THIRTY_FIRST = 31
JAN = 1
FIRST = 1
def _select_matches_from_current_round(
fixture_data_frame: pd.DataFrame, beginning_of_today: datetime, after=True
) -> Optional[pd.DataFrame]:
if not fixture_data_frame.any().any():
warn(
"Fixture for the upcoming round haven't been posted yet, "
"so there's nothing to tip. Try again later."
)
return None
latest_match_date = fixture_data_frame["date"].max()
if beginning_of_today > latest_match_date and after:
warn(
f"No matches found after {beginning_of_today}. The latest match "
f"found is at {latest_match_date}\n"
)
return None
date_comparison = ">" if after else "<"
latest_round_numbers = fixture_data_frame.query(
f"date {date_comparison} @beginning_of_today"
).loc[:, "round_number"]
if not any(latest_round_numbers):
return None
current_round = int( # pylint: disable=unused-variable
latest_round_numbers.min() if after else latest_round_numbers.max()
)
fixture_for_current_round = fixture_data_frame.query(
"round_number == @current_round"
)
return fixture_for_current_round
def _fetch_current_round_fixture(verbose, after=True) -> Optional[pd.DataFrame]:
right_now = datetime.now(tz=timezone.utc)
beginning_of_today = right_now.replace(hour=0, minute=0, second=0, microsecond=0)
beginning_of_this_year = datetime(
beginning_of_today.year, JAN, FIRST, tzinfo=timezone.utc
)
end_of_this_year = datetime(
beginning_of_today.year, DEC, THIRTY_FIRST, tzinfo=timezone.utc
)
if verbose == 1:
preposition = "after" if after else "up to"
print(f"Fetching fixture for matches {preposition} {beginning_of_today}...\n")
fixture_data_frame = data_import.DataImporter().fetch_fixture_data(
start_date=beginning_of_this_year,
end_date=end_of_this_year,
)
return _select_matches_from_current_round(
fixture_data_frame, beginning_of_today, after=after
)
def update_fixture_data(verbose: int = 1) -> None:
"""
Fetch fixture data and send upcoming match data to the main app.
Params:
-------
verbose: How much information to print. 1 prints all messages; 0 prints none.
"""
right_now = datetime.now(tz=timezone.utc) # pylint: disable=unused-variable
matches_from_upcoming_round = _fetch_current_round_fixture(verbose)
if matches_from_upcoming_round is None:
return None
upcoming_round = (
matches_from_upcoming_round["round_number"].drop_duplicates().iloc[0]
)
future_matches = matches_from_upcoming_round.query("date > @right_now")
data_export.update_fixture_data(future_matches, upcoming_round)
db_session = settings.Session()
matches = models.Match.from_future_fixtures(
db_session, future_matches, upcoming_round
)
for match in matches:
db_session.add(match)
db_session.commit()
return None
def update_match_predictions(
tips_submitters=None, verbose=1, ml_model_names: Optional[str] = None
) -> None:
"""Fetch predictions from ML models and send them to the main app.
Params:
-------
tips_submitters: Objects that handle submission of tips to competitions sites.
verbose: How much information to print. 1 prints all messages; 0 prints none.
ml_model_names: Comma-separated string of ML model names to use
for making predictions.
"""
matches_from_current_round = _fetch_current_round_fixture(verbose)
if matches_from_current_round is None:
return None
current_round = matches_from_current_round["round_number"].min()
current_season = matches_from_current_round["date"].min().year
if verbose == 1:
print("Fetching predictions for round " f"{current_round}, {current_season}...")
prediction_data = data_import.DataImporter().fetch_prediction_data(
f"{current_season}-{current_season + 1}",
round_number=current_round,
ml_model_names=ml_model_names,
)
if verbose == 1:
print("Predictions received!")
match_predictions = pivot_team_matches_to_matches(prediction_data)
updated_prediction_records = data_export.update_match_predictions(match_predictions)
if verbose == 1:
print("Match predictions sent!")
if not updated_prediction_records.any().any():
if verbose == 1:
print(
"No predictions found for the upcoming round. "
"Not submitting any tips."
)
return None
tips_submitters = tips_submitters or [
MonashSubmitter(verbose=verbose),
]
for submitter in tips_submitters:
submitter.submit_tips(updated_prediction_records)
return None
def update_matches(verbose=1) -> None:
"""
Fetch match data and send them to the main app.
verbose: How much information to print. 1 prints all messages; 0 prints none.
"""
right_now = datetime.now()
start_of_year = datetime(right_now.year, JAN, FIRST).strftime("%Y-%m-%d")
end_of_year = datetime(right_now.year, DEC, THIRTY_FIRST).strftime("%Y-%m-%d")
if verbose == 1:
print(f"Fetching match data for season {right_now.year}")
match_data = data_import.DataImporter().fetch_match_data(
start_of_year, end_of_year, fetch_data=True
)
if verbose == 1:
print("Match data received!")
data_export.update_matches(match_data)
db_session = settings.Session()
match_query = models.Match.played_without_results()
matches_without_results = db_session.execute(match_query).scalars().all()
if not any(matches_without_results):
return None
models.Match.update_results(matches_without_results, match_data)
db_session.commit()
if verbose == 1:
print("Match data sent!")
def update_match_results(verbose=1) -> None:
"""
Fetch minimal match results data and send them to the main app.
verbose: How much information to print. 1 prints all messages; 0 prints none.
"""
matches_from_current_round = _fetch_current_round_fixture(verbose, after=False)
if matches_from_current_round is None:
return None
current_round = matches_from_current_round["round_number"].min()
if verbose == 1:
print(f"Fetching match results for round {current_round}")
match_results_data = data_import.DataImporter().fetch_match_results_data(
current_round
)
if match_results_data.empty:
return None
data_export.update_match_results(match_results_data)
db_session = settings.Session()
match_query = models.Match.played_without_results()
matches_without_results = db_session.execute(match_query).scalars().all()
if not any(matches_without_results):
return None
models.Match.update_results(matches_without_results, match_results_data)
db_session.commit()
if verbose == 1:
print("Match data saved!")
return None
| 29.440945
| 88
| 0.701792
|
275d3449f812869a44c726da9340c67125e7bb86
| 43,996
|
py
|
Python
|
Lib/site-packages/mypyc/irbuild/function.py
|
edupyter/EDUPYTER38
|
396183cea72987506f1ef647c0272a2577c56218
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/mypyc/irbuild/function.py
|
edupyter/EDUPYTER38
|
396183cea72987506f1ef647c0272a2577c56218
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/mypyc/irbuild/function.py
|
edupyter/EDUPYTER38
|
396183cea72987506f1ef647c0272a2577c56218
|
[
"bzip2-1.0.6"
] | null | null | null |
"""Transform mypy AST functions to IR (and related things).
Normal functions are translated into a list of basic blocks
containing various IR ops (defined in mypyc.ir.ops).
This also deals with generators, async functions and nested
functions. All of these are transformed into callable classes. These
have a custom __call__ method that implements the call, and state, such
as an environment containing non-local variables, is stored in the
instance of the callable class.
"""
from typing import (
DefaultDict, NamedTuple, Optional, List, Sequence, Tuple, Union, Dict,
)
from mypy.nodes import (
ClassDef, FuncDef, OverloadedFuncDef, Decorator, Var, YieldFromExpr, AwaitExpr, YieldExpr,
FuncItem, LambdaExpr, SymbolNode, ArgKind, TypeInfo
)
from mypy.types import CallableType, get_proper_type
from mypyc.ir.ops import (
BasicBlock, Value, Register, Return, SetAttr, Integer, GetAttr, Branch, InitStatic,
LoadAddress, LoadLiteral, Unbox, Unreachable,
)
from mypyc.ir.rtypes import (
object_rprimitive, RInstance, object_pointer_rprimitive, dict_rprimitive, int_rprimitive,
bool_rprimitive,
)
from mypyc.ir.func_ir import (
FuncIR, FuncSignature, RuntimeArg, FuncDecl, FUNC_CLASSMETHOD, FUNC_STATICMETHOD, FUNC_NORMAL
)
from mypyc.ir.class_ir import ClassIR, NonExtClassInfo
from mypyc.primitives.generic_ops import py_setattr_op, next_raw_op, iter_op
from mypyc.primitives.misc_ops import (
check_stop_op, yield_from_except_op, coro_op, send_op, register_function
)
from mypyc.primitives.dict_ops import dict_set_item_op, dict_new_op, dict_get_method_with_none
from mypyc.common import SELF_NAME, LAMBDA_NAME
from mypyc.sametype import is_same_method_signature
from mypyc.irbuild.util import is_constant
from mypyc.irbuild.context import FuncInfo, ImplicitClass
from mypyc.irbuild.targets import AssignmentTarget
from mypyc.irbuild.statement import transform_try_except
from mypyc.irbuild.builder import IRBuilder, SymbolTarget, gen_arg_defaults
from mypyc.irbuild.callable_class import (
setup_callable_class, add_call_to_callable_class, add_get_to_callable_class,
instantiate_callable_class
)
from mypyc.irbuild.generator import (
gen_generator_func, setup_env_for_generator_class, create_switch_for_generator_class,
add_raise_exception_blocks_to_generator_class, populate_switch_for_generator_class,
add_methods_to_generator_class
)
from mypyc.irbuild.env_class import (
setup_env_class, load_outer_envs, load_env_registers, finalize_env_class,
setup_func_for_recursive_call
)
from mypyc.primitives.registry import builtin_names
from collections import defaultdict
# Top-level transform functions
def transform_func_def(builder: IRBuilder, fdef: FuncDef) -> None:
func_ir, func_reg = gen_func_item(builder, fdef, fdef.name, builder.mapper.fdef_to_sig(fdef))
# If the function that was visited was a nested function, then either look it up in our
# current environment or define it if it was not already defined.
if func_reg:
builder.assign(get_func_target(builder, fdef), func_reg, fdef.line)
maybe_insert_into_registry_dict(builder, fdef)
builder.functions.append(func_ir)
def transform_overloaded_func_def(builder: IRBuilder, o: OverloadedFuncDef) -> None:
# Handle regular overload case
assert o.impl
builder.accept(o.impl)
def transform_decorator(builder: IRBuilder, dec: Decorator) -> None:
func_ir, func_reg = gen_func_item(
builder,
dec.func,
dec.func.name,
builder.mapper.fdef_to_sig(dec.func)
)
decorated_func: Optional[Value] = None
if func_reg:
decorated_func = load_decorated_func(builder, dec.func, func_reg)
builder.assign(get_func_target(builder, dec.func), decorated_func, dec.func.line)
func_reg = decorated_func
# If the prebuild pass didn't put this function in the function to decorators map (for example
# if this is a registered singledispatch implementation with no other decorators), we should
# treat this function as a regular function, not a decorated function
elif dec.func in builder.fdefs_to_decorators:
        # Obtain the function name in order to construct the name of the helper function.
name = dec.func.fullname.split('.')[-1]
# Load the callable object representing the non-decorated function, and decorate it.
orig_func = builder.load_global_str(name, dec.line)
decorated_func = load_decorated_func(builder, dec.func, orig_func)
if decorated_func is not None:
# Set the callable object representing the decorated function as a global.
builder.call_c(dict_set_item_op,
[builder.load_globals_dict(),
builder.load_str(dec.func.name), decorated_func],
decorated_func.line)
maybe_insert_into_registry_dict(builder, dec.func)
builder.functions.append(func_ir)
def transform_lambda_expr(builder: IRBuilder, expr: LambdaExpr) -> Value:
typ = get_proper_type(builder.types[expr])
assert isinstance(typ, CallableType)
runtime_args = []
for arg, arg_type in zip(expr.arguments, typ.arg_types):
arg.variable.type = arg_type
runtime_args.append(
RuntimeArg(arg.variable.name, builder.type_to_rtype(arg_type), arg.kind))
ret_type = builder.type_to_rtype(typ.ret_type)
fsig = FuncSignature(runtime_args, ret_type)
fname = f'{LAMBDA_NAME}{builder.lambda_counter}'
builder.lambda_counter += 1
func_ir, func_reg = gen_func_item(builder, expr, fname, fsig)
assert func_reg is not None
builder.functions.append(func_ir)
return func_reg
def transform_yield_expr(builder: IRBuilder, expr: YieldExpr) -> Value:
if builder.fn_info.is_coroutine:
builder.error('async generators are unimplemented', expr.line)
if expr.expr:
retval = builder.accept(expr.expr)
else:
retval = builder.builder.none()
return emit_yield(builder, retval, expr.line)
def transform_yield_from_expr(builder: IRBuilder, o: YieldFromExpr) -> Value:
return handle_yield_from_and_await(builder, o)
def transform_await_expr(builder: IRBuilder, o: AwaitExpr) -> Value:
return handle_yield_from_and_await(builder, o)
# Internal functions
def gen_func_item(builder: IRBuilder,
fitem: FuncItem,
name: str,
sig: FuncSignature,
cdef: Optional[ClassDef] = None,
) -> Tuple[FuncIR, Optional[Value]]:
"""Generate and return the FuncIR for a given FuncDef.
If the given FuncItem is a nested function, then we generate a
callable class representing the function and use that instead of
    the actual function. If the given FuncItem contains a nested
function, then we generate an environment class so that inner
nested functions can access the environment of the given FuncDef.
Consider the following nested function:
def a() -> None:
def b() -> None:
def c() -> None:
return None
return None
return None
The classes generated would look something like the following.
has pointer to +-------+
+--------------------------> | a_env |
| +-------+
| ^
| | has pointer to
+-------+ associated with +-------+
| b_obj | -------------------> | b_env |
+-------+ +-------+
^
|
+-------+ has pointer to |
| c_obj | --------------------------+
+-------+
"""
# TODO: do something about abstract methods.
func_reg: Optional[Value] = None
# We treat lambdas as always being nested because we always generate
# a class for lambdas, no matter where they are. (It would probably also
# work to special case toplevel lambdas and generate a non-class function.)
is_nested = fitem in builder.nested_fitems or isinstance(fitem, LambdaExpr)
contains_nested = fitem in builder.encapsulating_funcs.keys()
is_decorated = fitem in builder.fdefs_to_decorators
is_singledispatch = fitem in builder.singledispatch_impls
in_non_ext = False
class_name = None
if cdef:
ir = builder.mapper.type_to_ir[cdef.info]
in_non_ext = not ir.is_ext_class
class_name = cdef.name
if is_singledispatch:
func_name = singledispatch_main_func_name(name)
else:
func_name = name
builder.enter(FuncInfo(fitem, func_name, class_name, gen_func_ns(builder),
is_nested, contains_nested, is_decorated, in_non_ext))
# Functions that contain nested functions need an environment class to store variables that
# are free in their nested functions. Generator functions need an environment class to
# store a variable denoting the next instruction to be executed when the __next__ function
# is called, along with all the variables inside the function itself.
if builder.fn_info.contains_nested or builder.fn_info.is_generator:
setup_env_class(builder)
if builder.fn_info.is_nested or builder.fn_info.in_non_ext:
setup_callable_class(builder)
if builder.fn_info.is_generator:
# Do a first-pass and generate a function that just returns a generator object.
gen_generator_func(builder)
args, _, blocks, ret_type, fn_info = builder.leave()
func_ir, func_reg = gen_func_ir(
builder, args, blocks, sig, fn_info, cdef, is_singledispatch,
)
# Re-enter the FuncItem and visit the body of the function this time.
builder.enter(fn_info)
setup_env_for_generator_class(builder)
load_outer_envs(builder, builder.fn_info.generator_class)
if builder.fn_info.is_nested and isinstance(fitem, FuncDef):
setup_func_for_recursive_call(builder, fitem, builder.fn_info.generator_class)
create_switch_for_generator_class(builder)
add_raise_exception_blocks_to_generator_class(builder, fitem.line)
else:
load_env_registers(builder)
gen_arg_defaults(builder)
if builder.fn_info.contains_nested and not builder.fn_info.is_generator:
finalize_env_class(builder)
builder.ret_types[-1] = sig.ret_type
# Add all variables and functions that are declared/defined within this
# function and are referenced in functions nested within this one to this
# function's environment class so the nested functions can reference
# them even if they are declared after the nested function's definition.
# Note that this is done before visiting the body of this function.
env_for_func: Union[FuncInfo, ImplicitClass] = builder.fn_info
if builder.fn_info.is_generator:
env_for_func = builder.fn_info.generator_class
elif builder.fn_info.is_nested or builder.fn_info.in_non_ext:
env_for_func = builder.fn_info.callable_class
if builder.fn_info.fitem in builder.free_variables:
# Sort the variables to keep things deterministic
for var in sorted(builder.free_variables[builder.fn_info.fitem],
key=lambda x: x.name):
if isinstance(var, Var):
rtype = builder.type_to_rtype(var.type)
builder.add_var_to_env_class(var, rtype, env_for_func, reassign=False)
if builder.fn_info.fitem in builder.encapsulating_funcs:
for nested_fn in builder.encapsulating_funcs[builder.fn_info.fitem]:
if isinstance(nested_fn, FuncDef):
# The return type is 'object' instead of an RInstance of the
# callable class because differently defined functions with
# the same name and signature across conditional blocks
# will generate different callable classes, so the callable
# class that gets instantiated must be generic.
builder.add_var_to_env_class(
nested_fn, object_rprimitive, env_for_func, reassign=False
)
builder.accept(fitem.body)
builder.maybe_add_implicit_return()
if builder.fn_info.is_generator:
populate_switch_for_generator_class(builder)
# Hang on to the local symbol table for a while, since we use it
# to calculate argument defaults below.
symtable = builder.symtables[-1]
args, _, blocks, ret_type, fn_info = builder.leave()
if fn_info.is_generator:
add_methods_to_generator_class(
builder, fn_info, sig, args, blocks, fitem.is_coroutine)
else:
func_ir, func_reg = gen_func_ir(
builder, args, blocks, sig, fn_info, cdef, is_singledispatch,
)
# Evaluate argument defaults in the surrounding scope, since we
# calculate them *once* when the function definition is evaluated.
calculate_arg_defaults(builder, fn_info, func_reg, symtable)
if is_singledispatch:
# add the generated main singledispatch function
builder.functions.append(func_ir)
# create the dispatch function
assert isinstance(fitem, FuncDef)
return gen_dispatch_func_ir(builder, fitem, fn_info.name, name, sig)
return func_ir, func_reg
def gen_func_ir(builder: IRBuilder,
args: List[Register],
blocks: List[BasicBlock],
sig: FuncSignature,
fn_info: FuncInfo,
cdef: Optional[ClassDef],
is_singledispatch_main_func: bool = False) -> Tuple[FuncIR, Optional[Value]]:
"""Generate the FuncIR for a function.
This takes the basic blocks and function info of a particular
function and returns the IR. If the function is nested,
also returns the register containing the instance of the
corresponding callable class.
"""
func_reg: Optional[Value] = None
if fn_info.is_nested or fn_info.in_non_ext:
func_ir = add_call_to_callable_class(builder, args, blocks, sig, fn_info)
add_get_to_callable_class(builder, fn_info)
func_reg = instantiate_callable_class(builder, fn_info)
else:
assert isinstance(fn_info.fitem, FuncDef)
func_decl = builder.mapper.func_to_decl[fn_info.fitem]
if fn_info.is_decorated or is_singledispatch_main_func:
class_name = None if cdef is None else cdef.name
func_decl = FuncDecl(fn_info.name, class_name, builder.module_name, sig,
func_decl.kind,
func_decl.is_prop_getter, func_decl.is_prop_setter)
func_ir = FuncIR(func_decl, args, blocks, fn_info.fitem.line,
traceback_name=fn_info.fitem.name)
else:
func_ir = FuncIR(func_decl, args, blocks,
fn_info.fitem.line, traceback_name=fn_info.fitem.name)
return (func_ir, func_reg)
def handle_ext_method(builder: IRBuilder, cdef: ClassDef, fdef: FuncDef) -> None:
# Perform the function of visit_method for methods inside extension classes.
name = fdef.name
class_ir = builder.mapper.type_to_ir[cdef.info]
func_ir, func_reg = gen_func_item(builder, fdef, name, builder.mapper.fdef_to_sig(fdef), cdef)
builder.functions.append(func_ir)
if is_decorated(builder, fdef):
        # Obtain the function name in order to construct the name of the helper function.
_, _, name = fdef.fullname.rpartition('.')
# Read the PyTypeObject representing the class, get the callable object
# representing the non-decorated method
typ = builder.load_native_type_object(cdef.fullname)
orig_func = builder.py_get_attr(typ, name, fdef.line)
# Decorate the non-decorated method
decorated_func = load_decorated_func(builder, fdef, orig_func)
# Set the callable object representing the decorated method as an attribute of the
# extension class.
builder.call_c(py_setattr_op,
[typ, builder.load_str(name), decorated_func],
fdef.line)
if fdef.is_property:
# If there is a property setter, it will be processed after the getter,
# We populate the optional setter field with none for now.
assert name not in class_ir.properties
class_ir.properties[name] = (func_ir, None)
elif fdef in builder.prop_setters:
# The respective property getter must have been processed already
assert name in class_ir.properties
getter_ir, _ = class_ir.properties[name]
class_ir.properties[name] = (getter_ir, func_ir)
class_ir.methods[func_ir.decl.name] = func_ir
# If this overrides a parent class method with a different type, we need
# to generate a glue method to mediate between them.
for base in class_ir.mro[1:]:
if (name in base.method_decls and name != '__init__'
and not is_same_method_signature(class_ir.method_decls[name].sig,
base.method_decls[name].sig)):
# TODO: Support contravariant subtyping in the input argument for
# property setters. Need to make a special glue method for handling this,
# similar to gen_glue_property.
f = gen_glue(builder, base.method_decls[name].sig, func_ir, class_ir, base, fdef)
class_ir.glue_methods[(base, name)] = f
builder.functions.append(f)
# If the class allows interpreted children, create glue
# methods that dispatch via the Python API. These will go in a
# "shadow vtable" that will be assigned to interpreted
# children.
if class_ir.allow_interpreted_subclasses:
f = gen_glue(builder, func_ir.sig, func_ir, class_ir, class_ir, fdef, do_py_ops=True)
class_ir.glue_methods[(class_ir, name)] = f
builder.functions.append(f)
def handle_non_ext_method(
builder: IRBuilder, non_ext: NonExtClassInfo, cdef: ClassDef, fdef: FuncDef) -> None:
# Perform the function of visit_method for methods inside non-extension classes.
name = fdef.name
func_ir, func_reg = gen_func_item(builder, fdef, name, builder.mapper.fdef_to_sig(fdef), cdef)
assert func_reg is not None
builder.functions.append(func_ir)
if is_decorated(builder, fdef):
# The undecorated method is a generated callable class
orig_func = func_reg
func_reg = load_decorated_func(builder, fdef, orig_func)
# TODO: Support property setters in non-extension classes
if fdef.is_property:
prop = builder.load_module_attr_by_fullname('builtins.property', fdef.line)
func_reg = builder.py_call(prop, [func_reg], fdef.line)
elif builder.mapper.func_to_decl[fdef].kind == FUNC_CLASSMETHOD:
cls_meth = builder.load_module_attr_by_fullname('builtins.classmethod', fdef.line)
func_reg = builder.py_call(cls_meth, [func_reg], fdef.line)
elif builder.mapper.func_to_decl[fdef].kind == FUNC_STATICMETHOD:
stat_meth = builder.load_module_attr_by_fullname(
'builtins.staticmethod', fdef.line
)
func_reg = builder.py_call(stat_meth, [func_reg], fdef.line)
builder.add_to_non_ext_dict(non_ext, name, func_reg, fdef.line)
def calculate_arg_defaults(builder: IRBuilder,
fn_info: FuncInfo,
func_reg: Optional[Value],
symtable: Dict[SymbolNode, SymbolTarget]) -> None:
"""Calculate default argument values and store them.
They are stored in statics for top level functions and in
    the function objects for nested functions (while constants are
    simply recomputed on demand instead of being stored).
"""
fitem = fn_info.fitem
for arg in fitem.arguments:
# Constant values don't get stored but just recomputed
if arg.initializer and not is_constant(arg.initializer):
value = builder.coerce(
builder.accept(arg.initializer),
symtable[arg.variable].type,
arg.line
)
if not fn_info.is_nested:
name = fitem.fullname + '.' + arg.variable.name
builder.add(InitStatic(value, name, builder.module_name))
else:
assert func_reg is not None
builder.add(SetAttr(func_reg, arg.variable.name, value, arg.line))
def gen_func_ns(builder: IRBuilder) -> str:
"""Generate a namespace for a nested function using its outer function names."""
return '_'.join(info.name + ('' if not info.class_name else '_' + info.class_name)
for info in builder.fn_infos
if info.name and info.name != '<top level>')
def emit_yield(builder: IRBuilder, val: Value, line: int) -> Value:
retval = builder.coerce(val, builder.ret_types[-1], line)
cls = builder.fn_info.generator_class
# Create a new block for the instructions immediately following the yield expression, and
# set the next label so that the next time '__next__' is called on the generator object,
# the function continues at the new block.
next_block = BasicBlock()
next_label = len(cls.continuation_blocks)
cls.continuation_blocks.append(next_block)
builder.assign(cls.next_label_target, Integer(next_label), line)
builder.add(Return(retval))
builder.activate_block(next_block)
add_raise_exception_blocks_to_generator_class(builder, line)
assert cls.send_arg_reg is not None
return cls.send_arg_reg
def handle_yield_from_and_await(builder: IRBuilder, o: Union[YieldFromExpr, AwaitExpr]) -> Value:
# This is basically an implementation of the code in PEP 380.
# TODO: do we want to use the right types here?
result = Register(object_rprimitive)
to_yield_reg = Register(object_rprimitive)
received_reg = Register(object_rprimitive)
if isinstance(o, YieldFromExpr):
iter_val = builder.call_c(iter_op, [builder.accept(o.expr)], o.line)
else:
iter_val = builder.call_c(coro_op, [builder.accept(o.expr)], o.line)
iter_reg = builder.maybe_spill_assignable(iter_val)
stop_block, main_block, done_block = BasicBlock(), BasicBlock(), BasicBlock()
_y_init = builder.call_c(next_raw_op, [builder.read(iter_reg)], o.line)
builder.add(Branch(_y_init, stop_block, main_block, Branch.IS_ERROR))
# Try extracting a return value from a StopIteration and return it.
# If it wasn't, this reraises the exception.
builder.activate_block(stop_block)
builder.assign(result, builder.call_c(check_stop_op, [], o.line), o.line)
builder.goto(done_block)
builder.activate_block(main_block)
builder.assign(to_yield_reg, _y_init, o.line)
# OK Now the main loop!
loop_block = BasicBlock()
builder.goto_and_activate(loop_block)
def try_body() -> None:
builder.assign(
received_reg, emit_yield(builder, builder.read(to_yield_reg), o.line), o.line
)
def except_body() -> None:
# The body of the except is all implemented in a C function to
# reduce how much code we need to generate. It returns a value
# indicating whether to break or yield (or raise an exception).
val = Register(object_rprimitive)
val_address = builder.add(LoadAddress(object_pointer_rprimitive, val))
to_stop = builder.call_c(yield_from_except_op,
[builder.read(iter_reg), val_address], o.line)
ok, stop = BasicBlock(), BasicBlock()
builder.add(Branch(to_stop, stop, ok, Branch.BOOL))
# The exception got swallowed. Continue, yielding the returned value
builder.activate_block(ok)
builder.assign(to_yield_reg, val, o.line)
builder.nonlocal_control[-1].gen_continue(builder, o.line)
# The exception was a StopIteration. Stop iterating.
builder.activate_block(stop)
builder.assign(result, val, o.line)
builder.nonlocal_control[-1].gen_break(builder, o.line)
def else_body() -> None:
# Do a next() or a .send(). It will return NULL on exception
# but it won't automatically propagate.
_y = builder.call_c(
send_op, [builder.read(iter_reg), builder.read(received_reg)], o.line
)
ok, stop = BasicBlock(), BasicBlock()
builder.add(Branch(_y, stop, ok, Branch.IS_ERROR))
# Everything's fine. Yield it.
builder.activate_block(ok)
builder.assign(to_yield_reg, _y, o.line)
builder.nonlocal_control[-1].gen_continue(builder, o.line)
# Try extracting a return value from a StopIteration and return it.
        # If it wasn't, this reraises the exception.
builder.activate_block(stop)
builder.assign(result, builder.call_c(check_stop_op, [], o.line), o.line)
builder.nonlocal_control[-1].gen_break(builder, o.line)
builder.push_loop_stack(loop_block, done_block)
transform_try_except(
builder, try_body, [(None, None, except_body)], else_body, o.line
)
builder.pop_loop_stack()
builder.goto_and_activate(done_block)
return builder.read(result)
def load_decorated_func(builder: IRBuilder, fdef: FuncDef, orig_func_reg: Value) -> Value:
"""Apply decorators to a function.
Given a decorated FuncDef and an instance of the callable class
representing that FuncDef, apply the corresponding decorator
functions on that decorated FuncDef and return the decorated
function.
"""
if not is_decorated(builder, fdef):
# If there are no decorators associated with the function, then just return the
# original function.
return orig_func_reg
decorators = builder.fdefs_to_decorators[fdef]
func_reg = orig_func_reg
for d in reversed(decorators):
decorator = d.accept(builder.visitor)
assert isinstance(decorator, Value)
func_reg = builder.py_call(decorator, [func_reg], func_reg.line)
return func_reg
def is_decorated(builder: IRBuilder, fdef: FuncDef) -> bool:
return fdef in builder.fdefs_to_decorators
def gen_glue(builder: IRBuilder, sig: FuncSignature, target: FuncIR,
cls: ClassIR, base: ClassIR, fdef: FuncItem,
*,
do_py_ops: bool = False
) -> FuncIR:
"""Generate glue methods that mediate between different method types in subclasses.
Works on both properties and methods. See gen_glue_methods below
for more details.
If do_py_ops is True, then the glue methods should use generic
C API operations instead of direct calls, to enable generating
"shadow" glue methods that work with interpreted subclasses.
"""
if fdef.is_property:
return gen_glue_property(builder, sig, target, cls, base, fdef.line, do_py_ops)
else:
return gen_glue_method(builder, sig, target, cls, base, fdef.line, do_py_ops)
class ArgInfo(NamedTuple):
args: List[Value]
arg_names: List[Optional[str]]
arg_kinds: List[ArgKind]
def get_args(builder: IRBuilder, rt_args: Sequence[RuntimeArg], line: int) -> ArgInfo:
# The environment operates on Vars, so we make some up
fake_vars = [(Var(arg.name), arg.type) for arg in rt_args]
args = [builder.read(builder.add_local_reg(var, type, is_arg=True), line)
for var, type in fake_vars]
arg_names = [arg.name
if arg.kind.is_named() or (arg.kind.is_optional() and not arg.pos_only) else None
for arg in rt_args]
arg_kinds = [arg.kind for arg in rt_args]
return ArgInfo(args, arg_names, arg_kinds)
def gen_glue_method(builder: IRBuilder, sig: FuncSignature, target: FuncIR,
cls: ClassIR, base: ClassIR, line: int,
do_pycall: bool,
) -> FuncIR:
"""Generate glue methods that mediate between different method types in subclasses.
For example, if we have:
class A:
def f(builder: IRBuilder, x: int) -> object: ...
then it is totally permissible to have a subclass
class B(A):
def f(builder: IRBuilder, x: object) -> int: ...
since '(object) -> int' is a subtype of '(int) -> object' by the usual
contra/co-variant function subtyping rules.
The trickiness here is that int and object have different
runtime representations in mypyc, so A.f and B.f have
different signatures at the native C level. To deal with this,
we need to generate glue methods that mediate between the
different versions by coercing the arguments and return
values.
If do_pycall is True, then make the call using the C API
instead of a native call.
"""
builder.enter()
builder.ret_types[-1] = sig.ret_type
rt_args = list(sig.args)
if target.decl.kind == FUNC_NORMAL:
rt_args[0] = RuntimeArg(sig.args[0].name, RInstance(cls))
arg_info = get_args(builder, rt_args, line)
args, arg_kinds, arg_names = arg_info.args, arg_info.arg_kinds, arg_info.arg_names
# We can do a passthrough *args/**kwargs with a native call, but if the
# args need to get distributed out to arguments, we just let python handle it
if (
any(kind.is_star() for kind in arg_kinds)
and any(not arg.kind.is_star() for arg in target.decl.sig.args)
):
do_pycall = True
if do_pycall:
if target.decl.kind == FUNC_STATICMETHOD:
# FIXME: this won't work if we can do interpreted subclasses
first = builder.builder.get_native_type(cls)
st = 0
else:
first = args[0]
st = 1
retval = builder.builder.py_method_call(
first, target.name, args[st:], line, arg_kinds[st:], arg_names[st:])
else:
retval = builder.builder.call(target.decl, args, arg_kinds, arg_names, line)
retval = builder.coerce(retval, sig.ret_type, line)
builder.add(Return(retval))
arg_regs, _, blocks, ret_type, _ = builder.leave()
return FuncIR(
FuncDecl(target.name + '__' + base.name + '_glue',
cls.name, builder.module_name,
FuncSignature(rt_args, ret_type),
target.decl.kind),
arg_regs, blocks)
def gen_glue_property(builder: IRBuilder,
sig: FuncSignature,
target: FuncIR,
cls: ClassIR,
base: ClassIR,
line: int,
do_pygetattr: bool) -> FuncIR:
"""Generate glue methods for properties that mediate between different subclass types.
Similarly to methods, properties of derived types can be covariantly subtyped. Thus,
properties also require glue. However, this only requires the return type to change.
Further, instead of a method call, an attribute get is performed.
If do_pygetattr is True, then get the attribute using the Python C
API instead of a native call.
"""
builder.enter()
rt_arg = RuntimeArg(SELF_NAME, RInstance(cls))
self_target = builder.add_self_to_env(cls)
arg = builder.read(self_target, line)
builder.ret_types[-1] = sig.ret_type
if do_pygetattr:
retval = builder.py_get_attr(arg, target.name, line)
else:
retval = builder.add(GetAttr(arg, target.name, line))
retbox = builder.coerce(retval, sig.ret_type, line)
builder.add(Return(retbox))
args, _, blocks, return_type, _ = builder.leave()
return FuncIR(
FuncDecl(target.name + '__' + base.name + '_glue',
cls.name, builder.module_name, FuncSignature([rt_arg], return_type)),
args, blocks)
def get_func_target(builder: IRBuilder, fdef: FuncDef) -> AssignmentTarget:
"""Given a FuncDef, return the target for the instance of its callable class.
If the function was not already defined somewhere, then define it
and add it to the current environment.
"""
if fdef.original_def:
# Get the target associated with the previously defined FuncDef.
return builder.lookup(fdef.original_def)
if builder.fn_info.is_generator or builder.fn_info.contains_nested:
return builder.lookup(fdef)
return builder.add_local_reg(fdef, object_rprimitive)
def load_type(builder: IRBuilder, typ: TypeInfo, line: int) -> Value:
if typ in builder.mapper.type_to_ir:
class_ir = builder.mapper.type_to_ir[typ]
class_obj = builder.builder.get_native_type(class_ir)
elif typ.fullname in builtin_names:
builtin_addr_type, src = builtin_names[typ.fullname]
class_obj = builder.add(LoadAddress(builtin_addr_type, src, line))
else:
class_obj = builder.load_global_str(typ.name, line)
return class_obj
def load_func(builder: IRBuilder, func_name: str, fullname: Optional[str], line: int) -> Value:
if fullname is not None and not fullname.startswith(builder.current_module):
# we're calling a function in a different module
# We can't use load_module_attr_by_fullname here because we need to load the function using
        # func_name, not the name specified by fullname (which can be different for underscore
        # functions)
module = fullname.rsplit('.')[0]
loaded_module = builder.load_module(module)
func = builder.py_get_attr(loaded_module, func_name, line)
else:
func = builder.load_global_str(func_name, line)
return func
def generate_singledispatch_dispatch_function(
builder: IRBuilder,
main_singledispatch_function_name: str,
fitem: FuncDef,
) -> None:
line = fitem.line
current_func_decl = builder.mapper.func_to_decl[fitem]
arg_info = get_args(builder, current_func_decl.sig.args, line)
dispatch_func_obj = builder.self()
arg_type = builder.builder.get_type_of_obj(arg_info.args[0], line)
dispatch_cache = builder.builder.get_attr(
dispatch_func_obj, 'dispatch_cache', dict_rprimitive, line
)
call_find_impl, use_cache, call_func = BasicBlock(), BasicBlock(), BasicBlock()
get_result = builder.call_c(dict_get_method_with_none, [dispatch_cache, arg_type], line)
is_not_none = builder.translate_is_op(get_result, builder.none_object(), 'is not', line)
impl_to_use = Register(object_rprimitive)
builder.add_bool_branch(is_not_none, use_cache, call_find_impl)
builder.activate_block(use_cache)
builder.assign(impl_to_use, get_result, line)
builder.goto(call_func)
builder.activate_block(call_find_impl)
find_impl = builder.load_module_attr_by_fullname('functools._find_impl', line)
registry = load_singledispatch_registry(builder, dispatch_func_obj, line)
uncached_impl = builder.py_call(find_impl, [arg_type, registry], line)
builder.call_c(dict_set_item_op, [dispatch_cache, arg_type, uncached_impl], line)
builder.assign(impl_to_use, uncached_impl, line)
builder.goto(call_func)
builder.activate_block(call_func)
gen_calls_to_correct_impl(builder, impl_to_use, arg_info, fitem, line)
def gen_calls_to_correct_impl(
builder: IRBuilder,
impl_to_use: Value,
arg_info: ArgInfo,
fitem: FuncDef,
line: int,
) -> None:
current_func_decl = builder.mapper.func_to_decl[fitem]
def gen_native_func_call_and_return(fdef: FuncDef) -> None:
func_decl = builder.mapper.func_to_decl[fdef]
ret_val = builder.builder.call(
func_decl, arg_info.args, arg_info.arg_kinds, arg_info.arg_names, line
)
coerced = builder.coerce(ret_val, current_func_decl.sig.ret_type, line)
builder.add(Return(coerced))
typ, src = builtin_names['builtins.int']
int_type_obj = builder.add(LoadAddress(typ, src, line))
is_int = builder.builder.type_is_op(impl_to_use, int_type_obj, line)
native_call, non_native_call = BasicBlock(), BasicBlock()
builder.add_bool_branch(is_int, native_call, non_native_call)
builder.activate_block(native_call)
passed_id = builder.add(Unbox(impl_to_use, int_rprimitive, line))
native_ids = get_native_impl_ids(builder, fitem)
for impl, i in native_ids.items():
call_impl, next_impl = BasicBlock(), BasicBlock()
current_id = builder.load_int(i)
builder.builder.compare_tagged_condition(
passed_id,
current_id,
'==',
call_impl,
next_impl,
line,
)
# Call the registered implementation
builder.activate_block(call_impl)
gen_native_func_call_and_return(impl)
builder.activate_block(next_impl)
# We've already handled all the possible integer IDs, so we should never get here
builder.add(Unreachable())
builder.activate_block(non_native_call)
ret_val = builder.py_call(
impl_to_use, arg_info.args, line, arg_info.arg_kinds, arg_info.arg_names
)
coerced = builder.coerce(ret_val, current_func_decl.sig.ret_type, line)
builder.add(Return(coerced))
def gen_dispatch_func_ir(
builder: IRBuilder,
fitem: FuncDef,
main_func_name: str,
dispatch_name: str,
sig: FuncSignature,
) -> Tuple[FuncIR, Value]:
"""Create a dispatch function (a function that checks the first argument type and dispatches
to the correct implementation)
"""
builder.enter(FuncInfo(fitem, dispatch_name))
setup_callable_class(builder)
builder.fn_info.callable_class.ir.attributes['registry'] = dict_rprimitive
builder.fn_info.callable_class.ir.attributes['dispatch_cache'] = dict_rprimitive
builder.fn_info.callable_class.ir.has_dict = True
builder.fn_info.callable_class.ir.needs_getseters = True
generate_singledispatch_callable_class_ctor(builder)
generate_singledispatch_dispatch_function(builder, main_func_name, fitem)
args, _, blocks, _, fn_info = builder.leave()
dispatch_callable_class = add_call_to_callable_class(builder, args, blocks, sig, fn_info)
builder.functions.append(dispatch_callable_class)
add_get_to_callable_class(builder, fn_info)
add_register_method_to_callable_class(builder, fn_info)
func_reg = instantiate_callable_class(builder, fn_info)
dispatch_func_ir = generate_dispatch_glue_native_function(
builder, fitem, dispatch_callable_class.decl, dispatch_name
)
return dispatch_func_ir, func_reg
def generate_dispatch_glue_native_function(
builder: IRBuilder,
fitem: FuncDef,
callable_class_decl: FuncDecl,
dispatch_name: str,
) -> FuncIR:
line = fitem.line
builder.enter()
# We store the callable class in the globals dict for this function
callable_class = builder.load_global_str(dispatch_name, line)
decl = builder.mapper.func_to_decl[fitem]
arg_info = get_args(builder, decl.sig.args, line)
args = [callable_class] + arg_info.args
arg_kinds = [ArgKind.ARG_POS] + arg_info.arg_kinds
arg_names = arg_info.arg_names
arg_names.insert(0, 'self')
ret_val = builder.builder.call(callable_class_decl, args, arg_kinds, arg_names, line)
builder.add(Return(ret_val))
arg_regs, _, blocks, _, fn_info = builder.leave()
return FuncIR(decl, arg_regs, blocks)
def generate_singledispatch_callable_class_ctor(builder: IRBuilder) -> None:
"""Create an __init__ that sets registry and dispatch_cache to empty dicts"""
line = -1
class_ir = builder.fn_info.callable_class.ir
with builder.enter_method(class_ir, '__init__', bool_rprimitive):
empty_dict = builder.call_c(dict_new_op, [], line)
builder.add(SetAttr(builder.self(), 'registry', empty_dict, line))
cache_dict = builder.call_c(dict_new_op, [], line)
dispatch_cache_str = builder.load_str('dispatch_cache')
# use the py_setattr_op instead of SetAttr so that it also gets added to our __dict__
builder.call_c(py_setattr_op, [builder.self(), dispatch_cache_str, cache_dict], line)
# the generated C code seems to expect that __init__ returns a char, so just return 1
builder.add(Return(Integer(1, bool_rprimitive, line), line))
def add_register_method_to_callable_class(builder: IRBuilder, fn_info: FuncInfo) -> None:
line = -1
with builder.enter_method(fn_info.callable_class.ir, 'register', object_rprimitive):
cls_arg = builder.add_argument('cls', object_rprimitive)
func_arg = builder.add_argument('func', object_rprimitive, ArgKind.ARG_OPT)
ret_val = builder.call_c(register_function, [builder.self(), cls_arg, func_arg], line)
builder.add(Return(ret_val, line))
def load_singledispatch_registry(builder: IRBuilder, dispatch_func_obj: Value, line: int) -> Value:
return builder.builder.get_attr(dispatch_func_obj, 'registry', dict_rprimitive, line)
def singledispatch_main_func_name(orig_name: str) -> str:
return f'__mypyc_singledispatch_main_function_{orig_name}__'
def get_registry_identifier(fitem: FuncDef) -> str:
return f'__mypyc_singledispatch_registry_{fitem.fullname}__'
def maybe_insert_into_registry_dict(builder: IRBuilder, fitem: FuncDef) -> None:
line = fitem.line
is_singledispatch_main_func = fitem in builder.singledispatch_impls
# dict of singledispatch_func to list of register_types (fitem is the function to register)
to_register: DefaultDict[FuncDef, List[TypeInfo]] = defaultdict(list)
for main_func, impls in builder.singledispatch_impls.items():
for dispatch_type, impl in impls:
if fitem == impl:
to_register[main_func].append(dispatch_type)
if not to_register and not is_singledispatch_main_func:
return
if is_singledispatch_main_func:
main_func_name = singledispatch_main_func_name(fitem.name)
main_func_obj = load_func(builder, main_func_name, fitem.fullname, line)
loaded_object_type = builder.load_module_attr_by_fullname('builtins.object', line)
registry_dict = builder.builder.make_dict([(loaded_object_type, main_func_obj)], line)
dispatch_func_obj = builder.load_global_str(fitem.name, line)
builder.call_c(
py_setattr_op, [dispatch_func_obj, builder.load_str('registry'), registry_dict], line
)
for singledispatch_func, types in to_register.items():
# TODO: avoid recomputing the native IDs for all the functions every time we find a new
# function
native_ids = get_native_impl_ids(builder, singledispatch_func)
if fitem not in native_ids:
to_insert = load_func(builder, fitem.name, fitem.fullname, line)
else:
current_id = native_ids[fitem]
load_literal = LoadLiteral(current_id, object_rprimitive)
to_insert = builder.add(load_literal)
# TODO: avoid reloading the registry here if we just created it
dispatch_func_obj = load_func(
builder, singledispatch_func.name, singledispatch_func.fullname, line
)
registry = load_singledispatch_registry(builder, dispatch_func_obj, line)
for typ in types:
loaded_type = load_type(builder, typ, line)
builder.call_c(dict_set_item_op, [registry, loaded_type, to_insert], line)
dispatch_cache = builder.builder.get_attr(
dispatch_func_obj, 'dispatch_cache', dict_rprimitive, line
)
builder.gen_method_call(dispatch_cache, 'clear', [], None, line)
def get_native_impl_ids(builder: IRBuilder, singledispatch_func: FuncDef) -> Dict[FuncDef, int]:
"""Return a dict of registered implementation to native implementation ID for all
implementations
"""
impls = builder.singledispatch_impls[singledispatch_func]
return {impl: i for i, (typ, impl) in enumerate(impls) if not is_decorated(builder, impl)}
| 41.584121
| 99
| 0.69204
|
fbe74a7214dae8391c52014303a896decfb10fd6
| 2,901
|
py
|
Python
|
tracardi/process_engine/action/v1/time/day_night/plugin.py
|
stefaanneyts/tracardi
|
1e393a78e230c2d5afc16fcf0046d7a634507dba
|
[
"MIT"
] | 1
|
2021-11-17T00:45:29.000Z
|
2021-11-17T00:45:29.000Z
|
tracardi/process_engine/action/v1/time/day_night/plugin.py
|
stefaanneyts/tracardi
|
1e393a78e230c2d5afc16fcf0046d7a634507dba
|
[
"MIT"
] | null | null | null |
tracardi/process_engine/action/v1/time/day_night/plugin.py
|
stefaanneyts/tracardi
|
1e393a78e230c2d5afc16fcf0046d7a634507dba
|
[
"MIT"
] | null | null | null |
import re
from tracardi_plugin_sdk.domain.register import Plugin, Spec, MetaData, Form, FormGroup, FormField, FormComponent, \
Documentation, PortDoc
from tracardi_plugin_sdk.action_runner import ActionRunner
from tracardi_plugin_sdk.domain.result import Result
from .model.configuration import Configuration
from .service.day_night_checker import is_day
def validate(config: dict):
return Configuration(**config)
class DayNightAction(ActionRunner):
def __init__(self, **kwargs):
self.config = validate(kwargs)
async def run(self, payload):
dot = self._get_dot_accessor(payload)
latitude = dot[self.config.latitude]
longitude = dot[self.config.longitude]
if is_day(longitude, latitude):
return Result(value=payload, port="day"), Result(value=None, port="night")
return Result(value=None, port="day"), Result(value=payload, port="night")
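# Illustrative configuration (hypothetical dot-paths; Tracardi resolves them
# against event data at runtime): {"latitude": "payload@geo.latitude",
# "longitude": "payload@geo.longitude"}. Per the form descriptions below,
# plain numeric values are also accepted in place of paths.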
def register() -> Plugin:
return Plugin(
start=False,
debug=False,
spec=Spec(
module='tracardi.process_engine.action.v1.time.day_night.plugin',
className='DayNightAction',
inputs=['payload'],
outputs=["day", "night"],
manual='day_night_split_action',
init={
"latitude": None,
"longitude": None
},
version="0.6.0.1",
form=Form(groups=[
FormGroup(
fields=[
FormField(
id="latitude",
name="Latitude",
description="Path to latitude data or latitude itself.",
component=FormComponent(type="dotPath", props={"label": "Latitude"})
),
FormField(
id="longitude",
name="Longitude",
description="Path to longitude data or longitude itself.",
component=FormComponent(type="dotPath", props={"label": "longitude"})
)
]
),
]),
),
metadata=MetaData(
name='Day/Night',
            desc='Splits the workflow depending on whether it is day or night at a given latitude and longitude.',
type='flowNode',
width=200,
height=100,
icon='dark-light',
group=["Time"],
documentation=Documentation(
inputs={
"payload": PortDoc(desc="Reads payload object.")
},
outputs={
"day": PortDoc(desc="Returns input payload if it is a day."),
"night": PortDoc(desc="Returns input payload if it is a night"),
}
)
)
)
| 34.535714
| 116
| 0.518097
|
70570c483d018729c655a218c0de8028feaca517
| 2,247
|
py
|
Python
|
app/core/tests/test_models.py
|
jburr032/recipe-app-api
|
53157d15b906046669b97a97796d5ecab54be829
|
[
"MIT"
] | null | null | null |
app/core/tests/test_models.py
|
jburr032/recipe-app-api
|
53157d15b906046669b97a97796d5ecab54be829
|
[
"MIT"
] | null | null | null |
app/core/tests/test_models.py
|
jburr032/recipe-app-api
|
53157d15b906046669b97a97796d5ecab54be829
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
def sample_user(email='_test_11_@test.com', password='password'):
""" Create a sample user helper """
return get_user_model().objects.create_user(email, password)
class ModelTests(TestCase):
def test_create_user_with_email_successful(self):
"""Test creating a new user with an eamil is successfull"""
email = 'londonappteam@email.com'
password = 'password123'
user = get_user_model().objects.create_user(
email=email,
password=password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalized(self):
"""Test that new user emails are normalized by being lowercase"""
email = 'test@TEST.COM'
user = get_user_model().objects.create_user(email, '123password')
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
"""Tests that a new user has passed-in an email address"""
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, '123password')
def test_creating_new_superuser(self):
"""Tests that a new superuser is created"""
email = 'test@test.com'
user = get_user_model().objects.create_superuser(email, '123password')
self.assertTrue(user.is_staff)
self.assertTrue(user.is_superuser)
def test_tag_str(self):
""" Test the tag string representation """
tag = models.Tag.objects.create(
user=sample_user(),
name='Vegan'
)
# Django allows you to specify what value you want returned
# when casting to data types - here we are casting the obj
# 'tag' to string (__str__), so we want the 'name' value to be returned
self.assertEqual(str(tag), tag.name)
def test_ingredient_str(self):
""" Test the ingredient string representation """
ingredient = models.Ingredient.objects.create(
user=sample_user(),
name="Pepper"
)
self.assertEqual(str(ingredient), ingredient.name)
| 33.044118
| 79
| 0.656431
|
130f41656503f9a34fd09000bd738baaad1e9d72
| 3,715
|
py
|
Python
|
coin.py
|
supertren/cryptocurrency
|
2e8f2f1f57b614cf0d66458b115aa50e7420dbce
|
[
"MIT"
] | null | null | null |
coin.py
|
supertren/cryptocurrency
|
2e8f2f1f57b614cf0d66458b115aa50e7420dbce
|
[
"MIT"
] | null | null | null |
coin.py
|
supertren/cryptocurrency
|
2e8f2f1f57b614cf0d66458b115aa50e7420dbce
|
[
"MIT"
] | null | null | null |
import hashlib
import time
class Block(object):
def __init__(self, index, proof_number, previous_hash, data, timestamp=None):
self.index = index
self.proof_number = proof_number
self.previous_hash = previous_hash
self.data = data
self.timestamp = timestamp or time.time()
@property
def compute_hash(self):
string_block = "{}{}{}{}{}".format(self.index, self.proof_number, self.previous_hash, self.data, self.timestamp)
return hashlib.sha256(string_block.encode()).hexdigest()
def __repr__(self):
return "{} - {} - {} - {} - {}".format(self.index, self.proof_number, self.previous_hash, self.data, self.timestamp)
class BlockChain(object):
def __init__(self):
self.chain = []
self.current_data = []
self.nodes = set()
self.build_genesis()
def build_genesis(self):
self.build_block(proof_number=0, previous_hash=0)
def build_block(self, proof_number, previous_hash):
block = Block(
index=len(self.chain),
proof_number=proof_number,
previous_hash=previous_hash,
data=self.current_data
)
self.current_data = []
self.chain.append(block)
return block
@staticmethod
def confirm_validity(block, previous_block):
if previous_block.index + 1 != block.index:
return False
elif previous_block.compute_hash != block.previous_hash:
return False
elif block.timestamp <= previous_block.timestamp:
return False
return True
def get_data(self, sender, receiver, amount):
self.current_data.append({
'sender': sender,
'receiver': receiver,
'amount': amount
})
return True
@staticmethod
def proof_of_work(last_proof):
pass
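    # A minimal proof-of-work sketch (an assumption; the original leaves this
    # method as a stub). One common toy scheme: find p such that the SHA-256
    # of f"{last_proof}{p}" has four leading zeros:
    #
    #     p = 0
    #     while not hashlib.sha256(
    #             f"{last_proof}{p}".encode()).hexdigest().startswith("0000"):
    #         p += 1
    #     return p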
@property
def latest_block(self):
return self.chain[-1]
def chain_validity(self):
pass
def block_mining(self, details_miner):
self.get_data(
sender="", #it implies that this node has created a new block
receiver=details_miner,
            amount=1, #creating a new block (or identifying the proof number) is awarded with
)
last_block = self.latest_block
last_proof_number = last_block.proof_number
proof_number = self.proof_of_work(last_proof_number)
last_hash = last_block.compute_hash
block = self.build_block(proof_number, last_hash)
return vars(block)
def create_node(self, address):
self.nodes.add(address)
return True
@staticmethod
def get_block_object(block_data):
return Block(
block_data['index'],
block_data['proof_number'],
block_data['previous_hash'],
block_data['data'],
timestamp=block_data['timestamp']
)
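# BlockChain.proof_of_work above is left as a stub in the original. A minimal
# hash-puzzle sketch (the leading-zeros difficulty criterion is an assumption
# for illustration, not taken from this file):
def _proof_of_work_sketch(last_proof, difficulty=4):
    """Search for a proof whose sha256 digest, combined with last_proof,
    starts with `difficulty` zero hex digits (illustrative only)."""
    proof = 0
    while not hashlib.sha256("{}{}".format(last_proof, proof).encode()
                             ).hexdigest().startswith("0" * difficulty):
        proof += 1
    return proof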
blockchain = BlockChain()
print("Get Ready .... mining about to start")
print(blockchain.chain)
last_block = blockchain.latest_block
last_proof_number = last_block.proof_number
proof_number = blockchain.proof_of_work(last_proof_number)
blockchain.get_data(
sender="", #this means that this node has constructed another block
receiver="supertren_foroestafas__mouse_is_in_the_house",
amount=1, #building a new block (or figuring out the proof number) is awarded with one coin
)
last_hash = last_block.compute_hash
block = blockchain.build_block(proof_number, last_hash)
print("WOW, MINING HAS BEEN SUCCESSFUL!")
print(blockchain.chain)
| 21.473988
| 124
| 0.637147
|
d878895b1f07b4443ca1864c51ddf434dd1f2eae
| 5,859
|
py
|
Python
|
Day 47/classifiers/linear_classifier.py
|
vgaurav3011/100-Days-of-ML
|
ec302b03fd492c459cff2592b3a4f5e38f9c9d72
|
[
"MIT"
] | 12
|
2020-03-30T15:10:48.000Z
|
2021-11-08T06:04:01.000Z
|
Day 47/classifiers/linear_classifier.py
|
vgaurav3011/100-Days-of-ML
|
ec302b03fd492c459cff2592b3a4f5e38f9c9d72
|
[
"MIT"
] | 3
|
2021-06-08T22:34:58.000Z
|
2022-01-13T03:25:23.000Z
|
Day 47/classifiers/linear_classifier.py
|
vgaurav3011/100-Days-of-ML
|
ec302b03fd492c459cff2592b3a4f5e38f9c9d72
|
[
"MIT"
] | 3
|
2020-04-13T09:51:28.000Z
|
2021-04-28T07:37:36.000Z
|
from __future__ import print_function
from builtins import range
from builtins import object
import numpy as np
from cs231n.classifiers.linear_svm import *
from cs231n.classifiers.softmax import *
from past.builtins import xrange
class LinearClassifier(object):
def __init__(self):
self.W = None
def train(self, X, y, learning_rate=1e-3, reg=1e-5, num_iters=100,
batch_size=200, verbose=False):
"""
Train this linear classifier using stochastic gradient descent.
Inputs:
- X: A numpy array of shape (N, D) containing training data; there are N
training samples each of dimension D.
- y: A numpy array of shape (N,) containing training labels; y[i] = c
means that X[i] has label 0 <= c < C for C classes.
- learning_rate: (float) learning rate for optimization.
- reg: (float) regularization strength.
- num_iters: (integer) number of steps to take when optimizing
- batch_size: (integer) number of training examples to use at each step.
- verbose: (boolean) If true, print progress during optimization.
Outputs:
A list containing the value of the loss function at each training iteration.
"""
num_train, dim = X.shape
num_classes = np.max(y) + 1 # assume y takes values 0...K-1 where K is number of classes
if self.W is None:
# lazily initialize W
self.W = 0.001 * np.random.randn(dim, num_classes)
# Run stochastic gradient descent to optimize W
loss_history = []
for it in range(num_iters):
X_batch = None
y_batch = None
#########################################################################
# TODO: #
# Sample batch_size elements from the training data and their #
# corresponding labels to use in this round of gradient descent. #
# Store the data in X_batch and their corresponding labels in #
# y_batch; after sampling X_batch should have shape (batch_size, dim) #
# and y_batch should have shape (batch_size,) #
# #
# Hint: Use np.random.choice to generate indices. Sampling with #
# replacement is faster than sampling without replacement. #
#########################################################################
# sample with replacement (per the hint above) and index into X and y
batch_indices = np.random.choice(num_train, batch_size, replace=True)
X_batch = X[batch_indices]
y_batch = y[batch_indices]
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# evaluate loss and gradient
loss, grad = self.loss(X_batch, y_batch, reg)
loss_history.append(loss)
# perform parameter update
#########################################################################
# TODO: #
# Update the weights using the gradient and the learning rate. #
#########################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
self.W = self.W - learning_rate*grad
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
if verbose and it % 100 == 0:
print('iteration %d / %d: loss %f' % (it, num_iters, loss))
return loss_history
def predict(self, X):
"""
Use the trained weights of this linear classifier to predict labels for
data points.
Inputs:
- X: A numpy array of shape (N, D) containing training data; there are N
training samples each of dimension D.
Returns:
- y_pred: Predicted labels for the data in X. y_pred is a 1-dimensional
array of length N, and each element is an integer giving the predicted
class.
"""
y_pred = np.zeros(X.shape[0])
###########################################################################
# TODO: #
# Implement this method. Store the predicted labels in y_pred. #
###########################################################################
scores = X.dot(self.W)
y_pred = scores.argmax(axis=1)
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return y_pred
def loss(self, X_batch, y_batch, reg):
"""
Compute the loss function and its derivative.
Subclasses will override this.
Inputs:
- X_batch: A numpy array of shape (N, D) containing a minibatch of N
data points; each point has dimension D.
- y_batch: A numpy array of shape (N,) containing labels for the minibatch.
- reg: (float) regularization strength.
Returns: A tuple containing:
- loss as a single float
- gradient with respect to self.W; an array of the same shape as W
"""
pass
class LinearSVM(LinearClassifier):
""" A subclass that uses the Multiclass SVM loss function """
def loss(self, X_batch, y_batch, reg):
return svm_loss_vectorized(self.W, X_batch, y_batch, reg)
class Softmax(LinearClassifier):
""" A subclass that uses the Softmax + Cross-entropy loss function """
def loss(self, X_batch, y_batch, reg):
return softmax_loss_vectorized(self.W, X_batch, y_batch, reg)
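# Illustrative usage (a sketch: real CIFAR-10 loading from the cs231n
# assignment is assumed elsewhere, so random data stands in here):
#
#   X = np.random.randn(500, 3073)           # 500 samples, 3072 pixels + bias
#   y = np.random.randint(0, 10, size=500)   # labels for 10 classes
#   svm = LinearSVM()
#   loss_history = svm.train(X, y, learning_rate=1e-7, reg=2.5e4,
#                            num_iters=1500, verbose=True)
#   train_accuracy = np.mean(svm.predict(X) == y)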
| 41.553191
| 96
| 0.521249
|
3f1fe7e2cdeffbf39aac43beeff40ccf92ce1fbd
| 22,891
|
py
|
Python
|
brian2/devices/device.py
|
rgerkin/brian2
|
34761a58b0d4c2275194e648449419b3dd73286b
|
[
"BSD-2-Clause"
] | 1
|
2019-08-17T21:19:03.000Z
|
2019-08-17T21:19:03.000Z
|
brian2/devices/device.py
|
rgerkin/brian2
|
34761a58b0d4c2275194e648449419b3dd73286b
|
[
"BSD-2-Clause"
] | null | null | null |
brian2/devices/device.py
|
rgerkin/brian2
|
34761a58b0d4c2275194e648449419b3dd73286b
|
[
"BSD-2-Clause"
] | null | null | null |
'''
Module containing the `Device` base class as well as the `RuntimeDevice`
implementation and some helper functions to access/set devices.
'''
from __future__ import absolute_import
from weakref import WeakKeyDictionary
import numbers
import numpy as np
from brian2.memory.dynamicarray import DynamicArray, DynamicArray1D
from brian2.codegen.targets import codegen_targets
from brian2.codegen.runtime.numpy_rt import NumpyCodeObject
from brian2.core.names import find_name
from brian2.core.preferences import prefs
from brian2.core.variables import ArrayVariable, DynamicArrayVariable
from brian2.core.functions import Function
from brian2.units import ms
from brian2.utils.logger import get_logger
from brian2.utils.stringtools import code_representation, indent
__all__ = ['Device', 'RuntimeDevice',
'get_device', 'set_device',
'all_devices', 'reinit_devices',
'reset_device', 'device', 'seed'
]
logger = get_logger(__name__)
all_devices = {}
prefs.register_preferences('devices', 'Device preferences')
#: caches the automatically determined code generation target
_auto_target = None
def auto_target():
'''
Automatically choose a code generation target (invoked when the
`codegen.target` preference is set to ``'auto'``). Caches its result so it
only does the check once. Prefers weave > cython > numpy.
Returns
-------
target : class derived from `CodeObject`
The target to use
'''
global _auto_target
if _auto_target is None:
target_dict = dict((target.class_name, target)
for target in codegen_targets
if target.class_name)
using_fallback = False
if 'weave' in target_dict and target_dict['weave'].is_available():
_auto_target = target_dict['weave']
elif 'cython' in target_dict and target_dict['cython'].is_available():
_auto_target = target_dict['cython']
else:
_auto_target = target_dict['numpy']
using_fallback = True
if using_fallback:
logger.info('Cannot use compiled code, falling back to the numpy '
'code generation target. Note that this will likely '
'be slower than using compiled code. Set the code '
'generation to numpy manually to avoid this message:\n'
'prefs.codegen.target = "numpy"',
'codegen_fallback', once=True)
else:
logger.debug(('Choosing %r as the code generation '
'target.') % _auto_target.class_name)
return _auto_target
class Device(object):
'''
Base Device object.
'''
def __init__(self):
#: The network schedule that this device supports. If the device only
#: supports a specific, fixed schedule, it has to set this attribute to
#: the respective schedule (see `Network.schedule` for details). If it
#: supports arbitrary schedules, it should be set to ``None`` (the
#: default).
self.network_schedule = None
self.defaultclock = None
self._maximum_run_time = None
self._state_tuple = (self.__module__, self.__class__.__name__)
def _set_maximum_run_time(self, maximum_run_time):
'''
Sets a maximum time for a run before it will break. Used primarily for testing purposes. Not guaranteed to be
respected by a device.
'''
self._maximum_run_time = maximum_run_time
def get_array_name(self, var, access_data=True):
'''
Return a globally unique name for `var`.
Parameters
----------
access_data : bool, optional
For `DynamicArrayVariable` objects, specifying `True` here means the
name for the underlying data is returned. If specifying `False`,
the name of the object itself is returned (e.g. to allow resizing).
Returns
-------
name : str
The name for `var`.
'''
raise NotImplementedError()
def get_len(self, array):
'''
Return the length of the array.
Parameters
----------
array : `ArrayVariable`
The array for which the length is requested.
Returns
-------
l : int
The length of the array.
'''
raise NotImplementedError()
def add_array(self, var):
'''
Add an array to this device.
Parameters
----------
var : `ArrayVariable`
The array to add.
'''
raise NotImplementedError()
def init_with_zeros(self, var, dtype):
'''
Initialize an array with zeros.
Parameters
----------
var : `ArrayVariable`
The array to initialize with zeros.
dtype : `dtype`
The data type to use for the array.
'''
raise NotImplementedError()
def init_with_arange(self, var, start, dtype):
'''
Initialize an array with an integer range.
Parameters
----------
var : `ArrayVariable`
The array to fill with the integer range.
start : int
The start value for the integer range
dtype : `dtype`
The data type to use for the array.
'''
raise NotImplementedError()
def fill_with_array(self, var, arr):
'''
Fill an array with the values given in another array.
Parameters
----------
var : `ArrayVariable`
The array to fill.
arr : `ndarray`
The array values that should be copied to `var`.
'''
raise NotImplementedError()
def spike_queue(self, source_start, source_end):
'''
Create and return a new `SpikeQueue` for this `Device`.
Parameters
----------
source_start : int
The start index of the source group (necessary for subgroups)
source_end : int
The end index of the source group (necessary for subgroups)
'''
raise NotImplementedError()
def resize(self, var, new_size):
'''
Resize a `DynamicArrayVariable`.
Parameters
----------
var : `DynamicArrayVariable`
The variable that should be resized.
new_size : int
The new size of the variable
'''
raise NotImplementedError()
def resize_along_first(self, var, new_size):
# Can be overwritten with a better implementation
return self.resize(var, new_size)
def seed(self, seed=None):
'''
Set the seed for the random number generator.
Parameters
----------
seed : int, optional
The seed value for the random number generator, or ``None`` (the
default) to set a random seed.
'''
raise NotImplementedError()
def code_object_class(self, codeobj_class=None, fallback_pref='codegen.target'):
'''
Return `CodeObject` class according to input/default settings
Parameters
----------
codeobj_class : a `CodeObject` class, optional
If this keyword is set to None or no argument is given, this method will return
the default.
fallback_pref : str, optional
String describing which attribute of prefs to access to retrieve the 'default' target.
Usually this is codegen.target, but in some cases we want to use object-specific targets
such as codegen.string_expression_target.
Returns
-------
codeobj_class : class
The `CodeObject` class that should be used
'''
if isinstance(codeobj_class, str):
raise TypeError("codeobj_class argument given to code_object_class device method "
"should be a CodeObject class, not a string. You can, however, "
"send a string description of the target desired for the CodeObject "
"under the keyword fallback_pref")
if codeobj_class is None:
codeobj_class = prefs[fallback_pref]
if isinstance(codeobj_class, str):
if codeobj_class == 'auto':
return auto_target()
for target in codegen_targets:
if target.class_name == codeobj_class:
return target
# No target found
targets = ['auto'] + [target.class_name
for target in codegen_targets
if target.class_name]
raise ValueError("Unknown code generation target: %s, should be "
" one of %s" % (codeobj_class, targets))
else:
return codeobj_class
def code_object(self, owner, name, abstract_code, variables, template_name,
variable_indices, codeobj_class=None,
template_kwds=None, override_conditional_write=None,
compiler_kwds=None):
if compiler_kwds is None:
compiler_kwds = {}
name = find_name(name)
codeobj_class = self.code_object_class(codeobj_class)
template = getattr(codeobj_class.templater, template_name)
iterate_all = template.iterate_all
generator = codeobj_class.generator_class(variables=variables,
variable_indices=variable_indices,
owner=owner,
iterate_all=iterate_all,
codeobj_class=codeobj_class,
override_conditional_write=override_conditional_write,
allows_scalar_write=template.allows_scalar_write,
name=name,
template_name=template_name)
if template_kwds is None:
template_kwds = dict()
else:
template_kwds = template_kwds.copy()
logger.diagnostic('%s abstract code:\n%s' % (name, indent(code_representation(abstract_code))))
scalar_code, vector_code, kwds = generator.translate(abstract_code,
dtype=prefs['core.default_float_dtype'])
# Add the array names as keywords as well
for varname, var in variables.items():
if isinstance(var, ArrayVariable):
pointer_name = generator.get_array_name(var)
if var.scalar:
pointer_name += '[0]'
template_kwds[varname] = pointer_name
if hasattr(var, 'resize'):
dyn_array_name = generator.get_array_name(var,
access_data=False)
template_kwds['_dynamic_'+varname] = dyn_array_name
template_kwds.update(kwds)
logger.diagnostic('%s snippet (scalar):\n%s' % (name, indent(code_representation(scalar_code))))
logger.diagnostic('%s snippet (vector):\n%s' % (name, indent(code_representation(vector_code))))
code = template(scalar_code, vector_code,
owner=owner, variables=variables, codeobj_name=name,
variable_indices=variable_indices,
get_array_name=generator.get_array_name,
**template_kwds)
logger.diagnostic('%s code:\n%s' % (name, indent(code_representation(code))))
codeobj = codeobj_class(owner, code, variables, variable_indices,
template_name=template_name,
template_source=template.template_source,
name=name, compiler_kwds=compiler_kwds)
codeobj.compile()
return codeobj
def activate(self, build_on_run=True, **kwargs):
'''
Called when this device is set as the current device.
'''
from brian2.core.clocks import Clock # avoid import issues
if self.defaultclock is None:
self.defaultclock = Clock(dt=0.1*ms, name='defaultclock')
self._set_maximum_run_time(None)
self.build_on_run = build_on_run
self.build_options = dict(kwargs)
def insert_device_code(self, slot, code):
# Deprecated
raise AttributeError("The method 'insert_device_code' has been renamed "
"to 'insert_code'.")
def insert_code(self, slot, code):
'''
Insert code directly into a given slot in the device. By default does nothing.
'''
logger.warn("Ignoring device code, unknown slot: %s, code: %s" % (slot, code))
def build(self, **kwds):
'''
For standalone projects, called when the project is ready to be built. Does nothing for runtime mode.
'''
pass
def reinit(self):
'''
Reinitialize the device. For standalone devices, clears all the internal
state of the device.
'''
pass
class RuntimeDevice(Device):
'''
The default device used in Brian, state variables are stored as numpy
arrays in memory.
'''
def __init__(self):
super(RuntimeDevice, self).__init__()
#: Mapping from `Variable` objects to numpy arrays (or `DynamicArray`
#: objects). Arrays in this dictionary will disappear as soon as the
#: last reference to the `Variable` object used as a key is gone
self.arrays = WeakKeyDictionary()
# Note that the buffers only store a pointer to the actual random
# numbers -- the buffer will be filled in weave/Cython code
self.randn_buffer = np.zeros(1, dtype=np.intp)
self.rand_buffer = np.zeros(1, dtype=np.intp)
self.randn_buffer_index = np.zeros(1, dtype=np.int32)
self.rand_buffer_index = np.zeros(1, dtype=np.int32)
def get_array_name(self, var, access_data=True):
# if no owner is set, this is a temporary object (e.g. the array
# of indices when doing G.x[indices] = ...). The name is not
# necessarily unique over several CodeObjects in this case.
owner_name = getattr(var.owner, 'name', 'temporary')
if isinstance(var, DynamicArrayVariable):
if access_data:
return '_array_' + owner_name + '_' + var.name
else:
return '_dynamic_array_' + owner_name + '_' + var.name
elif isinstance(var, ArrayVariable):
return '_array_' + owner_name + '_' + var.name
else:
raise TypeError(('Do not have a name for variable of type '
'%s') % type(var))
def add_array(self, var):
# This creates the actual numpy arrays (or DynamicArray objects)
if isinstance(var, DynamicArrayVariable):
if var.ndim == 1:
arr = DynamicArray1D(var.size, dtype=var.dtype)
else:
arr = DynamicArray(var.size, dtype=var.dtype)
else:
arr = np.empty(var.size, dtype=var.dtype)
self.arrays[var] = arr
def get_value(self, var, access_data=True):
if isinstance(var, DynamicArrayVariable) and access_data:
return self.arrays[var].data
else:
return self.arrays[var]
def set_value(self, var, value):
self.arrays[var][:] = value
def resize(self, var, new_size):
self.arrays[var].resize(new_size)
def resize_along_first(self, var, new_size):
self.arrays[var].resize_along_first(new_size)
def init_with_zeros(self, var, dtype):
self.arrays[var][:] = 0
def init_with_arange(self, var, start, dtype):
self.arrays[var][:] = np.arange(start, stop=var.get_len()+start,
dtype=dtype)
def fill_with_array(self, var, arr):
self.arrays[var][:] = arr
def spike_queue(self, source_start, source_end):
# Use the C++ version of the SpikeQueue when available
try:
from brian2.synapses.cythonspikequeue import SpikeQueue
logger.diagnostic('Using the C++ SpikeQueue', once=True)
except ImportError:
from brian2.synapses.spikequeue import SpikeQueue
logger.diagnostic('Using the Python SpikeQueue', once=True)
return SpikeQueue(source_start=source_start, source_end=source_end)
def seed(self, seed=None):
'''
Set the seed for the random number generator.
Parameters
----------
seed : int, optional
The seed value for the random number generator, or ``None`` (the
default) to set a random seed.
'''
np.random.seed(seed)
self.rand_buffer_index[:] = 0
self.randn_buffer_index[:] = 0
class Dummy(object):
'''
Dummy object
'''
def __getattr__(self, name):
return Dummy()
def __call__(self, *args, **kwds):
return Dummy()
def __enter__(self):
return Dummy()
def __exit__(self, type, value, traceback):
pass
def __getitem__(self, i):
return Dummy()
def __setitem__(self, i, val):
pass
class CurrentDeviceProxy(object):
'''
Method proxy for access to the currently active device
'''
def __getattr__(self, name):
if not hasattr(active_device, name):
if name.startswith('_'):
# Do not fake private/magic attributes
raise AttributeError(('Active device does not have an '
'attribute %s') % name)
else:
logger.warn(("Active device does not have an attribute '%s', "
"ignoring this") % name)
attr = Dummy()
else:
attr = getattr(active_device, name)
return attr
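# Illustrative behaviour (a sketch): attribute access on the proxy below
# forwards to the active device, so e.g. `device.build(...)` calls
# `active_device.build(...)`; an unknown non-private attribute only logs a
# warning and yields a no-op `Dummy` object instead of raising.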
#: Proxy object to access methods of the current device
device = CurrentDeviceProxy()
#: The currently active device (set with `set_device`)
active_device = None
def get_device():
'''
Gets the active `Device` object
'''
global active_device
return active_device
#: A stack of previously set devices as a tuple with their options (see
#: `set_device`): (device, build_on_run, build_options)
previous_devices = []
def set_device(device, build_on_run=True, **kwargs):
'''
Set the device used for simulations.
Parameters
----------
device : `Device` or str
The `Device` object or the name of the device.
build_on_run : bool, optional
Whether a call to `run` (or `Network.run`) should directly trigger a
`Device.build`. This is only relevant for standalone devices and means
that a run call directly triggers the start of a simulation. If the
simulation consists of multiple run calls, set ``build_on_run`` to
``False`` and call `Device.build` explicitly. Defaults to ``True``.
kwargs : dict, optional
Only relevant when ``build_on_run`` is ``True``: additional arguments
that will be given to the `Device.build` call.
'''
global previous_devices
if active_device is not None:
prev_build_on_run = getattr(active_device, 'build_on_run', True)
prev_build_options = getattr(active_device, 'build_options', {})
previous_devices.append((active_device,
prev_build_on_run,
prev_build_options))
_do_set_device(device, build_on_run, **kwargs)
def _do_set_device(device, build_on_run=True, **kwargs):
global active_device
if isinstance(device, str):
device = all_devices[device]
if active_device is not None and active_device.defaultclock is not None:
previous_dt = active_device.defaultclock.dt
else:
previous_dt = None
active_device = device
active_device.activate(build_on_run=build_on_run, **kwargs)
if previous_dt is not None:
# Copy over the dt information of the defaultclock
active_device.defaultclock.dt = previous_dt
def reset_device(device=None):
'''
Reset to a previously used device. Restores also the previously specified
build options (see `set_device`) for the device. Mostly useful for internal
Brian code and testing on various devices.
Parameters
----------
device : `Device` or str, optional
The device to go back to. If none is specified, go back to the device
chosen with `set_device` before the current one.
'''
global previous_devices
if isinstance(device, str):
device = all_devices[device]
if len(previous_devices) == 0 and device is None:
device = runtime_device
build_on_run = True
build_options = {}
elif device is None:
device, build_on_run, build_options = previous_devices.pop()
else:
build_on_run = device.build_on_run
build_options = device.build_options
_do_set_device(device, build_on_run, **build_options)
def reinit_devices():
'''
Reinitialize all devices, call `Device.activate` again on the current
device and reset the preferences. Used as a "teardown" function in testing,
if users want to reset their device (e.g. for multiple standalone runs in a
single script), calling ``device.reinit()`` followed by
``device.activate()`` should normally be sufficient.
Notes
-----
This also resets the `defaultclock`, i.e. a non-standard ``dt`` has to be
set again.
'''
from brian2 import restore_initial_state # avoids circular import
for device in all_devices.values():
device.reinit()
if active_device is not None:
# Reactivate the current device
reset_device(active_device)
restore_initial_state()
def seed(seed=None):
'''
Set the seed for the random number generator.
Parameters
----------
seed : int, optional
The seed value for the random number generator, or ``None`` (the
default) to set a random seed.
Notes
-----
This function delegates the call to `Device.seed` of the current device.
'''
if seed is not None and not isinstance(seed, numbers.Integral):
raise TypeError('Seed has to be None or an integer, was '
'%s' % type(seed))
get_device().seed(seed)
runtime_device = RuntimeDevice()
all_devices['runtime'] = runtime_device
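# Illustrative usage (a sketch, not part of the module):
#
#   set_device('runtime')                      # select the numpy-based device
#   seed(42)                                   # delegates to RuntimeDevice.seed
#   assert get_device() is all_devices['runtime']
#   reset_device()                             # pop back to the previous device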
| 35.711388
| 117
| 0.598052
|
9ff1a37e41f3b60527c9439cf56a08c734ae2b6d
| 846
|
py
|
Python
|
app/core/migrations/0004_recipe.py
|
Lucieo/cook-book-api
|
08dea66fc74d1131907d97eb6051a178851e6ce7
|
[
"MIT"
] | null | null | null |
app/core/migrations/0004_recipe.py
|
Lucieo/cook-book-api
|
08dea66fc74d1131907d97eb6051a178851e6ce7
|
[
"MIT"
] | null | null | null |
app/core/migrations/0004_recipe.py
|
Lucieo/cook-book-api
|
08dea66fc74d1131907d97eb6051a178851e6ce7
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.3 on 2020-03-01 16:28
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0003_ingredient'),
]
operations = [
migrations.CreateModel(
name='Recipe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('price', models.FloatField()),
('time_minute', models.IntegerField()),
('description', models.TextField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 31.333333
| 118
| 0.594563
|
cf21bfca97d8caaf1720eb1871c65d149393f614
| 10,852
|
py
|
Python
|
safe_agents/train.py
|
riveSunder/SafeAgents
|
4f473c250e385f431fb6cfc6eab70635b4ea1c39
|
[
"MIT"
] | 1
|
2020-03-18T20:39:14.000Z
|
2020-03-18T20:39:14.000Z
|
safe_agents/train.py
|
riveSunder/SafeAgents
|
4f473c250e385f431fb6cfc6eab70635b4ea1c39
|
[
"MIT"
] | null | null | null |
safe_agents/train.py
|
riveSunder/SafeAgents
|
4f473c250e385f431fb6cfc6eab70635b4ea1c39
|
[
"MIT"
] | null | null | null |
import numpy as np
import argparse
import time
from safe_agents.policies import MLP
from open_safety.envs.balance_bot_env import BalanceBotEnv
#from open_safety_gym.envs.kart_env import KartEnv
#from open_safety_gym.envs.hoverboard_env import HoverboardEnv
import skimage
import skimage.io as sio
def get_fitness(agent, env, epds, get_cost=True, max_steps=1000, save_frames=False):
#epd_rewards = []
#epd_costs = []
total_steps = 0
sum_reward = 0
sum_cost = 0
exp_id = str(int(time.time()))
for epd in range(epds):
steps = 0
done = False
obs = env.reset()
while not done and steps < max_steps:
action = agent.forward(obs)
if len(action.shape) > 1:
action = action.squeeze()
obs, reward, done, info = env.step(action)
sum_reward += reward
sum_cost += info["cost"]
steps += 1
if save_frames:
img = env.render()[2]
sio.imsave("./frames/id{}_epd{}_step{}.png".format(exp_id, epd, steps), img)
total_steps += steps
sum_reward /= max_steps * epds
sum_cost /= max_steps * epds
return sum_reward, sum_cost, total_steps
def get_elite_mean(population, reward, cost, cost_constraint=2.5, pure_rewards=None, rh=False):
if not(rh):
# Raise every cost below the constraint up to the constraint value, so
# all constraint-satisfying agents tie on cost; then sort by clipped
# cost (ascending) with reward (descending) breaking ties.
adjusted_cost = [max([cost_constraint, elem]) for elem in cost]
cost_fitness_agent = [[raw_cost, fit, agent.parameters]
for clipped_cost, raw_cost, fit, agent in \
sorted(zip(adjusted_cost, cost, pure_rewards, population),\
key=lambda quad: [-quad[0], quad[2]], reverse=True)]
fitness = [elem[1] for elem in cost_fitness_agent]
cost = [elem[0] for elem in cost_fitness_agent]
population = [elem[2] for elem in cost_fitness_agent]
else:
fitness_agent = [[fit, agent.parameters, my_cost, r] \
for fit, agent, my_cost, r in \
sorted(zip(reward, population, cost, pure_rewards),\
key = lambda trip: [trip[0]], reverse=True)
]
fitness = [elem[0] for elem in fitness_agent]
cost = [elem[2] for elem in fitness_agent]
population = [elem[1] for elem in fitness_agent]
my_rewards = [elem[3] for elem in fitness_agent]
keep = int(0.125 * len(population))
elite_pop = population[:keep]
elite_cost = cost[:keep]
elite_fitness = fitness[:keep]
if rh:
elite_rewards = my_rewards[:keep]
else:
elite_rewards = fitness[:keep]
print("population mean cost, rewards: {:.3e}, {:.3e}".format(\
np.mean(cost), np.mean(pure_rewards)))
print("elite mean cost, rewards: {:.3e}, {:.3e}".format(\
np.mean(elite_cost), np.mean(elite_rewards)))
param_sum = elite_pop[0]
for agent_idx in range(1,keep):
param_sum += elite_pop[agent_idx]
param_means = param_sum / keep
return [param_means, np.mean(cost), np.mean(elite_cost), \
np.mean(reward), np.mean(elite_rewards)]
def enjoy(env, input_dim, output_dim, max_episodes=1, model=None, \
steps_per_epd=1000, save_frames=False):
# hard-coded params
hid_dim = [32]
exp_id = "eval_{}".format(str(int(time.time())))
results = {"costs": [],
"rewards": [],
"steps": []}
pop_size = 1
population = [MLP(input_dim, output_dim, hid_dim) \
for ii in range(pop_size)]
if model is not None:
param_means = np.load(model)
for ll in range(pop_size):
population[ll].mean = param_means
population[ll].covar *= 1e-3
population[ll].init_params()
for epd in range(max_episodes):
reward, cost, steps = get_fitness(population[0], env, epds=1, get_cost=True,\
max_steps=steps_per_epd, save_frames=save_frames)
print("episode {} accumulated reward, cost = {:.3e}, {:.3e}".format(epd, reward, cost))
results["costs"].append(cost)
results["rewards"].append(reward)
results["steps"].append(steps)
mean_epd_cost = np.mean(results["costs"])
mean_epd_reward = np.mean(results["rewards"])
print("mean episodic reward and cost over {} episodes".format(max_episodes))
print(" reward : {:.3e} +/- {:.3e}".format(mean_epd_reward, np.std(results["rewards"])))
print(" cost : {:.3e} +/- {:.3e}".format(mean_epd_cost, np.std(results["costs"])))
def train_es(env, input_dim, output_dim, pop_size=6, max_gen=100, \
cost_constraint=0.0, cost_penalty=False,\
model=None, reward_hypothesis=False, steps_per_epd=1000):
# hard-coded policy parameters
hid_dim = [32]
es_lr = 1.0 #8e-1
reward_cost_ratio = 5
save_results = True
exp_id = "c{}_rh{}_{}".format(int(cost_constraint*10),\
bool(reward_hypothesis), str(int(time.time()))[-10:])
max_steps = steps_per_epd #2000 #np.min([2000, 250 + 10* gen])
difficulty_threshold = 0.01
# generate a population
population = [MLP(input_dim, output_dim, hid_dim) \
for ii in range(pop_size)]
if model is not None:
param_means = np.load(model)
for ll in range(pop_size):
population[ll].mean = param_means
population[ll].init_params()
results = {"costs": [],
"combined_rc": [],
"rewards": [],
"elite_costs": [],
"elite_rewards": [],
"steps": []}
t0 = time.time()
try:
for gen in range(max_gen):
fitnesses = []
costs = []
total_steps = []
rc_fitnesses = []
t1 = time.time()
for agent_idx in range(len(population)):
epds = 4 #np.max([1,int(10 - gen/10)])
reward, cost, steps = get_fitness(population[agent_idx],\
env, epds=epds, max_steps=max_steps)
fitnesses.append(reward)
total_steps.append(steps)
costs.append(cost)
rc_fitnesses.append(reward - cost / reward_cost_ratio)
if gen:
param_means *= (1 - es_lr)
cost_constraint = float("Inf") if reward_hypothesis else cost_constraint
gen_results = get_elite_mean(population, rc_fitnesses, costs, cost_constraint,\
pure_rewards=fitnesses, rh=reward_hypothesis)
param_means += es_lr * gen_results[0]
else:
cost_constraint = float("Inf") if reward_hypothesis else cost_constraint
gen_results = get_elite_mean(population, rc_fitnesses, costs, cost_constraint,\
pure_rewards=fitnesses, rh=reward_hypothesis)
param_means = gen_results[0]
for ll in range(pop_size):
population[ll].mean = param_means
population[ll].init_params()
results["costs"].append(gen_results[1])
results["elite_costs"].append(gen_results[2])
results["rewards"].append(gen_results[3])
results["elite_rewards"].append(gen_results[4])
results["steps"].append(np.sum(total_steps))
if save_results and gen % 50 == 0:
np.save("./results/means_{}_gen{}.npy".format(\
exp_id, gen), param_means)
np.save("./results/temp_{}_results.npy".format(\
exp_id), results)
elapsed = time.time() - t0
epd_elapsed = time.time() - t1
if min(costs) < difficulty_threshold:
# increase the difficulty (episode duration)
# if any agent manages to fall down for less than a threshold of time steps
max_steps = min([max_steps+1, env.max_steps])
print("gen. {} total steps: {} elapsed total: {:.3f} this gen. {:.3f} max_steps {}"\
.format(\
gen, np.sum(total_steps), elapsed, epd_elapsed, max_steps))
del fitnesses
del costs
del total_steps
except KeyboardInterrupt:
pass
if save_results:
np.save("./results/means_{}_gen{}.npy".format(\
exp_id, gen), param_means)
np.save("./results/temp_{}_results.npy".format(\
exp_id), results)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="args for safe agents")
parser.add_argument("-n", "--env_name", type=str,\
help="name of environment", default="BalanceBot-v0")
parser.add_argument("-a", "--algo", type=str,\
help="training algo", default="es")
parser.add_argument("-c", "--constraint", type=float,\
help="safety constraint", default=9e9)
parser.add_argument("-m", "--model", type=str,\
help="resume parameters stored at filepath (default None)", default=None)
parser.add_argument("-r", "--reward_hypothesis", type=bool,\
help="combine cost and reward (reward hypothesis)", default=False)
parser.add_argument("-p", "--pop_size",type=int,\
help="population size", default=64)
parser.add_argument("-e", "--enjoy", type=bool,\
help="render episodes", default = False)
parser.add_argument("-f", "--save_frames", type=bool,\
help="save frames from episodes", default = False)
parser.add_argument("-g", "--generations", type=int,\
help="number of generations", default=1024)
parser.add_argument("-s", "--steps_per_episode", type=int,\
help="steps per episode ;)", default=1000)
parser.add_argument("-hm", "--how_many_epds", type=int,\
help="number of episodes to enjoy", default=3)
args = parser.parse_args()
constraint = args.constraint
rh = args.reward_hypothesis
model = args.model
pop_size = args.pop_size
render = args.enjoy
save_frames = args.save_frames
steps_per_episode = args.steps_per_episode
max_generations = args.generations
env_name = args.env_name
env = BalanceBotEnv(render=render)
obs_dim = env.observation_space.sample().shape[0]
act_dim = env.action_space.sample().shape[0]
if not(args.enjoy):
train_es(env, obs_dim, act_dim, cost_constraint=constraint, pop_size=pop_size,\
max_gen=max_generations, model=model, reward_hypothesis=rh,\
steps_per_epd=steps_per_episode)
else:
enjoy(env, obs_dim, act_dim, max_episodes=args.how_many_epds,\
model=model, steps_per_epd=1000, save_frames=save_frames)
print("all oK")
| 34.018809
| 96
| 0.586712
|
6c12fac713ddeec07e522e7ace61bb3af3a8c529
| 44,835
|
py
|
Python
|
Blender Exporter/export_model.py
|
razor85/SegaSaturnTools
|
c3dd98937bd978da5641f603de0f3e4b3f32d8c6
|
[
"MIT"
] | 2
|
2021-04-22T13:19:43.000Z
|
2021-10-30T12:41:06.000Z
|
Blender Exporter/export_model.py
|
razor85/SegaSaturnTools
|
c3dd98937bd978da5641f603de0f3e4b3f32d8c6
|
[
"MIT"
] | null | null | null |
Blender Exporter/export_model.py
|
razor85/SegaSaturnTools
|
c3dd98937bd978da5641f603de0f3e4b3f32d8c6
|
[
"MIT"
] | null | null | null |
# Copyright 2020-2021 Romulo Fernandes Machado Leitao <romulo@castorgroup.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import bpy
import bmesh
import copy
import ctypes
import math
import mathutils
import os
import pathlib
import gpu
import sys
import socket
from socket import ntohl
from socket import ntohs
from bpy import *
from math import *
from mathutils import Vector
from pathlib import Path
from bpy.app.handlers import persistent
bl_info = {
"name": "Export Sega Saturn Model (.ssm)",
"category": "Import-Export",
}
# Face flags
FLAG_DITHERING = 1
FLAG_TRANSPARENCY = 2
FLAG_IGNORE_FACE_SIZE = 4
FLAG_GOURAUD = 8
FLAG_SHADOW = 16
EXPORTER_SETTINGS_NAME = 'saturn_ssm_exporter_settings'
TEXTURE_WRITE_TEXTURE_STR = '__texture__'
TEXTURE_WRITE_ATLAS_STR = '__atlas__'
TEXTURE_WRITE_NONE = 'textureWriteNone'
TEXTURE_WRITE_TEX_ONLY = TEXTURE_WRITE_TEXTURE_STR
TEXTURE_WRITE_ATLAS_ONLY = TEXTURE_WRITE_ATLAS_STR
TEXTURE_WRITE_TEX_ATLAS = '{}&&{}'.format(TEXTURE_WRITE_TEXTURE_STR,
TEXTURE_WRITE_ATLAS_STR)
# Fields to save/load on exporter dialog
exportFields = ['filepath', 'animActions', 'checkForDuplicatedTextures',
'useAO', 'texturesSizeByArea', 'minimumTextureSize',
'outputTextureSize', 'saveLogFile', 'textureWriteOptions']
# Hold BMesh for each mesh.
globalMeshes = {}
def interpolateUv(a, b, amount):
assert amount <= 1, 'Amount must be in the [0, 1] interval'
assert amount >= 0, 'Amount cannot be negative'
newUv = [ 0, 0 ]
dist0 = b[0] - a[0]
newUv[0] = a[0] + (amount * dist0)
dist1 = b[1] - a[1]
newUv[1] = a[1] + (amount * dist1)
return newUv
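# Worked example: interpolateUv([0.0, 0.0], [1.0, 0.5], 0.5) returns
# [0.5, 0.25] -- a plain per-component linear interpolation.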
def writeVector(filePtr, v):
filePtr.write(fixedPoint(v.x))
filePtr.write(fixedPoint(v.y))
filePtr.write(fixedPoint(v.z))
def writeBlenderVector(filePtr, v):
filePtr.write(fixedPoint(v.x))
filePtr.write(fixedPoint(v.z))
filePtr.write(fixedPoint(-v.y))
def closePowerOf2(value, maximum):
newValue = 1
while newValue <= value:
newValue *= 2
return min(newValue, maximum)
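# Worked examples: closePowerOf2(5, 128) returns 8, and exact powers round
# up, e.g. closePowerOf2(8, 128) returns 16; the result is capped at `maximum`.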
def getMeshForFaceIteration(obj):
global globalMeshes
if obj.name in globalMeshes:
objMesh = globalMeshes[obj.name]
else:
objMesh = bmesh.new()
objMesh.from_mesh(obj.data)
objMesh.faces.ensure_lookup_table()
return objMesh
def hasArmature(obj):
return obj.parent is not None and obj.parent.type == 'ARMATURE'
def getVertexBoneIndex(obj, armature, vertex):
groupIndex = vertex.groups[0].group
boneName = obj.vertex_groups[groupIndex].name
return armature.data.bones.find(boneName)
def getBoneIndices(obj):
assert obj.parent.type == 'ARMATURE', 'Object parent must be an armature'
armature = obj.parent
boneIndices = []
for vIndex, vertex in enumerate(obj.data.vertices):
boneIndices.append(getVertexBoneIndex(obj, armature, vertex))
return boneIndices
def removeGamma(pixel, gamma):
newPixel = []
for c in pixel:
newPixel.append(pow(c, 1.0 / gamma))
maxComponent = max(pixel[:3])
if maxComponent > 1.0:
diff = maxComponent - 1.0
newPixel[0] -= diff
newPixel[1] -= diff
newPixel[2] -= diff
for i in range(0, 3):
assert newPixel[i] <= 1.0, 'Pixel is not in conversion interval'
newPixel[i] = int(newPixel[i] * 255)
return newPixel
def rgb555FromPixel(pixel):
normalPixel = removeGamma(pixel, 1.8)
r = ctypes.c_uint8(normalPixel[0])
g = ctypes.c_uint8(normalPixel[1])
b = ctypes.c_uint8(normalPixel[2])
rgb555 = ctypes.c_uint16(((b.value >> 3) << 10)
| ((g.value >> 3) << 5)
| ((r.value >> 3) << 0)).value
return rgb555
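# Worked example: a pure white pixel packs to 0x7fff -- 5 bits per channel
# with red in the low bits and blue in the high bits, as assembled above.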
# image = [width, height, bpp, pixels]
def getPixel(uv, image):
gammaValue = 1.8
if uv[0] < -1:
uv[0] = 1.0 - (uv[0] - int(uv[0]))
elif uv[0] < 0:
uv[0] = 1.0 + uv[0]
elif uv[0] > 1:
uv[0] = uv[0] - int(uv[0])
if uv[1] < -1:
uv[1] = 1.0 - (uv[1] - int(uv[1]))
elif uv[1] < 0:
uv[1] = 1.0 + uv[1]
elif uv[1] > 1:
uv[1] = uv[1] - int(uv[1])
width, height, bpp, pixels = image
x = int(round(width * uv[0]))
y = int(round(height * uv[1]))
x %= width
y %= height
baseIndex = int(y * width * bpp + x * bpp)
assert baseIndex < len(pixels), 'Pixel index is out of bounds'
newPixels = [ pow(pixel, gammaValue) for pixel in pixels[ baseIndex:baseIndex + bpp ] ]
if len(newPixels) == 3:
newPixels.append(1.0)
return newPixels
def getColor(color):
intColor = [(int)(color[0] * 255), (int)
(color[1] * 255), (int)(color[2] * 255)]
return (intColor[0] << 16) | (intColor[1] << 8) | intColor[2]
def fixedPoint(value):
convertedValue = int(value * 65536.0)
return ctypes.c_int32(ntohl(ctypes.c_uint32(convertedValue).value))
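# Worked example: fixedPoint(1.5) encodes the 16.16 raw value 0x00018000,
# which ntohl then byte-swaps to big-endian on little-endian hosts.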
def applyOp(func, data):
dataCopy = copy.deepcopy(data)
for i in range(0, len(data)):
dataCopy[i] = func(data[i])
return dataCopy
def getEditBoneInverseEuler(bone):
euler = bone.id_data.convert_space(pose_bone=bone,
matrix=bone.matrix,
from_space='POSE').to_euler()
euler.rotate(mathutils.Euler([math.radians(-90), 0, 0]))
return euler.to_matrix().inverted_safe().to_euler()
def getPoseBoneEuler(bone):
matrix = bone.id_data.convert_space(pose_bone=bone,
matrix=bone.matrix,
from_space='POSE')
rot90 = mathutils.Euler([math.radians(-90), 0, 0]).to_matrix().to_4x4()
rotatedMatrix = rot90 @ matrix
return rotatedMatrix.to_3x3().to_euler(), rotatedMatrix.to_translation()
def selectObj(obj):
bpy.ops.object.select_all(action='DESELECT')
obj.select_set(True)
bpy.context.view_layer.objects.active = obj
def setArmaturePose(armature, pose):
armature.data.pose_position = pose
armature.data.update_tag()
context.scene.frame_set(context.scene.frame_current)
class PoseBone:
def __init__(self, name, head, tail, euler):
self.name = name
self.head = head
self.tail = tail
self.euler = euler
self.numVertices = 0
self.numFaces = 0
def getPoseData(obj):
if not obj.parent or obj.parent.type != 'ARMATURE':
return None
armature = obj.parent
# Toggle rest position
oldPosition = armature.data.pose_position
setArmaturePose(armature, 'REST')
bones = armature.pose.bones.values()
poseData = {}
poseData['bones'] = []
for bone in bones:
invertedEuler = getEditBoneInverseEuler(bone)
head = copy.deepcopy(bone.head)
tail = copy.deepcopy(bone.tail)
poseData['bones'].append(PoseBone(bone.name,
head,
tail,
invertedEuler))
setArmaturePose(armature, oldPosition)
poseData['vertices'] = []
objMesh = obj.to_mesh()
for v in objMesh.vertices:
vGroup = v.groups[0]
assert vGroup.weight >= 1.0, 'Only 1.0 weighted vertices are allowed'
boneName = obj.vertex_groups[vGroup.group].name
boneIndex = armature.data.bones.find(boneName)
poseData['vertices'].append(boneIndex)
assert boneIndex < len(poseData['bones']), 'BoneIndex out of bounds'
poseData['bones'][boneIndex].numVertices += 1
# Calculate number of faces per bone
objMesh = getMeshForFaceIteration(obj)
for polyIndex, poly in enumerate(objMesh.faces):
firstIndex = obj.data.loops[poly.loops[0].index].vertex_index
firstVertex = obj.data.vertices[firstIndex]
boneIndex = getVertexBoneIndex(obj, armature, firstVertex)
assert boneIndex < len(poseData['bones']), 'BoneIndex out of bounds'
poseData['bones'][boneIndex].numFaces += 1
return poseData
def getAnimation(obj, actionName):
if not obj.parent or obj.parent.type != 'ARMATURE' or actionName not in bpy.data.actions:
return None
armature = obj.parent
action = bpy.data.actions[actionName]
originalAction = armature.animation_data.action
armature.animation_data.action = action
frame_range = action.frame_range
saveFrame = copy.deepcopy(bpy.context.scene.frame_current)
frames = []
for index in range(int(frame_range[0]), int(frame_range[1])):
bpy.context.scene.frame_set(index)
newFrame = []
for boneIndex, bone in enumerate(armature.pose.bones):
euler, position = getPoseBoneEuler(bone)
newFrame.append([boneIndex, bone.name, position, euler])
frames.append(copy.deepcopy(newFrame))
armature.animation_data.action = originalAction
bpy.context.scene.frame_set(saveFrame)
return frames
def drawSuccessMessage(self, context):
self.layout.label(text='Exporting successful!')
class ExportSegaSaturnModel(bpy.types.Operator):
"""Export blender objects to Sega Saturn model"""
bl_idname = "export.to_saturn"
bl_label = "Export Saturn Model (.SSM)"
bl_options = {'PRESET'}
filepath: bpy.props.StringProperty(subtype='FILE_PATH')
animActions: bpy.props.StringProperty(name="Actions",
description="Comma separated list of actions to export")
checkForDuplicatedTextures: bpy.props.BoolProperty(name="Check for duplicated textures",
description="Check for duplicated textures",
default = True)
useAO: bpy.props.BoolProperty(name="Use AO",
description="Use secondary texture/uv as AO",
default = False)
texturesSizeByArea: bpy.props.BoolProperty(name="Use area to discover texture size",
description="Use area to discover texture size",
default = True)
saveLogFile: bpy.props.BoolProperty(name="Save Log",
description="Generate a log of the export",
default = True)
minimumTextureSize: bpy.props.IntProperty(name="Minimum Texture size",
description="Minimum texture size",
default = 8,
max = 128,
min = 2,
step = 2)
# Resolution of output textures.
outputTextureSize: bpy.props.IntProperty(name="Texture Size (size x size)",
description="Texture Size (size x size)",
default = 16,
max = 128,
min = 2,
step = 2)
textureWriteOptionsEnum = [
(TEXTURE_WRITE_ATLAS_ONLY, 'Atlas Only', ''),
( TEXTURE_WRITE_TEX_ONLY, 'Texture Only', ''),
( TEXTURE_WRITE_TEX_ATLAS, 'Texture and Atlas', ''),
( TEXTURE_WRITE_NONE, 'None', '')]
textureWriteOptions: bpy.props.EnumProperty(items=textureWriteOptionsEnum,
name="Texture Export",
description="",
default = 0)
dontSaveSettings: bpy.props.BoolProperty(name="Dont save settings",
description="Dont save those settings",
default = False)
# Keep textures by hash.
modelTextures = {}
# Keep texture bytes to make the atlas.
modelTextureData = []
# Index to each face texture file.
faceTextures = []
faceTextureSizes = []
# Remap vertex position so we output all the vertices from the same
# bone weight at once
remappedVertexPositions = []
largestArea = 0
facesOffset = 0
verticesOffset = 0
exportedFaces = 0
exportedVertices = 0
def __init__(self):
if EXPORTER_SETTINGS_NAME in bpy.context.scene:
settings = bpy.context.scene[EXPORTER_SETTINGS_NAME]
for field in exportFields:
if field in settings:
setattr(self, field, settings[field])
@classmethod
def poll(cls, context):
return bpy.context.selected_objects is not None
def findMinimumScale(self):
assert len(bpy.context.selected_objects) > 0, 'No models selected'
obj = bpy.context.selected_objects[0]
objMesh = obj.data
smallestValue = sys.float_info.max
for f in objMesh.polygons:
for v in f.vertices:
smallestValue = min(objMesh.vertices[v].co[0], smallestValue)
smallestValue = min(objMesh.vertices[v].co[1], smallestValue)
smallestValue = min(objMesh.vertices[v].co[2], smallestValue)
scale = 1.0
while abs(smallestValue * scale) < 1.0:
scale *= 10
# print("Found scale of {}".format(scale))
return scale
def getMaximumArea(self):
assert len(bpy.context.selected_objects) > 0, 'No models selected'
obj = bpy.context.selected_objects[0]
objMesh = getMeshForFaceIteration(obj)
area = 0
if objMesh.faces.layers.int.get("FaceFlags") is None:
objMesh.faces.layers.int.new("FaceFlags")
flagsLayer = objMesh.faces.layers.int.get("FaceFlags")
for polygon in objMesh.faces:
if (polygon[flagsLayer] & FLAG_IGNORE_FACE_SIZE) != 0:
continue
faceArea = polygon.calc_area()
if faceArea > area:
area = faceArea
return area
def writeFaces(self, filePtr, logFilePtr):
self.facesOffset = filePtr.tell()
if logFilePtr:
logFilePtr.write("=============================================\n")
logFilePtr.write("Starting writing faces at {} ({})\n".format(self.facesOffset,
hex(self.facesOffset)))
minimumScale = self.minimumScale
totalIndex = 0
vertexCount = 0
faceIndex = 0
assert len(bpy.context.selected_objects) > 0, 'No models selected'
obj = bpy.context.selected_objects[0]
objMesh = getMeshForFaceIteration(obj)
if objMesh.faces.layers.int.get("FaceFlags") is None:
objMesh.faces.layers.int.new("FaceFlags")
flagsLayer = objMesh.faces.layers.int.get("FaceFlags")
if logFilePtr:
logFilePtr.write("Obj '{}' faces start at vertex index {}\n".format(obj.name, vertexCount))
for polyIndex, poly in enumerate(objMesh.faces):
indices = []
if len(poly.loops) == 3:
indices = [loop.index for loop in poly.loops[0:3]]
elif len(poly.loops) == 4:
indices = [loop.index for loop in poly.loops[0:4]]
for loop_index in reversed(indices):
index = ctypes.c_uint16(ntohs(obj.data.loops[loop_index].vertex_index + vertexCount))
filePtr.write(index)
if len(poly.loops) == 3:
index = ctypes.c_uint16(ntohs(obj.data.loops[indices[0]].vertex_index + vertexCount))
filePtr.write(index)
filePtr.write(ctypes.c_uint16(ntohs(self.faceTextures[faceIndex])))
filePtr.write(ctypes.c_uint8(self.faceTextureSizes[faceIndex]))
filePtr.write(ctypes.c_uint8(poly[flagsLayer]))
writeBlenderVector(filePtr, poly.normal)
if logFilePtr:
logIndices = [obj.data.loops[x].vertex_index + vertexCount for x in indices]
if len(logIndices) == 3:
logIndices.append(logIndices[0])
logStr = '{} / IDX {}, {}, {}, {} / TEX {} / TEXSIZE {} / FLAG {} / N {}\n'
logFilePtr.write(logStr.format(polyIndex,
*logIndices,
self.faceTextures[faceIndex],
self.faceTextureSizes[faceIndex],
poly[flagsLayer],
str(poly.normal)))
faceIndex += 1
totalIndex += 4
# Add vertex count of active mesh to increase indices on next
# object.
vertexCount += len(obj.data.vertices)
def writeVertices(self, filePtr, logFilePtr):
self.verticesOffset = filePtr.tell()
if logFilePtr:
logFilePtr.write("=============================================\n")
logFilePtr.write("Starting writing vertices at {} ({})".format(self.verticesOffset,
hex(self.verticesOffset)))
assert len(bpy.context.selected_objects) > 0, 'No models selected'
obj = bpy.context.selected_objects[0]
objMesh = obj.data
minimumScale = self.minimumScale
if logFilePtr:
logFilePtr.write("{} vertices:\n".format(obj.name))
for vIndex, v in enumerate(objMesh.vertices):
copyV = v.co.copy()
writeBlenderVector(filePtr, copyV * minimumScale)
if logFilePtr:
logFilePtr.write("{} = {} {} {}\n".format(vIndex, v.co[0], v.co[2], -v.co[1]))
fV0 = fixedPoint( copyV.x)
fV1 = fixedPoint( copyV.z)
fV2 = fixedPoint(-copyV.y)
logFilePtr.write(" {} {} {}\n".format(fV0.value, fV1.value, fV2.value))
# Extract pixel values for the passed indices of a polygon.
# indices = Indices of the vertices of the polygon.
# uvLayer = UVW map layer to extract information from.
# image = Image where to search the values from.
# outTextureSize = Size of the size of output texture.
def extractFaceTexturePixels(self, indices, uvLayer, image, outTextureSize):
outputSize = outTextureSize
outputWidth = outTextureSize
outputHeight = outTextureSize
uvs = [ uvLayer[ indices[ x ] ].uv for x in range(0, len(indices)) ]
if len(indices) == 3:
uv0 = uvs[ 2 ]
uv1 = uvs[ 1 ]
uv2 = uvs[ 0 ]
uv3 = uvs[ 0 ]
else:
uv0 = uvs[ 3 ]
uv1 = uvs[ 2 ]
uv2 = uvs[ 1 ]
uv3 = uvs[ 0 ]
outputBytes = []
for y in range(0, outputHeight):
percentY = 1.0 - (y / (outputHeight - 1))
for x in range(0, outputWidth):
percentX = x / (outputWidth - 1)
# Left vertex (in Y) interpolates between index 0 and 3
# Right vertex (in Y) interpolates between index 1 and 2
leftVertex = interpolateUv(uv0, uv3, percentY)
rightVertex = interpolateUv(uv1, uv2, percentY)
finalVertexPos = interpolateUv(leftVertex, rightVertex, percentX)
pixelValue = getPixel(finalVertexPos, image)
outputBytes.extend(pixelValue)
return outputBytes
# polygonIndex = Index of polygon in model.
# objData = All model data.
# indices = Indices for each vertex/uv in face.
# uvLayer = uvLayer to recover uv's from
# image = [width, height, bpp, pixels]
# texturesDir = Path() to textures directory
def extractFaceTexture(self, polygonIndex, objData, indices, uvLayer,
image, outTextureSize, texturesDir, aoLayer,
logFilePtr):
outputSize = outTextureSize
outputWidth = outTextureSize
outputHeight = outTextureSize
uvs = [ uvLayer[ indices[ x ] ].uv for x in range(0, len(indices)) ]
outputBytes = self.extractFaceTexturePixels(indices, uvLayer, image,
outTextureSize)
if aoLayer != None:
assert len(aoLayer) == len(outputBytes), 'Image and AO must have the same size'
for pixel in range(0, len(aoLayer)):
outputBytes[pixel] *= aoLayer[pixel]
hasTransparency = False
reusedTexture = False
# Do we have a texture with that info?
texturesMatched = False
if self.checkForDuplicatedTextures == True:
textureHash = hash(frozenset(outputBytes))
if textureHash in self.modelTextures:
existingTexture = self.modelTextures[ textureHash ]
texturesMatched = (outputBytes == existingTexture[0])
self.faceTextureSizes.append(int(outputSize))
if texturesMatched and self.checkForDuplicatedTextures == True:
reusedTexture = True
self.faceTextures.append( existingTexture[ 1 ] )
if logFilePtr:
logStr = "Duplicate of texture found on polygon {}, same as {}!\n"
logFilePtr.write(logStr.format(polygonIndex, existingTexture[1]))
else:
if self.checkForDuplicatedTextures == True:
self.modelTextures[ textureHash ] = [ outputBytes, polygonIndex ]
newTextureData = []
for i in range(0, outputHeight):
lineIndex = (outputHeight - 1 - i) * outputWidth * 4
lineRef = outputBytes[lineIndex:lineIndex + outputWidth * 4]
newTextureData.extend(lineRef)
self.modelTextureData.append( [ newTextureData[:], polygonIndex ] )
self.faceTextures.append( polygonIndex )
if self.textureWriteOptions.find(TEXTURE_WRITE_TEXTURE_STR) >= 0:
newImageSavePath = Path(texturesDir / '{}.PNG'.format(polygonIndex))
newImage = bpy.data.images.new(name='tmpImg_{}'.format(polygonIndex),
width=outputWidth,
height=outputHeight,
alpha=False,
float_buffer=True)
newImage.pixels = outputBytes
newImage.filepath_raw = str(newImageSavePath)
newImage.file_format = 'PNG'
newImage.save()
bpy.data.images.remove(newImage)
return reusedTexture
def getTextures(self, objData, matIndex):
textures = []
materials = objData.materials[matIndex]
nodes = materials.node_tree.nodes
textures.extend([n for n in nodes if n.type == 'TEX_IMAGE'])
return textures
def writeTextureAtlas(self, logFilePtr):
filePath = Path(self.filepath)
filePathTexturesDir = filePath.parents[0] / filePath.stem
filePathTexturesDir.mkdir(parents=True, exist_ok=True)
atlasPath = filePathTexturesDir / Path('ATLAS')
with atlasPath.open("wb") as filePtr:
for texture, index in self.modelTextureData:
assert len(texture) % 4 == 0, "Texture bytes should be a multiple of 4."
if logFilePtr:
logFilePtr.write("Writing texture {} to atlas\n".format(index))
for pixelIndex in range(0, int(len(texture) / 4)):
realIndex = pixelIndex * 4
pixel = texture[realIndex:realIndex + 4]
rgb555 = ctypes.c_uint16(ntohs(0x8000 | rgb555FromPixel(pixel)))
filePtr.write(rgb555)
def extractModelTextures(self, logFilePtr):
filePath = Path(self.filepath)
filePathNoExt = filePath.parents[0] / filePath.stem
filePathTexturesDir = filePath.parents[0] / filePath.stem
if self.textureWriteOptions.find(TEXTURE_WRITE_TEXTURE_STR) >= 0:
filePathTexturesDir.mkdir(parents=True, exist_ok=True)
if logFilePtr:
logFilePtr.write("=============================================\n")
logFilePtr.write("TextureDir: {}\n".format(filePathTexturesDir))
assert len(bpy.context.selected_objects) > 0, 'No models selected'
obj = bpy.context.selected_objects[0]
objData = obj.data
polygonIndex = 0
# Extract pixels from UV.
assert len(objData.uv_layers) > 0, 'Object must have a UV channel'
uvLayer = objData.uv_layers[0].data
aoLayer = None
if self.useAO:
assert len(objData.uv_layers) > 1, 'Object must have a secondary UV channel'
aoLayer = objData.uv_layers[1].data
for polyIndex, poly in enumerate(objData.polygons):
matIndex = poly.material_index
assert matIndex != None, 'Object must have a material'
textures = self.getTextures(objData, matIndex)
assert len(textures) > 0, 'Object material must have a texture'
assert hasattr(textures[0], 'image'), 'Object material must have a texture image'
texImgName = textures[0].image.name
imgData = bpy.data.images[texImgName]
width = imgData.size[0]
height = imgData.size[1]
bpp = imgData.channels
outTextureSize = self.outputTextureSize
if self.texturesSizeByArea:
approximatedSize = (poly.area / self.largestArea) * self.outputTextureSize
outTextureSize = closePowerOf2(approximatedSize, self.outputTextureSize)
if outTextureSize < self.minimumTextureSize:
outTextureSize = self.minimumTextureSize
if logFilePtr:
logStr = "Face {} image '{}' ({}x{}@{} => {}x{})\n"
logFilePtr.write(logStr.format(polyIndex,
texImgName,
width,
height,
bpp,
self.outputTextureSize,
self.outputTextureSize))
# Extract pixels that way, otherwise it will be slow as hell
# because we would be accessing bpy_prop_array instead of a list.
pixels = imgData.pixels[:]
imgProperties = [width, height, bpp, pixels]
assert bpp == 3 or bpp == 4, 'Only 24BPP or 32BPP images are supported'
indices = None
if len(poly.loop_indices) == 3:
indices = poly.loop_indices[0:3]
elif len(poly.loop_indices) == 4:
indices = poly.loop_indices[0:4]
aoTexture = None
if aoLayer != None:
aoImgName = textures[1].image.name
aoData = bpy.data.images[aoImgName]
assert imgData.channels == aoData.channels, 'Texture and AO must have the same bpp'
aoProperties = [aoData.size[0], aoData.size[1], aoData.channels, aoData.pixels[:]]
aoTexture = self.extractFaceTexturePixels(indices, aoLayer,
aoProperties,
outTextureSize)
assert indices is not None, "Polygon must have 3 or 4 vertices"
reusedTexture = self.extractFaceTexture(polygonIndex, objData,
indices, uvLayer,
imgProperties,
outTextureSize,
filePathTexturesDir,
aoTexture, logFilePtr)
if not reusedTexture:
polygonIndex += 1
def writeAnimationHeader(self, filePtr, logFilePtr, poseData):
bones = poseData['bones']
vertices = poseData['vertices']
# Count only bones that affect vertices
validBones = [b for b in bones if b.numVertices > 0 and b.numFaces > 0]
filePtr.write(ctypes.c_uint16(ntohs(len(validBones))))
if logFilePtr:
logFilePtr.write("=============================================\n")
logFilePtr.write("Bones ({}) starting at {} ({})\n".format(len(validBones),
filePtr.tell(),
hex(filePtr.tell())))
for bIndex, bone in enumerate(validBones):
writeBlenderVector(filePtr, bone.head)
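# The sin/cos of each bone's Euler angles are pre-computed and stored,
# presumably so the Saturn-side playback code can skip runtime trigonometry.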
sinEuler = applyOp(math.sin, bone.euler)
cosEuler = applyOp(math.cos, bone.euler)
writeVector(filePtr, sinEuler)
writeVector(filePtr, cosEuler)
filePtr.write(ctypes.c_uint32(ntohl(bone.numVertices)))
filePtr.write(ctypes.c_uint32(ntohl(bone.numFaces)))
if logFilePtr:
logStr = "Bone {} '{}': START {} / EULER {} / VCOUNT: {} / FCOUNT: {}\n"
logFilePtr.write(logStr.format(bIndex,
bone.name,
bone.head,
bone.euler,
bone.numVertices,
bone.numFaces))
def writeAnimation(self, filePtr, logFilePtr, animationData, poseData):
if logFilePtr:
logFilePtr.write("=============================================\n")
logStr = "Animation starting at {} ({}) with {} frames\n"
logFilePtr.write(logStr.format(filePtr.tell(),
hex(filePtr.tell()),
len(animationData)))
poseBones = poseData['bones']
frames = animationData
for frameIndex, frame in enumerate(frames):
if logFilePtr:
logStr = "Frame {} starting at {} ({})\n"
logFilePtr.write(logStr.format(frameIndex,
filePtr.tell(),
hex(filePtr.tell())))
for bone in frame:
index, name, position, euler = bone
poseBone = poseBones[index]
if poseBone.numVertices == 0 or poseBone.numFaces == 0:
if logFilePtr:
logFilePtr.write(" Skip Bone {} - {} - {} - {}\n".format(*bone))
logFilePtr.write(" Matrix: {}\n".format(euler.to_matrix()))
else:
if logFilePtr:
logFilePtr.write(" Bone {} - {} - {} - {}\n".format(*bone))
logFilePtr.write(" Matrix: {}\n".format(euler.to_matrix()))
writeVector(filePtr, position)
writeVector(filePtr, applyOp(math.sin, euler))
writeVector(filePtr, applyOp(math.cos, euler))
if logFilePtr:
logFilePtr.write("\n")
# Sort object vertices and faces by bone index; this keeps the
# transformation cache efficient.
def updateObjVerticesAndFaces(self, obj):
if not obj.parent or obj.parent.type != 'ARMATURE':
return
selectObj(obj)
boneIndices = getBoneIndices(obj)
bpy.ops.object.mode_set(mode='EDIT')
mesh = bmesh.from_edit_mesh(obj.data)
sortedVertices = []
for vertex in mesh.verts:
sortedVertices.append([vertex.index, boneIndices[vertex.index]])
sortedVertices.sort(key=lambda v: v[1])
mesh.verts.ensure_lookup_table()
vertexCounter = 0
for vertex in sortedVertices:
mesh.verts[vertex[0]].index = vertexCounter
vertexCounter += 1
mesh.verts.sort()
bmesh.update_edit_mesh(obj.data)
bpy.ops.object.mode_set(mode='OBJECT')
# Now sort faces with same criteria, but get the bone ids from
# the 'normal' mesh data.
mesh = getMeshForFaceIteration(obj)
sortedFaces = []
armature = obj.parent
for polyIndex, poly in enumerate(mesh.faces):
firstIndex = obj.data.loops[poly.loops[0].index].vertex_index
firstVertex = obj.data.vertices[firstIndex]
boneIndex = getVertexBoneIndex(obj, armature, firstVertex)
sortedFaces.append([polyIndex, boneIndex])
sortedFaces.sort(key=lambda v: v[1])
# Apply sorted faces
bpy.ops.object.mode_set(mode='EDIT')
mesh = bmesh.from_edit_mesh(obj.data)
mesh.verts.ensure_lookup_table()
faceCounter = 0
for faceIndex, boneIndex in sortedFaces:
mesh.faces[faceIndex].index = faceCounter
faceCounter += 1
mesh.faces.sort()
bmesh.update_edit_mesh(obj.data)
bpy.ops.object.mode_set(mode='OBJECT')
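# In miniature, the reordering above is a stable sort on bone index,
# e.g. sorted(range(len(verts)), key=lambda i: boneIndices[i]);
# grouping vertices and faces of the same bone together keeps the
# per-bone transformation cache effective.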
def writeModelData(self, context, filePtr, logFilePtr):
assert self.outputTextureSize % 2 == 0, "Texture size must be a multiple of 2"
if EXPORTER_SETTINGS_NAME not in bpy.context.scene:
bpy.context.scene[EXPORTER_SETTINGS_NAME] = {}
settings = context.scene[EXPORTER_SETTINGS_NAME]
if logFilePtr:
logFilePtr.write("Model: {}\n".format(self.filepath))
self.faceTextures = []
self.faceTextureSizes = []
self.modelTextures = {}
self.modelTextureData = []
self.minimumScale = 1.0
self.exportedFaces = 0
self.exportedVertices = 0
self.largestArea = self.getMaximumArea()
faceCount = 0
vertexCount = 0
assert len(bpy.context.selected_objects) > 0, 'No models selected'
obj = bpy.context.selected_objects[0]
self.updateObjVerticesAndFaces(obj)
objMesh = obj.data
objMesh.update()
objMesh.calc_tangents()
vertexCount += len(objMesh.vertices)
for f in objMesh.polygons:
assert len(f.vertices) == 3 or len(f.vertices) == 4, 'Only triangles and quads supported'
faceCount += 1
# Start writing to file
filePtr.write(bytes([0x53, 0x41, 0x54, 0x2E]))  # Magic header: "SAT."
filePtr.write(ctypes.c_uint16(ntohs(faceCount)))
filePtr.write(ctypes.c_uint16(ntohs(vertexCount)))
if logFilePtr:
logFilePtr.write("FaceCount: {}\n".format(faceCount))
logFilePtr.write("VertexCount: {}\n".format(vertexCount))
# Offset where Faces and vertices start.
offsetInFileForOffsets = filePtr.tell()
filePtr.write(ctypes.c_uint16(ntohs(0)))
filePtr.write(ctypes.c_uint16(ntohs(0)))
# Extract textures and associate them with faces.
self.extractModelTextures(logFilePtr)
# Faces for frame
self.writeFaces(filePtr, logFilePtr)
# Vertices for frame
self.writeVertices(filePtr, logFilePtr)
filePtr.seek(offsetInFileForOffsets, 0)
filePtr.write(ctypes.c_uint16(ntohs(self.facesOffset)))
filePtr.write(ctypes.c_uint16(ntohs(self.verticesOffset)))
# Write texture atlas.
if self.textureWriteOptions.find(TEXTURE_WRITE_ATLAS_STR) >= 0:
self.writeTextureAtlas(logFilePtr)
def writeModelAnimationData(self, context, filePtr, logFilePtr):
assert len(bpy.context.selected_objects) > 0, 'No models selected'
obj = bpy.context.selected_objects[0]
poseData = getPoseData(obj)
if poseData and logFilePtr:
logFilePtr.write("=============================================\n")
logFilePtr.write('Model has pose data, writing\n')
allActions = self.animActions.split(",")
hasActions = False
for animName in allActions:
if animName in bpy.data.actions:
hasActions = True
break
if hasActions and poseData:
filePath = Path(self.filepath).with_suffix('.SSA')
if logFilePtr:
logFilePtr.write("Saving SSA to '{}'\n".format(self.filepath))
allAnimationData = []
totalNumFrames = 0
for animName in allActions:
animationData = getAnimation(obj, animName)
allAnimationData.append(animationData)
if animationData is not None:
totalNumFrames += len(animationData)
with filePath.open("wb") as filePtr:
filePtr.write(bytes([0x53, 0x41, 0x54, 0x2E]))  # Magic header: "SAT."
self.writeAnimationHeader(filePtr, logFilePtr, poseData)
filePtr.write(ctypes.c_uint16(ntohs(totalNumFrames)))
for index, animName in enumerate(allActions):
if logFilePtr:
logFilePtr.write('Animation {}\n'.format(animName))
animationData = allAnimationData[index]
if animationData is None:
continue
self.writeAnimation(filePtr, logFilePtr, animationData, poseData)
def execute(self, context):
filePath = Path(self.filepath)
print("Saving SSM to '{}'".format(self.filepath))
if self.saveLogFile:
logFilePath = filePath.with_suffix('.log')
print("Saving SSM log to '{}'".format(logFilePath))
with filePath.open("wb") as filePtr, logFilePath.open("w") as logFilePtr:
self.writeModelData(context, filePtr, logFilePtr)
self.writeModelAnimationData(context, filePtr, logFilePtr)
else:
with filePath.open("wb") as filePtr:
self.writeModelData(context, filePtr, None)
self.writeModelAnimationData(context, filePtr, None)
# Save dialog settings
settings = context.scene[EXPORTER_SETTINGS_NAME]
if not self.dontSaveSettings:
for field in exportFields:
settings[field] = getattr(self, field)
bpy.context.window_manager.popup_menu(drawSuccessMessage,
title='Export finished',
icon='INFO')
return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
# Only needed if you want to add into a dynamic menu
def menu_func(self, context):
self.layout.operator_context = 'INVOKE_DEFAULT'
self.layout.operator(ExportSegaSaturnModel.bl_idname,
text="Export to Sega Saturn")
def setDithering(self, context):
editObject = context.edit_object
bm = globalMeshes.setdefault(editObject.name,
bmesh.from_edit_mesh(editObject.data))
for face in bm.faces:
if not face.select:
continue
layer = bm.faces.layers.int.get("FaceFlags")
if bpy.context.window_manager.useDithering:
face[layer] |= FLAG_DITHERING
else:
face[layer] &= ~FLAG_DITHERING
return None
def setGouraud(self, context):
editObject = context.edit_object
bm = globalMeshes.setdefault(editObject.name,
bmesh.from_edit_mesh(editObject.data))
for face in bm.faces:
if not face.select:
continue
layer = bm.faces.layers.int.get("FaceFlags")
if bpy.context.window_manager.useGouraud:
face[layer] |= FLAG_GOURAUD
else:
face[layer] &= ~FLAG_GOURAUD
return None
def setShadow(self, context):
editObject = context.edit_object
bm = globalMeshes.setdefault(editObject.name,
bmesh.from_edit_mesh(editObject.data))
for face in bm.faces:
if not face.select:
continue
layer = bm.faces.layers.int.get("FaceFlags")
if bpy.context.window_manager.useShadow:
face[layer] |= FLAG_SHADOW
else:
face[layer] &= ~FLAG_SHADOW
return None
def setTransparency(self, context):
editObject = context.edit_object
bm = globalMeshes.setdefault(editObject.name,
bmesh.from_edit_mesh(editObject.data))
for face in bm.faces:
if not face.select:
continue
layer = bm.faces.layers.int.get("FaceFlags")
if bpy.context.window_manager.useTransparency:
face[layer] |= FLAG_TRANSPARENCY
else:
face[layer] &= ~FLAG_TRANSPARENCY
return None
def setIgnoreFaceSize(self, context):
editObject = context.edit_object
bm = globalMeshes.setdefault(editObject.name,
bmesh.from_edit_mesh(editObject.data))
for face in bm.faces:
if not face.select:
continue
layer = bm.faces.layers.int.get("FaceFlags")
if bpy.context.window_manager.ignoreFaceSize:
face[layer] |= FLAG_IGNORE_FACE_SIZE
else:
face[layer] &= ~FLAG_IGNORE_FACE_SIZE
return None
# Store intermediate values for face flags.
bpy.types.WindowManager.useDithering = bpy.props.BoolProperty(name="Use Dithering",
update=setDithering)
bpy.types.WindowManager.useTransparency = bpy.props.BoolProperty(name="Use Transparency",
update=setTransparency)
bpy.types.WindowManager.ignoreFaceSize = bpy.props.BoolProperty(name="Ignore Face Size",
update=setIgnoreFaceSize)
bpy.types.WindowManager.useGouraud = bpy.props.BoolProperty(name="Gouraud",
update=setGouraud)
bpy.types.WindowManager.useShadow = bpy.props.BoolProperty(name="Use Shadow",
update=setShadow)
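# A minimal standalone illustration of how the FLAG_* bits above combine
# on a single face value. `describeFaceFlags` is a hypothetical helper
# added here for clarity only; the FLAG_* constants themselves are
# defined earlier in this addon.
def describeFaceFlags(flags):
    """Return the names of the FaceFlags bits set in `flags` (sketch)."""
    names = []
    if flags & FLAG_DITHERING:
        names.append("dithering")
    if flags & FLAG_TRANSPARENCY:
        names.append("transparency")
    if flags & FLAG_IGNORE_FACE_SIZE:
        names.append("ignore-face-size")
    if flags & FLAG_GOURAUD:
        names.append("gouraud")
    if flags & FLAG_SHADOW:
        names.append("shadow")
    return names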
# Update window manager values
def updateWMValues(bm):
bm.faces.ensure_lookup_table()
if bm.faces.layers.int.get("FaceFlags") is None:
bm.faces.layers.int.new("FaceFlags")
activeFaces = getActiveFaces(bm)
if len(activeFaces) > 0:
face = activeFaces[0]
layer = bm.faces.layers.int.get("FaceFlags")
bpy.context.window_manager.useDithering = ((face[layer] & FLAG_DITHERING) != 0)
bpy.context.window_manager.useTransparency = ((face[layer] & FLAG_TRANSPARENCY) != 0)
bpy.context.window_manager.ignoreFaceSize = ((face[layer] & FLAG_IGNORE_FACE_SIZE) != 0)
bpy.context.window_manager.useGouraud = ((face[layer] & FLAG_GOURAUD) != 0)
bpy.context.window_manager.useShadow = ((face[layer] & FLAG_SHADOW) != 0)
return None
# Scene update handler.
@persistent
def editObjectChangeHandler(scene):
selectedObjects = [x for x in scene.objects if x.select_get()]
if len(selectedObjects) == 0:
globalMeshes.clear()
return None
for obj in selectedObjects:
# Add one instance of the edit bmesh to the global dict.
if obj.mode == 'EDIT' and obj.type == 'MESH':
bm = globalMeshes.setdefault(obj.name, bmesh.from_edit_mesh(obj.data))
updateWMValues(bm)
bmesh.update_edit_mesh(obj.data)
# We left edit mode, clear mesh.
elif obj.mode != 'EDIT' and obj.type == 'MESH':
if obj.name in globalMeshes:
globalMeshes[obj.name].free()
globalMeshes.pop(obj.name)
return None
def getActiveFaces(obj):
faces = []
for face in obj.faces:
if face.select:
faces.append(face)
return faces
class ROMULO_PT_SaturnEditPanel(bpy.types.Panel):
bl_idname = "ROMULO_PT_SaturnEditPanel"
bl_label = "Sega Saturn"
bl_region_type = "UI"
bl_space_type = "VIEW_3D"
@classmethod
def poll(cls, context):
# Only allow in edit mode for a selected mesh.
return context.mode == "EDIT_MESH" and context.object is not None and context.object.type == "MESH"
def draw(self, context):
selectedObject = context.object
bm = globalMeshes.setdefault(selectedObject.name,
bmesh.from_edit_mesh(selectedObject.data))
activeFaces = getActiveFaces(bm)
if len(activeFaces) > 1:
self.layout.label(text="Multiple faces selected.")
self.layout.prop(context.window_manager, "useDithering", text="Use Dithering")
self.layout.prop(context.window_manager, "useTransparency", text="Use Transparency")
self.layout.prop(context.window_manager, "ignoreFaceSize", text="Ignore Face Size")
self.layout.prop(context.window_manager, "useGouraud", text="Gouraud")
self.layout.prop(context.window_manager, "useShadow", text="Shadow")
def register():
bpy.utils.register_class(ExportSegaSaturnModel)
bpy.utils.register_class(ROMULO_PT_SaturnEditPanel)
bpy.types.TOPBAR_MT_file_export.append(menu_func)
# Face properties panel event handler.
bpy.app.handlers.depsgraph_update_post.clear()
bpy.app.handlers.depsgraph_update_post.append(editObjectChangeHandler)
def unregister():
bpy.utils.unregister_class(ExportSegaSaturnModel)
bpy.utils.unregister_class(ROMULO_PT_SaturnEditPanel)
bpy.types.TOPBAR_MT_file_export.remove(menu_func)
bpy.app.handlers.depsgraph_update_post.clear()
# This allows you to run the script directly from Blender's text editor
# to test the addon without having to install it.
if __name__ == "__main__":
register()
| 36.570147
| 104
| 0.609992
|
424108b193a282a9c4b88bcb79582226f5844bc9
| 78
|
wsgi
|
Python
|
newton_sto.wsgi
|
scott-maddox/newton_sto
|
4e729a7bdd46c729532be59d18219a888706718b
|
[
"BSD-3-Clause"
] | null | null | null |
newton_sto.wsgi
|
scott-maddox/newton_sto
|
4e729a7bdd46c729532be59d18219a888706718b
|
[
"BSD-3-Clause"
] | 3
|
2015-11-14T18:24:24.000Z
|
2015-11-14T18:25:02.000Z
|
newton_sto.wsgi
|
scott-maddox/newton_sto
|
4e729a7bdd46c729532be59d18219a888706718b
|
[
"BSD-3-Clause"
] | null | null | null |
from newton_sto import app as application
import sys
# WSGI servers such as mod_wsgi restrict writes to stdout, so redirect
# print output to stderr.
sys.stdout = sys.stderr
| 15.6
| 41
| 0.807692
|
99193f1f46f89370e295cc136041992bd5a0be79
| 1,537
|
py
|
Python
|
get_header_field_info.py
|
byung-u/TeleCommunicationDocs
|
15a072187285a94bea57a3c33117f18ee7a09623
|
[
"MIT"
] | null | null | null |
get_header_field_info.py
|
byung-u/TeleCommunicationDocs
|
15a072187285a94bea57a3c33117f18ee7a09623
|
[
"MIT"
] | null | null | null |
get_header_field_info.py
|
byung-u/TeleCommunicationDocs
|
15a072187285a94bea57a3c33117f18ee7a09623
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# encoding: utf-8
import sys
def main():
if len(sys.argv) != 2:
# Example
# 1. ./xxxx.py 2327
# 2. ./xxxx.py "2327,2343,3261,1343"
print('Usage: %s $file_name' % sys.argv[0])
sys.exit(2)
file_name = sys.argv[1]
get_header_field(file_name)
def get_header_field(file_name):
print('[Line]\t Header Field')
with open(file_name) as f:
for i, line in enumerate(f):
if line.find('header field') == -1:
continue
split_line = line.split()
for j, hf in enumerate(split_line):
if hf != 'header':
continue
if not split_line[j+1].startswith('field'):
continue
header_field = split_line[j-1]
header_field = header_field.replace('\'','').replace('(','').replace(')','').replace(':','')
if not header_valid_check(header_field):
continue
print('%s +%d\t %s' % (file_name, i+1, header_field))
# print(i, split_line[j-1], split_line[j], split_line[j+1])
def header_valid_check(keyword):
not_allow_keyword = ['to', 'SIP', 'uri', 'tag', 'such', 'unknown',
'yes', 'the', 'that', 'of', 'these', 'following', 'this', 'other',
'whole', 'some', 'those', 'a']
return keyword not in not_allow_keyword
if __name__ == '__main__':
main()
| 30.137255
| 108
| 0.520494
|
fe485b81093fafa17a5cc37d4cbe0a8f7534289a
| 256,074
|
py
|
Python
|
python/mxnet/ndarray/numpy/_op.py
|
sneaxiy/NVIDIA-MxNet
|
ce30b18212fbf23f68c006a02cc034e417bb5518
|
[
"Apache-2.0"
] | null | null | null |
python/mxnet/ndarray/numpy/_op.py
|
sneaxiy/NVIDIA-MxNet
|
ce30b18212fbf23f68c006a02cc034e417bb5518
|
[
"Apache-2.0"
] | null | null | null |
python/mxnet/ndarray/numpy/_op.py
|
sneaxiy/NVIDIA-MxNet
|
ce30b18212fbf23f68c006a02cc034e417bb5518
|
[
"Apache-2.0"
] | 3
|
2021-07-20T07:40:15.000Z
|
2021-08-03T08:39:17.000Z
|
# pylint: disable=C0302
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""Namespace for numpy operators used in Gluon dispatched by F=ndarray."""
import numpy as _np
from ...base import numeric_types, integer_types
from ...util import _sanity_check_params, set_module
from ...util import wrap_np_unary_func, wrap_np_binary_func
from ...context import current_context
from . import _internal as _npi
from . import _api_internal
from ..ndarray import NDArray
__all__ = ['shape', 'zeros', 'zeros_like', 'ones', 'ones_like', 'full', 'full_like', 'empty_like', 'invert', 'delete',
'add', 'broadcast_to', 'subtract', 'multiply', 'divide', 'mod', 'remainder', 'power', 'bitwise_not',
'arctan2', 'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'log10', 'sqrt', 'cbrt', 'abs', 'insert', 'fabs',
'absolute', 'exp', 'expm1', 'arcsin', 'arccos', 'arctan', 'sign', 'log', 'degrees', 'log2', 'matmul',
'log1p', 'rint', 'radians', 'reciprocal', 'square', 'negative', 'fix', 'ceil', 'floor', 'histogram',
'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'argsort', 'sort',
'tensordot', 'eye', 'linspace',
'logspace', 'expand_dims', 'tile', 'arange', 'array_split', 'split', 'hsplit', 'vsplit', 'dsplit',
'concatenate', 'append', 'stack', 'vstack', 'row_stack', 'column_stack', 'hstack', 'dstack',
'average', 'mean', 'maximum', 'minimum', 'around', 'round', 'round_',
'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'indices', 'copysign', 'ravel', 'unravel_index',
'diag_indices_from', 'hanning', 'hamming', 'blackman', 'flip', 'flipud', 'fliplr',
'hypot', 'bitwise_and', 'bitwise_xor', 'bitwise_or', 'rad2deg', 'deg2rad', 'unique', 'lcm',
'tril', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer',
'equal', 'not_equal', 'greater', 'less', 'greater_equal', 'less_equal', 'rot90', 'einsum',
'true_divide', 'nonzero', 'quantile', 'percentile', 'shares_memory', 'may_share_memory',
'diff', 'ediff1d', 'resize', 'polyval', 'nan_to_num', 'isnan', 'isinf', 'isposinf', 'isneginf', 'isfinite',
'where', 'bincount', 'pad']
@set_module('mxnet.ndarray.numpy')
def shape(a):
"""
Return the shape of an array.
Parameters
----------
a : array_like
Input array.
Returns
-------
shape : tuple of ints
The elements of the shape tuple give the lengths of the
corresponding array dimensions.
See Also
--------
ndarray.shape : Equivalent array method.
Examples
--------
>>> np.shape(np.eye(3))
(3, 3)
>>> np.shape([[1, 2]])
(1, 2)
>>> np.shape([0])
(1,)
>>> np.shape(0)
()
"""
return a.shape
@set_module('mxnet.ndarray.numpy')
def zeros(shape, dtype=None, order='C', ctx=None): # pylint: disable=redefined-outer-name
"""Return a new array of given shape and type, filled with zeros.
This function currently only supports storing multi-dimensional data
in row-major (C-style).
Parameters
----------
shape : int or tuple of int
The shape of the empty array.
dtype : str or numpy.dtype, optional
An optional value type. Default is `numpy.float32`. Note that this
behavior is different from NumPy's `zeros` function where `float64`
is the default value, because `float32` is considered as the default
data type in deep learning.
order : {'C'}, optional, default: 'C'
How to store multi-dimensional data in memory, currently only row-major
(C-style) is supported.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray
Array of zeros with the given shape, dtype, and ctx.
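Examples
--------
>>> np.zeros((2, 3))
array([[0., 0., 0.],
[0., 0., 0.]])
>>> np.zeros((2, 3), dtype=int)
array([[0, 0, 0],
[0, 0, 0]], dtype=int64)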
"""
if order != 'C':
raise NotImplementedError
# If the following four lines handling ctx were removed,
# np.zeros((3, 4)) could run in as little as 4.96 us.
if ctx is None:
ctx = str(current_context())
else:
ctx = str(ctx)
if dtype is not None and not isinstance(dtype, str):
dtype = _np.dtype(dtype).name
return _api_internal.zeros(shape, dtype, ctx)
@set_module('mxnet.ndarray.numpy')
def ones(shape, dtype=_np.float32, order='C', ctx=None): # pylint: disable=redefined-outer-name
"""Return a new array of given shape and type, filled with ones.
This function currently only supports storing multi-dimensional data
in row-major (C-style).
Parameters
----------
shape : int or tuple of int
The shape of the empty array.
dtype : str or numpy.dtype, optional
An optional value type. Default is `numpy.float32`. Note that this
behavior is different from NumPy's `ones` function where `float64`
is the default value, because `float32` is considered as the default
data type in deep learning.
order : {'C'}, optional, default: 'C'
How to store multi-dimensional data in memory, currently only row-major
(C-style) is supported.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray
Array of ones with the given shape, dtype, and ctx.
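Examples
--------
>>> np.ones((2, 2))
array([[1., 1.],
[1., 1.]])
>>> np.ones(3, dtype=int)
array([1, 1, 1], dtype=int64)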
"""
if order != 'C':
raise NotImplementedError
if ctx is None:
ctx = current_context()
dtype = _np.float32 if dtype is None else dtype
return _npi.ones(shape=shape, ctx=ctx, dtype=dtype)
# pylint: disable=too-many-arguments, redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def zeros_like(a, dtype=None, order='C', ctx=None, out=None):
"""
Return an array of zeros with the same shape and type as a given array.
Parameters
----------
a : ndarray
The shape and data-type of `a` define these same attributes of
the returned array.
dtype : data-type, optional
Overrides the data type of the result.
Temporarily do not support boolean type.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
ctx : Context, optional
Device context on which to create the array, e.g. a specific GPU.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Array of zeros with the same shape and type as a.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full : Return a new array of given shape filled with value.
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0., 1., 2.],
[3., 4., 5.]])
>>> np.zeros_like(x)
array([[0., 0., 0.],
[0., 0., 0.]])
>>> np.zeros_like(x, int)
array([[0, 0, 0],
[0, 0, 0]], dtype=int64)
>>> y = np.arange(3, dtype=float)
>>> y
array([0., 1., 2.], dtype=float64)
>>> np.zeros_like(y)
array([0., 0., 0.], dtype=float64)
"""
if order != 'C':
raise NotImplementedError
if ctx is None:
ctx = current_context()
return _npi.full_like(a, fill_value=0, dtype=dtype, ctx=ctx, out=out)
@set_module('mxnet.ndarray.numpy')
def ones_like(a, dtype=None, order='C', ctx=None, out=None):
"""
Return an array of ones with the same shape and type as a given array.
Parameters
----------
a : ndarray
The shape and data-type of `a` define these same attributes of
the returned array.
dtype : data-type, optional
Overrides the data type of the result.
Temporarily do not support boolean type.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
ctx : Context, optional
Device context on which to create the array, e.g. a specific GPU.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Array of ones with the same shape and type as a.
See Also
--------
empty_like : Return an empty array with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
ones : Return a new array setting values to one.
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0., 1., 2.],
[3., 4., 5.]])
>>> np.ones_like(x)
array([[1., 1., 1.],
[1., 1., 1.]])
>>> np.ones_like(x, int)
array([[1, 1, 1],
[1, 1, 1]], dtype=int64)
>>> y = np.arange(3, dtype=float)
>>> y
array([0., 1., 2.], dtype=float64)
>>> np.ones_like(y)
array([1., 1., 1.], dtype=float64)
"""
if order != 'C':
raise NotImplementedError
if ctx is None:
ctx = current_context()
return _npi.full_like(a, fill_value=1, dtype=dtype, ctx=ctx, out=out)
@set_module('mxnet.ndarray.numpy')
def broadcast_to(array, shape):
"""
Broadcast an array to a new shape.
Parameters
----------
array : ndarray or scalar
The array to broadcast.
shape : tuple
The shape of the desired array.
Returns
-------
broadcast : array
A readonly view on the original array with the given shape. It is
typically not contiguous. Furthermore, more than one element of a
broadcasted array may refer to a single memory location.
Raises
------
MXNetError
If the array is not compatible with the new shape according to NumPy's
broadcasting rules.
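Examples
--------
>>> x = np.array([1, 2, 3])
>>> np.broadcast_to(x, (3, 3))
array([[1., 2., 3.],
[1., 2., 3.],
[1., 2., 3.]])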
"""
if _np.isscalar(array):
return full(shape, array)
return _npi.broadcast_to(array, shape)
@set_module('mxnet.ndarray.numpy')
def full(shape, fill_value, dtype=None, order='C', ctx=None, out=None): # pylint: disable=too-many-arguments
"""
Return a new array of given shape and type, filled with `fill_value`.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
fill_value : scalar or ndarray
Fill value.
dtype : data-type, optional
The desired data-type for the array. The default, `None`, means
`np.array(fill_value).dtype`.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
ctx : Context, optional
Device context on which to create the array, e.g. a specific GPU.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Array of `fill_value` with the given shape, dtype, and order.
If `fill_value` is an ndarray, out will have the same context as `fill_value`
regardless of the provided `ctx`.
Notes
-----
This function differs from the original `numpy.full
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.full.html>`_ in
the following way(s):
- Have an additional `ctx` argument to specify the device
- Have an additional `out` argument
- Currently does not support `order` selection
See Also
--------
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
Examples
--------
>>> np.full((2, 2), 10)
array([[10., 10.],
[10., 10.]])
>>> np.full((2, 2), 2, dtype=np.int32, ctx=mx.cpu(0))
array([[2, 2],
[2, 2]], dtype=int32)
"""
if order != 'C':
raise NotImplementedError
if ctx is None:
ctx = current_context()
if isinstance(fill_value, NDArray):
if dtype is None:
ret = broadcast_to(fill_value, shape)
else:
ret = broadcast_to(fill_value, shape).astype(dtype)
return ret
if isinstance(fill_value, bool):
fill_value = int(fill_value)
dtype = _np.bool if dtype is None else dtype
dtype = _np.float32 if dtype is None else dtype
return _npi.full(shape=shape, value=fill_value, ctx=ctx, dtype=dtype, out=out)
# pylint: enable=too-many-arguments, redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def full_like(a, fill_value, dtype=None, order='C', ctx=None, out=None): # pylint: disable=too-many-arguments
"""
Return a full array with the same shape and type as a given array.
Parameters
----------
a : ndarray
The shape and data-type of `a` define these same attributes of
the returned array.
fill_value : scalar
Fill value.
dtype : data-type, optional
Overrides the data type of the result.
Temporarily do not support boolean type.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
ctx : Context, optional
Device context on which to create the array, e.g. a specific GPU.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Array of `fill_value` with the same shape and type as `a`.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full : Return a new array of given shape filled with value.
Examples
--------
>>> x = np.arange(6, dtype=int)
>>> np.full_like(x, 1)
array([1, 1, 1, 1, 1, 1], dtype=int64)
>>> np.full_like(x, 0.1)
array([0, 0, 0, 0, 0, 0], dtype=int64)
>>> np.full_like(x, 0.1, dtype=np.float64)
array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1], dtype=float64)
>>> np.full_like(x, np.nan, dtype=np.double)
array([nan, nan, nan, nan, nan, nan], dtype=float64)
>>> y = np.arange(6, dtype=np.float32)
>>> np.full_like(y, 0.1)
array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
"""
if order != 'C':
raise NotImplementedError
if ctx is None:
ctx = current_context()
if isinstance(fill_value, bool):
fill_value = int(fill_value)
return _npi.full_like(a, fill_value=fill_value, dtype=dtype, ctx=ctx, out=out)
@set_module('mxnet.ndarray.numpy')
def empty_like(prototype, dtype=None, order='C', subok=False, shape=None): # pylint: disable=W0621
"""
Return a new array with the same shape and type as a given array.
Parameters
----------
prototype : ndarray
The shape and data-type of `prototype` define these same attributes
of the returned array.
dtype : data-type, optional
Overrides the data type of the result.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
subok : {False}, optional
If True, then the newly created array will use the sub-class
type of 'a', otherwise it will be a base-class array. Defaults
to False.
(Only False is supported at this moment)
shape : int or sequence of ints, optional.
Overrides the shape of the result. If order='K' and the number of
dimensions is unchanged, will try to keep order, otherwise,
order='C' is implied.
(Not supported at this moment)
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data with the same
shape and type as `prototype`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
Notes
-----
This function does *not* initialize the returned array; to do that use
`zeros_like` or `ones_like` instead. It may be marginally faster than
the functions that do set the array values.
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> np.empty_like(a)
array([[-5764607523034234880, -2305834244544065442, 4563075075], # uninitialized
[ 4567052944, -5764607523034234880, 844424930131968]])
>>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
>>> np.empty_like(a)
array([[4.9e-324, 9.9e-324, 1.5e-323], # uninitialized
[2.0e-323, 2.5e-323, 3.0e-323]])
"""
dtype_list = {None:'None', _np.int8:'int8', _np.uint8:'uint8', _np.int32:'int32',
_np.int64:'int64', _np.float16:'float16', _np.float32:'float32',
_np.float64:'float64', _np.bool_:'bool_', bool:'bool', int:'int64', float:'float64'}
if order != 'C':
raise NotImplementedError("Only support C-order at this moment")
if subok:
raise NotImplementedError("Creating array by using sub-class is not supported at this moment")
if shape is not None:
raise NotImplementedError("Assigning new shape is not supported at this moment")
try:
dtype = dtype if isinstance(dtype, str) else dtype_list[dtype]
except (KeyError, TypeError):
raise NotImplementedError("Do not support this dtype at this moment")
return _npi.empty_like_fallback(prototype, dtype=dtype, order=order, subok=subok, shape=shape)
@set_module('mxnet.ndarray.numpy')
def arange(start, stop=None, step=1, dtype=None, ctx=None):
"""Return evenly spaced values within a given interval.
Values are generated within the half-open interval ``[start, stop)``
(in other words, the interval including `start` but excluding `stop`).
For integer arguments the function is equivalent to the Python built-in
`range` function, but returns an ndarray rather than a list.
Parameters
----------
start : number, optional
Start of interval. The interval includes this value. The default
start value is 0.
stop : number
End of interval. The interval does not include this value, except
in some cases where `step` is not an integer and floating point
round-off affects the length of `out`.
step : number, optional
Spacing between values. For any output `out`, this is the distance
between two adjacent values, ``out[i+1] - out[i]``. The default
step size is 1. If `step` is specified as a positional argument,
`start` must also be given.
dtype : dtype
The type of the output array. The default is `float32`.
Returns
-------
arange : ndarray
Array of evenly spaced values.
For floating point arguments, the length of the result is
``ceil((stop - start)/step)``. Because of floating point overflow,
this rule may result in the last element of `out` being greater
than `stop`.
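Examples
--------
>>> np.arange(3)
array([0., 1., 2.])
>>> np.arange(3, 7)
array([3., 4., 5., 6.])
>>> np.arange(3, 7, 2)
array([3., 5.])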
"""
if dtype is None:
dtype = 'float32'
if ctx is None:
ctx = current_context()
# Validate before the start/stop swap below; otherwise the check can never fire.
if start is None and stop is None:
    raise ValueError('start and stop cannot be both None')
if stop is None:
    stop = start
    start = 0
if step is None:
    step = 1
if step == 0:
    raise ZeroDivisionError('step cannot be 0')
return _npi.arange(start=start, stop=stop, step=step, dtype=dtype, ctx=ctx)
@set_module('mxnet.ndarray.numpy')
def identity(n, dtype=None, ctx=None):
"""
Return the identity array.
The identity array is a square array with ones on
the main diagonal.
Parameters
----------
n : int
Number of rows (and columns) in `n` x `n` output.
dtype : data-type, optional
Data-type of the output. Defaults to ``numpy.float32``.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray
`n` x `n` array with its main diagonal set to one,
and all other elements 0.
Examples
--------
>>> np.identity(3)
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
"""
if not isinstance(n, int):
raise TypeError("Input 'n' should be an integer")
if n < 0:
raise ValueError("Input 'n' cannot be negative")
if ctx is None:
ctx = current_context()
dtype = _np.float32 if dtype is None else dtype
return _npi.identity(shape=(n, n), ctx=ctx, dtype=dtype)
# pylint: disable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def take(a, indices, axis=None, mode='raise', out=None):
r"""
Take elements from an array along an axis.
When axis is not None, this function does the same thing as "fancy"
indexing (indexing arrays using arrays); however, it can be easier to use
if you need elements along a given axis. A call such as
``np.take(arr, indices, axis=3)`` is equivalent to
``arr[:,:,:,indices,...]``.
Explained without fancy indexing, this is equivalent to the following use
of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of
indices::
Ni, Nk = a.shape[:axis], a.shape[axis+1:]
Nj = indices.shape
for ii in ndindex(Ni):
for jj in ndindex(Nj):
for kk in ndindex(Nk):
out[ii + jj + kk] = a[ii + (indices[jj],) + kk]
Parameters
----------
a : ndarray
The source array.
indices : ndarray
The indices of the values to extract. Also allow scalars for indices.
axis : int, optional
The axis over which to select values. By default, the flattened
input array is used.
out : ndarray, optional
If provided, the result will be placed in this array. It should
be of the appropriate shape and dtype.
mode : {'raise', 'clip', 'wrap'}, optional
Specifies how out-of-bounds indices will behave. The default is 'raise'.
* 'raise' -- raise an error
* 'clip' -- clip to the range
* 'wrap' -- wrap around
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
Returns
-------
out : ndarray
The returned array has the same type as `a`.
Notes
-----
This function differs from the original `numpy.take
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.take.html>`_ in
the following way(s):
- Only ndarray or scalar ndarray is accepted as valid input.
Examples
--------
>>> a = np.array([4, 3, 5, 7, 6, 8])
>>> indices = np.array([0, 1, 4])
>>> np.take(a, indices)
array([4., 3., 6.])
In this example, since `a` is an ndarray, "fancy" indexing can be used.
>>> a[indices]
array([4., 3., 6.])
If `indices` is not one dimensional, the output also has these dimensions.
>>> np.take(a, np.array([[0, 1], [2, 3]]))
array([[4., 3.],
[5., 7.]])
"""
if mode not in ('wrap', 'clip', 'raise'):
raise NotImplementedError(
"function take does not support mode '{}'".format(mode))
if axis is None:
return _npi.take(_npi.reshape(a, -1), indices, 0, mode, out)
else:
return _npi.take(a, indices, axis, mode, out)
# pylint: enable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : ndarray
Input array.
obj : int, slice or ndarray of int64
Object that defines the index or indices before which `values` is
inserted.
Multiple insertions are supported when `obj` is a single scalar or a
sequence with one element (only int32 and int64 elements are supported).
values : ndarray
Values to insert into `arr`.
If the type of values is different from that of arr, values is converted
to the type of arr.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
Notes
-----
- Note that for higher dimensional inserts `obj=0` behaves very differently
from `obj=[0]`, just like `arr[:,0,:] = values` is different from
`arr[:,[0],:] = values`.
- If `obj` is an ndarray, only dtype int64 is supported.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1., 1.],
[2., 2.],
[3., 3.]])
>>> np.insert(a, 1, np.array(5))
array([1., 5., 1., 2., 2., 3., 3.])
>>> np.insert(a, 1, np.array(5), axis=1)
array([[1., 5., 1.],
[2., 5., 2.],
[3., 5., 3.]])
Difference between sequence and scalars:
>>> np.insert(a, np.array([1], dtype=np.int64), np.array([[1],[2],[3]]), axis=1)
array([[1., 1., 1.],
[2., 2., 2.],
[3., 3., 3.]])
>>> np.insert(a, 1, np.array([1, 2, 3]), axis=1)
array([[1., 1., 1.],
[2., 2., 2.],
[3., 3., 3.]])
>>> b = a.flatten()
>>> b
array([1., 1., 2., 2., 3., 3.])
>>> np.insert(b, np.array([2, 2], dtype=np.int64), np.array([5, 6]))
array([1., 1., 5., 6., 2., 2., 3., 3.])
>>> np.insert(b, slice(2, 4), np.array([5, 6]))
array([1., 1., 5., 2., 6., 2., 3., 3.])
# type casting
>>> np.insert(b.astype(np.int32), np.array([2, 2],dtype='int64'), np.array([7.13, False]))
array([1, 1, 7, 0, 2, 2, 3, 3], dtype=int32)
>>> x = np.arange(8).reshape(2, 4)
>>> idx = np.array([1, 3], dtype=np.int64)
>>> np.insert(x, idx, np.array([999]), axis=1)
array([[ 0., 999., 1., 2., 999., 3.],
[ 4., 999., 5., 6., 999., 7.]])
"""
if isinstance(values, numeric_types):
if isinstance(obj, slice):
start = obj.start
stop = obj.stop
step = 1 if obj.step is None else obj.step
return _npi.insert_slice(arr, val=values, start=start, stop=stop, step=step, axis=axis)
elif isinstance(obj, integer_types):
return _npi.insert_scalar(arr, val=values, int_ind=obj, axis=axis)
elif isinstance(obj, NDArray):
    return _npi.insert_tensor(arr, obj, val=values, axis=axis)
else:
    raise TypeError("'obj' can not support type {}".format(str(type(obj))))
if not isinstance(arr, NDArray):
raise TypeError("'arr' can not support type {}".format(str(type(arr))))
if not isinstance(values, NDArray):
raise TypeError("'values' can not support type {}".format(str(type(values))))
if isinstance(obj, slice):
start = obj.start
stop = obj.stop
step = 1 if obj.step is None else obj.step
return _npi.insert_slice(arr, values, start=start, stop=stop, step=step, axis=axis)
elif isinstance(obj, integer_types):
return _npi.insert_scalar(arr, values, int_ind=obj, axis=axis)
elif isinstance(obj, NDArray):
return _npi.insert_tensor(arr, values, obj, axis=axis)
else:
raise TypeError("'obj' can not support type {}".format(str(type(obj))))
#pylint: disable= too-many-arguments, no-member, protected-access
def _ufunc_helper(lhs, rhs, fn_array, fn_scalar, lfn_scalar, rfn_scalar=None, out=None):
""" Helper function for element-wise operation.
The function will perform numpy-like broadcasting if needed and call different functions.
Parameters
--------
lhs : ndarray or numeric value
Left-hand side operand.
rhs : ndarray or numeric value
Right-hand side operand.
fn_array : function
Function to be called if both lhs and rhs are of ``ndarray`` type.
fn_scalar : function
Function to be called if both lhs and rhs are numeric values.
lfn_scalar : function
Function to be called if lhs is an ``ndarray`` while rhs is a numeric value.
rfn_scalar : function
Function to be called if lhs is a numeric value while rhs is an ``ndarray``;
if none is provided, the function is assumed commutative, so rfn_scalar equals lfn_scalar.
Returns
--------
mxnet.numpy.ndarray or scalar
result array or scalar
"""
from ...numpy import ndarray
from ..ndarray import from_numpy # pylint: disable=unused-import
if isinstance(lhs, numeric_types):
if isinstance(rhs, numeric_types):
return fn_scalar(lhs, rhs, out=out)
else:
if rfn_scalar is None:
# commutative function
return lfn_scalar(rhs, float(lhs), out=out)
else:
return rfn_scalar(rhs, float(lhs), out=out)
elif isinstance(rhs, numeric_types):
return lfn_scalar(lhs, float(rhs), out=out)
elif isinstance(lhs, ndarray) and isinstance(rhs, ndarray):
return fn_array(lhs, rhs, out=out)
else:
raise TypeError('type {} not supported'.format(str(type(rhs))))
#pylint: enable= too-many-arguments, no-member, protected-access
@set_module('mxnet.ndarray.numpy')
def unique(ar, return_index=False, return_inverse=False, return_counts=False, axis=None):
"""
Find the unique elements of an array.
Returns the sorted unique elements of an array. There are three optional
outputs in addition to the unique elements:
* the indices of the input array that give the unique values
* the indices of the unique array that reconstruct the input array
* the number of times each unique value comes up in the input array
Parameters
----------
ar : ndarray
Input array. Unless `axis` is specified, this will be flattened if it
is not already 1-D.
return_index : bool, optional
If True, also return the indices of `ar` (along the specified axis,
if provided, or in the flattened array) that result in the unique array.
return_inverse : bool, optional
If True, also return the indices of the unique array (for the specified
axis, if provided) that can be used to reconstruct `ar`.
return_counts : bool, optional
If True, also return the number of times each unique item appears
in `ar`.
axis : int or None, optional
The axis to operate on. If None, `ar` will be flattened. If an integer,
the subarrays indexed by the given axis will be flattened and treated
as the elements of a 1-D array with the dimension of the given axis,
see the notes for more details. The default is None.
Returns
-------
unique : ndarray
The sorted unique values.
unique_indices : ndarray, optional
The indices of the first occurrences of the unique values in the
original array. Only provided if `return_index` is True.
unique_inverse : ndarray, optional
The indices to reconstruct the original array from the
unique array. Only provided if `return_inverse` is True.
unique_counts : ndarray, optional
The number of times each of the unique values comes up in the
original array. Only provided if `return_counts` is True.
Notes
-----
When an axis is specified the subarrays indexed by the axis are sorted.
This is done by making the specified axis the first dimension of the array
and then flattening the subarrays in C order. The flattened subarrays are
then viewed as a structured type with each element given a label, with the
effect that we end up with a 1-D array of structured types that can be
treated in the same way as any other 1-D array. The result is that the
flattened subarrays are sorted in lexicographic order starting with the
first element.
This function differs from the original `numpy.unique
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html>`_ in
the following aspects:
- Only support ndarray as input.
- Object arrays or structured arrays are not supported.
Examples
--------
>>> np.unique(np.array([1, 1, 2, 2, 3, 3]))
array([1., 2., 3.])
>>> a = np.array([[1, 1], [2, 3]])
>>> np.unique(a)
array([1., 2., 3.])
Return the unique rows of a 2D array
>>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
>>> np.unique(a, axis=0)
array([[1., 0., 0.],
[2., 3., 4.]])
Return the indices of the original array that give the unique values:
>>> a = np.array([1, 2, 6, 4, 2, 3, 2])
>>> u, indices = np.unique(a, return_index=True)
>>> u
array([1., 2., 3., 4., 6.])
>>> indices
array([0, 1, 5, 3, 2], dtype=int64)
>>> a[indices]
array([1., 2., 3., 4., 6.])
Reconstruct the input array from the unique values:
>>> a = np.array([1, 2, 6, 4, 2, 3, 2])
>>> u, indices = np.unique(a, return_inverse=True)
>>> u
array([1., 2., 3., 4., 6.])
>>> indices
array([0, 1, 4, 3, 1, 2, 1], dtype=int64)
>>> u[indices]
array([1., 2., 6., 4., 2., 3., 2.])
"""
ret = _npi.unique(ar, return_index, return_inverse, return_counts, axis)
if isinstance(ret, list):
return tuple(ret)
else:
return ret
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def add(x1, x2, out=None, **kwargs):
"""
Add arguments element-wise.
Parameters
----------
x1, x2 : ndarrays or scalar values
The arrays to be added. If x1.shape != x2.shape, they must be broadcastable to
a common shape (which may be the shape of one or the other).
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
add : ndarray or scalar
The sum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
Notes
-----
This operator now supports automatic type promotion. The resulting type will be determined
according to the following rules:
* If both inputs are of floating number types, the output is the more precise type.
* If only one of the inputs is floating number type, the result is that type.
* If both inputs are of integer types (including boolean), not supported yet.
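Examples
--------
>>> np.add(np.array([1, 2]), np.array([3, 4]))
array([4., 6.])
>>> np.add(np.array([1, 2]), 3)
array([4., 5.])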
"""
return _ufunc_helper(x1, x2, _npi.add, _np.add, _npi.add_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def subtract(x1, x2, out=None, **kwargs):
"""
Subtract arguments element-wise.
Parameters
----------
x1, x2 : ndarrays or scalar values
The arrays to be subtracted from each other. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which may be the shape
of one or the other).
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
subtract : ndarray or scalar
The difference of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
Notes
-----
This operator now supports automatic type promotion. The resulting type will be determined
according to the following rules:
* If both inputs are of floating number types, the output is the more precise type.
* If only one of the inputs is floating number type, the result is that type.
* If both inputs are of integer types (including boolean), not supported yet.
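Examples
--------
>>> np.subtract(np.array([4, 6]), np.array([1, 2]))
array([3., 4.])
>>> np.subtract(np.array([4, 6]), 1)
array([3., 5.])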
"""
return _ufunc_helper(x1, x2, _npi.subtract, _np.subtract, _npi.subtract_scalar,
_npi.rsubtract_scalar, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def multiply(x1, x2, out=None, **kwargs):
"""
Multiply arguments element-wise.
Parameters
----------
x1, x2 : ndarrays or scalar values
The arrays to be multiplied. If x1.shape != x2.shape, they must be broadcastable to
a common shape (which may be the shape of one or the other).
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
The multiplication of x1 and x2, element-wise. This is a scalar if both x1 and x2
are scalars.
Notes
-----
This operator now supports automatic type promotion. The resulting type will be determined
according to the following rules:
* If both inputs are of floating number types, the output is the more precise type.
* If only one of the inputs is floating number type, the result is that type.
* If both inputs are of integer types (including boolean), not supported yet.
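Examples
--------
>>> np.multiply(np.array([1, 2]), np.array([3, 4]))
array([3., 8.])
>>> np.multiply(np.array([1, 2]), 3)
array([3., 6.])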
"""
return _ufunc_helper(x1, x2, _npi.multiply, _np.multiply, _npi.multiply_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def divide(x1, x2, out=None, **kwargs):
"""
Returns a true division of the inputs, element-wise.
Parameters
----------
x1 : ndarray or scalar
Dividend array.
x2 : ndarray or scalar
Divisor array.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
This is a scalar if both x1 and x2 are scalars.
Notes
-----
This operator now supports automatic type promotion. The resulting type will be determined
according to the following rules:
* If both inputs are of floating number types, the output is the more precise type.
* If only one of the inputs is floating number type, the result is that type.
* If both inputs are of integer types (including boolean), the output is of float32 type.
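Examples
--------
>>> np.divide(np.array([3, 6]), np.array([2, 4]))
array([1.5, 1.5])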
"""
return _ufunc_helper(x1, x2, _npi.true_divide, _np.divide, _npi.true_divide_scalar,
_npi.rtrue_divide_scalar, out)
@set_module('mxnet.ndarray.numpy')
def true_divide(x1, x2, out=None):
"""Returns a true division of the inputs, element-wise.
Instead of the Python traditional 'floor division', this returns a true
division. True division adjusts the output type to present the best
answer, regardless of input types.
Parameters
----------
x1 : ndarray or scalar
Dividend array.
x2 : ndarray or scalar
Divisor array.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
This is a scalar if both x1 and x2 are scalars.
Notes
-----
This operator now supports automatic type promotion. The resulting type will be determined
according to the following rules:
* If both inputs are of floating number types, the output is the more precise type.
* If only one of the inputs is floating number type, the result is that type.
* If both inputs are of integer types (including boolean), the output is of float32 type.
"""
return _ufunc_helper(x1, x2, _npi.true_divide, _np.divide, _npi.true_divide_scalar,
_npi.rtrue_divide_scalar, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def mod(x1, x2, out=None, **kwargs):
"""
Return element-wise remainder of division.
Parameters
----------
x1 : ndarray or scalar
Dividend array.
x2 : ndarray or scalar
Divisor array.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
This is a scalar if both x1 and x2 are scalars.
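Examples
--------
>>> np.mod(np.array([4, 7]), np.array([3, 5]))
array([1., 2.])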
"""
return _ufunc_helper(x1, x2, _npi.mod, _np.mod, _npi.mod_scalar, _npi.rmod_scalar, out)
@set_module('mxnet.ndarray.numpy')
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted. For a one
dimensional array, this returns those entries not returned by
`arr[obj]`.
Parameters
----------
arr : ndarray
Input array.
obj : slice, int or ndarray of ints
Indicate indices of sub-arrays to remove along the specified axis.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 11., 12.]])
>>> np.delete(arr, 1, 0)
array([[ 1., 2., 3., 4.],
[ 9., 10., 11., 12.]])
>>> np.delete(arr, slice(None, None, 2), 1)
array([[ 2., 4.],
[ 6., 8.],
[10., 12.]])
>>> np.delete(arr, np.array([1,3,5]), None)
array([ 1., 3., 5., 7., 8., 9., 10., 11., 12.])
>>> np.delete(arr, np.array([1,1,5]), None)
array([ 1., 3., 4., 5., 7., 8., 9., 10., 11., 12.])
"""
if not isinstance(arr, NDArray):
raise TypeError("'arr' can not support type {}".format(str(type(arr))))
if isinstance(obj, slice):
start = obj.start
stop = obj.stop
step = 1 if obj.step is None else obj.step
return _npi.delete(arr, start=start, stop=stop, step=step, axis=axis)
elif isinstance(obj, integer_types):
return _npi.delete(arr, int_ind=obj, axis=axis)
elif isinstance(obj, NDArray):
return _npi.delete(arr, obj, axis=axis)
else:
raise TypeError("'obj' can not support type {}".format(str(type(obj))))
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def matmul(a, b, out=None):
"""
Matrix product of two arrays.
Parameters
----------
a, b : ndarray
Input arrays, scalars not allowed.
out : ndarray, optional
A location into which the result is stored.
If provided, it must have a shape that matches the signature (n,k),(k,m)->(n,m).
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The matrix product of the inputs.
This is a scalar only when both x1, x2 are 1-d vectors.
Raises
------
MXNetError
If the last dimension of a is not the same size as the second-to-last dimension of b.
If a scalar value is passed in.
See Also
--------
tensordot :
Sum products over arbitrary axes.
dot :
alternative matrix product with different broadcasting rules.
einsum :
Einstein summation convention.
Notes
-----
The behavior depends on the arguments in the following way.
- If both arguments are 2-D they are multiplied like conventional matrices.
- If either argument is N-D, N > 2, it is treated as a stack of matrices
residing in the last two indexes and broadcast accordingly.
- If the first argument is 1-D, it is promoted to a matrix by prepending
a 1 to its dimensions. After matrix multiplication the prepended 1 is removed.
- If the second argument is 1-D, it is promoted to a matrix by appending a 1
to its dimensions. After matrix multiplication the appended 1 is removed.
matmul differs from dot in two important ways:
- Multiplication by scalars is not allowed, use multiply instead.
- Stacks of matrices are broadcast together as if the matrices were elements,
respecting the signature (n,k),(k,m)->(n,m):
>>> a = np.ones([9, 5, 7, 4])
>>> c = np.ones([9, 5, 4, 3])
>>> np.dot(a, c).shape
(9, 5, 7, 9, 5, 3)
>>> np.matmul(a, c).shape
(9, 5, 7, 3)
>>> # n is 7, k is 4, m is 3
Examples
--------
For 2-D arrays it is the matrix product:
>>> a = np.array([[1, 0],
... [0, 1]])
>>> b = np.array([[4, 1],
... [2, 2]])
>>> np.matmul(a, b)
array([[4., 1.],
[2., 2.]])
For 2-D mixed with 1-D, the result is the usual matrix-vector (or vector-matrix) product.
>>> a = np.array([[1, 0],
... [0, 1]])
>>> b = np.array([1, 2])
>>> np.matmul(a, b)
array([1., 2.])
>>> np.matmul(b, a)
array([1., 2.])
Broadcasting is conventional for stacks of arrays
>>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4))
>>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2))
>>> np.matmul(a, b).shape
(2, 2, 2)
>>> np.matmul(a, b)[0, 1, 1]
array(98.)
>>> sum(a[0, 1, :] * b[0, :, 1])
array(98.)
Scalar multiplication raises an error.
>>> np.matmul([1, 2], 3)
Traceback (most recent call last):
...
mxnet.base.MXNetError: ... : Multiplication by scalars is not allowed.
"""
return _npi.matmul(a, b, out=out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def remainder(x1, x2, out=None):
"""
Return element-wise remainder of division.
Parameters
----------
x1 : ndarray or scalar
Dividend array.
x2 : ndarray or scalar
Divisor array.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
This is a scalar if both x1 and x2 are scalars.
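Examples
--------
A minimal sketch (outputs assume the default float32 dtype):
>>> np.remainder(np.arange(7.), 5.)
array([0., 1., 2., 3., 4., 0., 1.])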
"""
return _ufunc_helper(x1, x2, _npi.mod, _np.mod, _npi.mod_scalar, _npi.rmod_scalar, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def power(x1, x2, out=None, **kwargs):
"""
First array elements raised to powers from second array, element-wise.
Parameters
----------
x1 : ndarray or scalar
The bases.
x2 : ndarray or scalar
The exponent.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
The bases in x1 raised to the exponents in x2.
This is a scalar if both x1 and x2 are scalars.
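Examples
--------
A minimal sketch (outputs assume the default float32 dtype):
>>> np.power(np.array([1., 2., 3.]), 2.)
array([1., 4., 9.])
>>> np.power(2., np.array([1., 2., 3.]))
array([2., 4., 8.])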
"""
return _ufunc_helper(x1, x2, _npi.power, _np.power, _npi.power_scalar, _npi.rpower_scalar, out)
@set_module('mxnet.ndarray.numpy')
def argsort(a, axis=-1, kind=None, order=None):
"""
Returns the indices that would sort an array.
Perform an indirect sort along the given axis using the algorithm specified
by the `kind` keyword. It returns an array of indices of the same shape as
`a` that index data along the given axis in sorted order.
Parameters
----------
a : ndarray
Array to sort.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : string, optional
This argument can take any string, but it does not have any effect on the
final result.
order : str or list of str, optional
Not supported yet, will raise NotImplementedError if not None.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified `axis`.
If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
More generally, ``np.take_along_axis(a, index_array, axis=axis)``
always yields the sorted `a`, irrespective of dimensionality.
Notes
-----
This operator does not support different sorting algorithms.
Examples
--------
One dimensional array:
>>> x = np.array([3, 1, 2])
>>> np.argsort(x)
array([1, 2, 0])
Two-dimensional array:
>>> x = np.array([[0, 3], [2, 2]])
>>> x
array([[0, 3],
[2, 2]])
>>> ind = np.argsort(x, axis=0) # sorts along first axis (down)
>>> ind
array([[0, 1],
[1, 0]])
>>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0)
array([[0, 2],
[2, 3]])
>>> ind = np.argsort(x, axis=1) # sorts along last axis (across)
>>> ind
array([[0, 1],
[0, 1]])
>>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1)
array([[0, 3],
[2, 2]])
Indices of the sorted elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)
>>> ind
(array([0, 1, 1, 0]), array([0, 0, 1, 1]))
>>> x[ind] # same as np.sort(x, axis=None)
array([0, 2, 2, 3])
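`kind` is accepted for compatibility but has no effect (a minimal sketch):
>>> np.argsort(np.array([3, 1, 2]), kind='mergesort')
array([1, 2, 0])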
"""
if order is not None:
raise NotImplementedError("order not supported here")
return _npi.argsort(data=a, axis=axis, is_ascend=True, dtype='int64')
@set_module('mxnet.ndarray.numpy')
def sort(a, axis=-1, kind=None, order=None):
"""
Return a sorted copy of an array.
Parameters
----------
a : ndarray
Array to be sorted.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : string, optional
This argument can take any string, but it does not have any effect on the
final result.
order : str or list of str, optional
Not supported yet, will raise NotImplementedError if not None.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
Notes
-----
This operator does not support different sorting algorithms.
Examples
--------
>>> a = np.array([[1,4],[3,1]])
>>> np.sort(a) # sort along the last axis
array([[1, 4],
[1, 3]])
>>> np.sort(a, axis=None) # sort the flattened array
array([1, 1, 3, 4])
>>> np.sort(a, axis=0) # sort along the first axis
array([[1, 1],
[3, 4]])
"""
if order is not None:
raise NotImplementedError("order not supported here")
return _npi.sort(data=a, axis=axis, is_ascend=True)
@set_module('mxnet.ndarray.numpy')
def tensordot(a, b, axes=2):
r"""
tensordot(a, b, axes=2)
Compute tensor dot product along specified axes for arrays >= 1-D.
Given two tensors (arrays of dimension greater than or equal to one),
`a` and `b`, and an ndarray object containing two ndarray
objects, ``(a_axes, b_axes)``, sum the products of `a`'s and `b`'s
elements (components) over the axes specified by ``a_axes`` and
``b_axes``. The third argument can be a single non-negative
integer_like scalar, ``N``; if it is such, then the last ``N``
dimensions of `a` and the first ``N`` dimensions of `b` are summed
over.
Parameters
----------
a, b : ndarray, len(shape) >= 1
Tensors to "dot".
axes : int or (2,) ndarray
* integer_like
If an int N, sum over the last N axes of `a` and the first N axes
of `b` in order. The sizes of the corresponding axes must match.
* (2,) ndarray
Or, a list of axes to be summed over, first sequence applying to `a`,
second to `b`. Both elements ndarray must be of the same length.
See Also
--------
dot, einsum
Notes
-----
Three common use cases are:
* ``axes = 0`` : tensor product :math:`a\otimes b`
* ``axes = 1`` : tensor dot product :math:`a\cdot b`
* ``axes = 2`` : (default) tensor double contraction :math:`a:b`
When `axes` is integer_like, the sequence for evaluation will be: first
the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and
Nth axis in `b` last.
When there is more than one axis to sum over - and they are not the last
(first) axes of `a` (`b`) - the argument `axes` should consist of
two sequences of the same length, with the first axis to sum over given
first in both sequences, the second axis second, and so forth.
Examples
--------
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
>>> c.shape
(5, 2)
>>> c
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
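A minimal integer-axes sketch (``axes=1`` reduces to the matrix product for 2-D inputs):
>>> a = np.arange(6.).reshape(2, 3)
>>> b = np.arange(6.).reshape(3, 2)
>>> np.tensordot(a, b, axes=1)
array([[10., 13.],
[28., 40.]])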
"""
return _api_internal.tensordot(a, b, axes)
@set_module('mxnet.ndarray.numpy')
def histogram(a, bins=10, range=None, normed=None, weights=None, density=None): # pylint: disable=too-many-arguments
"""
Compute the histogram of a set of data.
Parameters
----------
a : ndarray
Input data. The histogram is computed over the flattened array.
bins : int or NDArray
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a
sequence, it defines a monotonically increasing array of bin edges,
including the rightmost edge, allowing for non-uniform bin widths.
.. versionadded:: 1.11.0
If `bins` is a string, it defines the method used to calculate the
optimal bin width, as defined by `histogram_bin_edges`.
range : (float, float)
The lower and upper range of the bins. Required when `bins` is an integer.
Values outside the range are ignored. The first element of the range must
be less than or equal to the second.
normed : bool, optional
Not supported yet, coming soon.
weights : array_like, optional
Not supported yet, coming soon.
density : bool, optional
Not supported yet, coming soon.
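Examples
--------
A minimal sketch with explicit integer `bins` and `range` (the only
supported combination so far besides passing an edges ndarray):
>>> counts, edges = np.histogram(np.array([0.5, 1.5, 1.6, 2.5]), bins=3, range=(0, 3))
>>> edges
array([0., 1., 2., 3.])
>>> # counts holds the per-bin totals: one value in [0, 1), two in [1, 2), one in [2, 3]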
"""
if normed is True:
raise NotImplementedError("normed is not supported yet...")
if weights is not None:
raise NotImplementedError("weights is not supported yet...")
if density is True:
raise NotImplementedError("density is not supported yet...")
if isinstance(bins, numeric_types):
if range is None:
raise NotImplementedError("automatic range is not supported yet...")
return _npi.histogram(a, bin_cnt=bins, range=range)
if isinstance(bins, (list, tuple)):
raise NotImplementedError("array_like bins is not supported yet...")
if isinstance(bins, str):
raise NotImplementedError("string bins is not supported yet...")
if isinstance(bins, NDArray):
return _npi.histogram(a, bins=bins)
raise ValueError("np.histogram fails with", locals())
@set_module('mxnet.ndarray.numpy')
def eye(N, M=None, k=0, dtype=_np.float32, **kwargs):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to N.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal,
and a negative value to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero,
except for the k-th diagonal, whose values are equal to one.
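Examples
--------
A minimal sketch (outputs assume the default float32 dtype):
>>> np.eye(2)
array([[1., 0.],
[0., 1.]])
>>> np.eye(3, k=1)
array([[0., 1., 0.],
[0., 0., 1.],
[0., 0., 0.]])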
"""
_sanity_check_params('eye', ['order'], kwargs)
ctx = kwargs.pop('ctx', current_context())
if ctx is None:
ctx = current_context()
return _npi.eye(N, M, k, ctx, dtype)
@set_module('mxnet.ndarray.numpy')
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0, ctx=None): # pylint: disable=too-many-arguments
r"""
Return evenly spaced numbers over a specified interval.
Returns num evenly spaced samples, calculated over the interval [start, stop].
The endpoint of the interval can optionally be excluded.
Parameters
----------
start : real number
The starting value of the sequence.
stop : real number
The end value of the sequence, unless endpoint is set to False. In
that case, the sequence consists of all but the last of num + 1
evenly spaced samples, so that stop is excluded. Note that the step
size changes when endpoint is False.
num : int, optional
Number of samples to generate. Default is 50. Must be non-negative.
endpoint : bool, optional
If True, stop is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (samples, step), where step is the spacing between samples.
dtype : dtype, optional
The type of the output array. If dtype is not given, infer the data
type from the other input arguments.
axis : int, optional
The axis in the result to store the samples. Relevant only if start or
stop are array-like. By default (0), the samples will be along a new
axis inserted at the beginning. Use -1 to get an axis at the end.
Returns
-------
samples : ndarray
There are num equally spaced samples in the closed interval
`[start, stop]` or the half-open interval `[start, stop)`
(depending on whether endpoint is True or False).
step : float, optional
Only returned if retstep is True
Size of spacing between samples.
See Also
--------
arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1.asnumpy(), y.asnumpy(), 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2.asnumpy(), (y + 0.5).asnumpy(), 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
Notes
-----
This function differs from the original `numpy.linspace
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html>`_ in
the following aspects:
- `start` and `stop` do not support list, numpy ndarray and mxnet ndarray
- axis could only be 0
- There could be an additional `ctx` argument to specify the device, e.g. the i-th
GPU.
"""
if isinstance(start, (list, _np.ndarray, NDArray)) or \
isinstance(stop, (list, _np.ndarray, NDArray)):
raise NotImplementedError('start and stop only support int')
if axis != 0:
raise NotImplementedError("the function only support axis 0")
if ctx is None:
ctx = current_context()
if retstep:
# match numpy: the spacing divides the interval into num - 1 steps when
# endpoint is True, and into num steps when it is False
step = (stop - start) / (num - 1) if endpoint else (stop - start) / num
return _npi.linspace(start=start, stop=stop, num=num, endpoint=endpoint, ctx=ctx, dtype=dtype), step
else:
return _npi.linspace(start=start, stop=stop, num=num, endpoint=endpoint, ctx=ctx, dtype=dtype)
@set_module('mxnet.ndarray.numpy')
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0, ctx=None): # pylint: disable=too-many-arguments
r"""Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
Non-scalar `start` and `stop` are not supported yet.
Parameters
----------
start : int or float
``base ** start`` is the starting value of the sequence.
stop : int or float
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length `num`) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : float, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
axis : int, optional
The axis in the result to store the samples. Relevant only if start
or stop are array-like. By default (0), the samples will be along a
new axis inserted at the beginning. Now, axis only support axis = 0.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
Notes
-----
Logspace is equivalent to the following code. Currently only axis = 0 is supported.
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
...
>>> power(base, y).astype(dtype)
...
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.44347, 464.15887, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([100. , 177.82794, 316.22775, 562.3413 ])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([4. , 5.0396843, 6.349604 , 8. ])
>>> np.logspace(2.0, 3.0, num=4, base=2.0, dtype=np.int32)
array([4, 5, 6, 8], dtype=int32)
>>> np.logspace(2.0, 3.0, num=4, ctx=npx.gpu(0))
array([ 100. , 215.44347, 464.15887, 1000. ], ctx=gpu(0))
"""
if isinstance(start, (list, tuple, _np.ndarray, NDArray)) or \
isinstance(stop, (list, tuple, _np.ndarray, NDArray)):
raise NotImplementedError('start and stop only support int and float')
if axis != 0:
raise NotImplementedError("the function only support axis 0")
if ctx is None:
ctx = current_context()
return _npi.logspace(start=start, stop=stop, num=num, endpoint=endpoint, base=base, ctx=ctx, dtype=dtype)
@set_module('mxnet.ndarray.numpy')
def expand_dims(a, axis):
"""Expand the shape of an array.
Insert a new axis that will appear at the `axis` position in the expanded array shape.
Parameters
----------
a : ndarray
Input array.
axis : int
Position in the expanded axes where the new axis is placed.
Returns
-------
res : ndarray
Output array. The number of dimensions is one greater than that of
the input array.
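Examples
--------
A minimal sketch:
>>> x = np.array([1., 2.])
>>> np.expand_dims(x, 0).shape
(1, 2)
>>> np.expand_dims(x, 1).shape
(2, 1)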
"""
return _npi.expand_dims(a, axis)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def lcm(x1, x2, out=None, **kwargs):
"""
Returns the lowest common multiple of ``|x1|`` and ``|x2|``
Parameters
----------
x1, x2 : ndarrays or scalar values
The arrays for computing lowest common multiple. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which may be the shape of
one or the other).
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
y : ndarray or scalar
The lowest common multiple of the absolute value of the inputs
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
gcd : The greatest common divisor
Examples
--------
>>> np.lcm(12, 20)
60
>>> np.lcm(np.arange(6, dtype=int), 20)
array([ 0, 20, 20, 60, 20, 20], dtype=int64)
"""
return _ufunc_helper(x1, x2, _npi.lcm, _np.lcm, _npi.lcm_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
def tril(m, k=0):
r"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : ndarray, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> a = np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]])
>>> np.tril(a, -1)
array([[ 0., 0., 0.],
[ 4., 0., 0.],
[ 7., 8., 0.],
[10., 11., 12.]])
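With the default ``k = 0`` the main diagonal is kept (a minimal sketch):
>>> np.tril(a)
array([[ 1., 0., 0.],
[ 4., 5., 0.],
[ 7., 8., 9.],
[10., 11., 12.]])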
"""
return _npi.tril(m, k)
def _unary_func_helper(x, fn_array, fn_scalar, out=None, **kwargs):
"""Helper function for unary operators.
Parameters
----------
x : ndarray or scalar
Input of the unary operator.
fn_array : function
Function to be called if x is of ``ndarray`` type.
fn_scalar : function
Function to be called if x is a Python scalar.
out : ndarray
The buffer ndarray for storing the result of the unary function.
Returns
-------
out : mxnet.numpy.ndarray or scalar
Result array or scalar.
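Examples
--------
A minimal dispatch sketch: scalars fall through to the NumPy function,
ndarrays go to the MXNet operator.
>>> _unary_func_helper(0.0, _npi.sin, _np.sin)
0.0
>>> _unary_func_helper(np.array([0.]), _npi.sin, _np.sin)
array([0.])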
"""
if isinstance(x, numeric_types):
return fn_scalar(x, **kwargs)
elif isinstance(x, NDArray):
return fn_array(x, out=out, **kwargs)
else:
raise TypeError('type {} not supported'.format(str(type(x))))
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def sin(x, out=None, **kwargs):
r"""
Trigonometric sine, element-wise.
Parameters
----------
x : ndarray or scalar
Angle, in radians (:math:`2 \pi` rad equals 360 degrees).
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The sine of each element of x. This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.sin(np.pi/2.)
1.0
>>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180.)
array([0. , 0.5 , 0.70710677, 0.86602545, 1. ])
"""
return _unary_func_helper(x, _npi.sin, _np.sin, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def cos(x, out=None, **kwargs):
r"""
Cosine, element-wise.
Parameters
----------
x : ndarray or scalar
Angle, in radians (:math:`2 \pi` rad equals 360 degrees).
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The corresponding cosine values. This is a scalar if x is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.cos(np.array([0, np.pi/2, np.pi]))
array([ 1.000000e+00, -4.371139e-08, -1.000000e+00])
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='f')
>>> out2 = np.cos(np.array([0.1]), out1)
>>> out2 is out1
True
"""
return _unary_func_helper(x, _npi.cos, _np.cos, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def sinh(x, out=None, **kwargs):
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or ``-1j * np.sin(1j*x)``.
Parameters
----------
x : ndarray or scalar
Input array or scalar.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The corresponding hyperbolic sine values. This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.sinh(0)
0.0
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='f')
>>> out2 = np.sinh(np.array([0.1]), out1)
>>> out2 is out1
True
"""
return _unary_func_helper(x, _npi.sinh, _np.sinh, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def cosh(x, out=None, **kwargs):
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : ndarray or scalar
Input array or scalar.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The corresponding hyperbolic cosine values. This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.cosh(0)
1.0
"""
return _unary_func_helper(x, _npi.cosh, _np.cosh, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def tanh(x, out=None, **kwargs):
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)``.
Parameters
----------
x : ndarray or scalar.
Input array.
out : ndarray or None
A location into which the result is stored. If provided, it
must have the same shape as the input. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output and input must be the same.
Returns
-------
y : ndarray or scalar
The corresponding hyperbolic tangent values.
Notes
-----
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
- input x does not support complex computation (like imaginary number)
>>> np.tanh(np.pi*1j)
TypeError: type <type 'complex'> not supported
Examples
--------
>>> np.tanh(np.array([0, np.pi]))
array([0. , 0.9962721])
>>> np.tanh(np.pi)
0.99627207622075
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.array(1)
>>> out2 = np.tanh(np.array(0.1), out1)
>>> out2 is out1
True
"""
return _unary_func_helper(x, _npi.tanh, _np.tanh, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def log10(x, out=None, **kwargs):
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : ndarray or scalar
Input array or scalar.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative. This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.log10(np.array([1e-15, -3.]))
array([-15., nan])
"""
return _unary_func_helper(x, _npi.log10, _np.log10, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def sqrt(x, out=None, **kwargs):
"""
Return the non-negative square-root of an array, element-wise.
Parameters
----------
x : ndarray or scalar
The values whose square-roots are required.
out : ndarray, or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
An array of the same shape as `x`, containing the positive
square-root of each element in `x`. This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.sqrt(np.array([1,4,9]))
array([1., 2., 3.])
>>> np.sqrt(np.array([4, -1, _np.inf]))
array([ 2., nan, inf])
"""
return _unary_func_helper(x, _npi.sqrt, _np.sqrt, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def cbrt(x, out=None, **kwargs):
r"""
Return the cube-root of an array, element-wise.
Parameters
----------
x : ndarray
The values whose cube-roots are required.
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape that the
inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
A tuple (possible only as a keyword argument) must have length equal to the number of outputs.
Returns
----------
y : ndarray
An array of the same shape as x, containing the cube-root of each element in x.
If out was provided, y is a reference to it. This is a scalar if x is a scalar.
Examples
----------
>>> np.cbrt(np.array([1., 8., 27.]))
array([1., 2., 3.])
"""
return _unary_func_helper(x, _npi.cbrt, _np.cbrt, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def abs(x, out=None, **kwargs):
r"""
Calculate the absolute value element-wise.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. This is a scalar if `x` is a scalar.
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.abs(x)
array([1.2, 1.2])
"""
return _unary_func_helper(x, _npi.abs, _np.abs, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def fabs(x, out=None, **kwargs):
r"""
Calculate the absolute value element-wise.
This function returns the absolute values (positive magnitude) of the
data in `x`. Complex values are not handled, use `absolute` to find the
absolute values of complex data.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. This is a scalar if `x` is a scalar.
Examples
--------
>>> np.fabs(-1)
1.0
>>> np.fabs(np.array([-1.2, 1.2]))
array([1.2, 1.2])
"""
return _unary_func_helper(x, _npi.abs, _np.abs, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def absolute(x, out=None, **kwargs):
r"""
Calculate the absolute value element-wise.
np.abs is a shorthand for this function.
Parameters
----------
x : ndarray
Input array.
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
A tuple (possible only as a keyword argument) must have length equal to the number of outputs.
Returns
----------
absolute : ndarray
An ndarray containing the absolute value of each element in x.
Examples
----------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([1.2, 1.2])
"""
return _unary_func_helper(x, _npi.absolute, _np.absolute, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def sign(x, out=None, **kwargs):
r"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. Only real numbers are supported.
Parameters
----------
x : ndarray or a scalar
Input values.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The sign of `x`.
This is a scalar if `x` is a scalar.
Note
-------
- Only supports real number as input elements.
- Input type does not support Python native iterables(list, tuple, ...).
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
Examples
--------
>>> a = np.array([-5., 4.5])
>>> np.sign(a)
array([-1., 1.])
>>> # Use scalars as inputs:
>>> np.sign(4.0)
1.0
>>> np.sign(0)
0
>>> # Use ``out`` parameter:
>>> b = np.zeros((2, ))
>>> np.sign(a, out=b)
array([-1., 1.])
>>> b
array([-1., 1.])
"""
return _unary_func_helper(x, _npi.sign, _np.sign, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def exp(x, out=None, **kwargs):
r"""
Calculate the exponential of all elements in the input array.
Parameters
----------
x : ndarray or scalar
Input values.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array, element-wise exponential of `x`.
This is a scalar if `x` is a scalar.
Examples
--------
>>> np.exp(1)
2.718281828459045
>>> x = np.array([-1, 1, -2, 2])
>>> np.exp(x)
array([0.36787945, 2.7182817 , 0.13533528, 7.389056 ])
"""
return _unary_func_helper(x, _npi.exp, _np.exp, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def expm1(x, out=None, **kwargs):
r"""
Calculate `exp(x) - 1` of all elements in the input array.
Parameters
----------
x : ndarray or scalar
Input values.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array, element-wise exponential minus one: `out = exp(x) - 1`.
This is a scalar if `x` is a scalar.
Examples
--------
>>> np.expm1(1)
1.718281828459045
>>> x = np.array([-1, 1, -2, 2])
>>> np.expm1(x)
array([-0.63212056, 1.71828183, -0.86466472, 6.3890561])
"""
return _unary_func_helper(x, _npi.expm1, _np.expm1, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arcsin(x, out=None, **kwargs):
r"""
Inverse sine, element-wise.
Parameters
----------
x : ndarray or scalar
`y`-coordinate on the unit circle.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
angle : ndarray or scalar
Output array is same shape and type as x. This is a scalar if x is a scalar.
The inverse sine of each element in `x`, in radians and in the
closed interval ``[-pi/2, pi/2]``.
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
Notes
-----
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that :math:`sin(z) = x`. The convention is to
return the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, *arcsin* always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
The inverse sine is also known as `asin` or sin^{-1}.
The output `ndarray` has the same `ctx` as the input `ndarray`.
This function differs from the original `numpy.arcsin
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.arcsin.html>`_ in
the following aspects:
- Only support ndarray or scalar now.
- `where` argument is not supported.
- Complex input is not supported.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79ff.
http://www.math.sfu.ca/~cbm/aands/
"""
return _unary_func_helper(x, _npi.arcsin, _np.arcsin, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arccos(x, out=None, **kwargs):
r"""
Trigonometric inverse cosine, element-wise.
The inverse of cos so that, if y = cos(x), then x = arccos(y).
Parameters
----------
x : ndarray
x-coordinate on the unit circle. For real arguments, the domain is [-1, 1].
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape that
the inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
A tuple (possible only as a keyword argument) must have length equal to the number of outputs.
Returns
----------
angle : ndarray
The angle of the ray intersecting the unit circle at the given x-coordinate in radians [0, pi].
This is a scalar if x is a scalar.
See also
----------
cos, arctan, arcsin
Notes
----------
arccos is a multivalued function: for each x there are infinitely many numbers z such that
cos(z) = x. The convention is to return the angle z whose real part lies in [0, pi].
For real-valued input data types, arccos always returns real output.
For each value that cannot be expressed as a real number or infinity, it yields nan and sets
the invalid floating point error flag.
The inverse cos is also known as acos or cos^-1.
Examples
----------
>>> np.arccos(np.array([1., -1.]))
array([0. , 3.1415927])
"""
return _unary_func_helper(x, _npi.arccos, _np.arccos, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arctan(x, out=None, **kwargs):
r"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.
Parameters
----------
x : ndarray or scalar
Input values.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Out has the same shape as `x`. It lies in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
This is a scalar if `x` is a scalar.
Notes
-----
`arctan` is a multi-valued function: for each `x` there are infinitely
many numbers `z` such that tan(`z`) = `x`. The convention is to return
the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, we do not have support for them yet.
The inverse tangent is also known as `atan` or tan^{-1}.
Examples
--------
>>> x = np.array([0, 1])
>>> np.arctan(x)
array([0. , 0.7853982])
>>> np.pi/4
0.7853981633974483
"""
return _unary_func_helper(x, _npi.arctan, _np.arctan, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def log(x, out=None, **kwargs):
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is logarithm in base
`e`.
Parameters
----------
x : ndarray
Input value. Elements must be of real value.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
This is a scalar if `x` is a scalar.
Notes
-----
Currently only supports data of real values and ``inf`` as input. Returns data of real value, ``inf``, ``-inf`` and
``nan`` according to the input.
This function differs from the original `numpy.log
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.log.html>`_ in
the following aspects:
- Does not support complex number for now
- Input type does not support Python native iterables(list, tuple, ...).
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
Examples
--------
>>> a = np.array([1, np.exp(1), np.exp(2), 0], dtype=np.float64)
>>> np.log(a)
array([ 0., 1., 2., -inf], dtype=float64)
>>> # Using default float32 dtype may lead to slightly different behavior:
>>> a = np.array([1, np.exp(1), np.exp(2), 0], dtype=np.float32)
>>> np.log(a)
array([ 0., 0.99999994, 2., -inf])
>>> np.log(1)
0.0
"""
return _unary_func_helper(x, _npi.log, _np.log, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def degrees(x, out=None, **kwargs):
"""
Convert angles from radians to degrees.
Parameters
----------
x : ndarray
Input value. Elements must be of real value.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The corresponding degree values; if `out` was supplied this is a
reference to it.
This is a scalar if `x` is a scalar.
Notes
-------
This function differs from the original `numpy.degrees
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.degrees.html>`_ in
the following aspects:
- Input type does not support Python native iterables(list, tuple, ...). Only ndarray is supported.
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
Examples
--------
>>> rad = np.arange(12.) * np.pi / 6
>>> np.degrees(rad)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])
>>> # Use specified ``out`` ndarray:
>>> out = np.zeros((rad.shape))
>>> np.degrees(rad, out)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])
>>> out
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])
"""
return _unary_func_helper(x, _npi.degrees, _np.degrees, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def rad2deg(x, out=None, **kwargs):
r"""
Convert angles from radians to degrees.
Parameters
----------
x : ndarray or scalar
Angles in radians.
out : ndarray or None, optional
A location into which the result is stored. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
The corresponding angle in degrees.
This is a scalar if `x` is a scalar.
Notes
-----
"rad2deg(x)" is "x *180 / pi".
This function differs from the original numpy.arange in the following aspects:
- Only support float32 and float64.
- `out` must be in the same size of input.
Examples
--------
>>> np.rad2deg(np.pi/2)
90.0
"""
return _unary_func_helper(x, _npi.rad2deg, _np.rad2deg, out=out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def rint(x, out=None, **kwargs):
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None
A location into which the result is stored.
If provided, it must have the same shape and type as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array is same shape and type as x. This is a scalar if x is a scalar.
Notes
-----
This function differs from the original `numpy.rint
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.rint.html>`_ in
the following way(s):
- only ndarray or scalar is accepted as valid input, tuple of ndarray is not supported
- broadcasting to `out` of different shape is currently not supported
- when input is plain python numerics, the result will not be stored in the `out` param
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 1., 2., 2.])
"""
return _unary_func_helper(x, _npi.rint, _np.rint, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def log2(x, out=None, **kwargs):
"""
Base-2 logarithm of x.
Parameters
----------
x : ndarray or scalar
Input values.
out : ndarray or None
A location into which the result is stored.
If provided, it must have the same shape and type as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The logarithm base two of `x`, element-wise.
This is a scalar if `x` is a scalar.
Notes
-----
This function differs from the original `numpy.log2
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.log2.html>`_ in
the following way(s):
- only ndarray or scalar is accepted as valid input, tuple of ndarray is not supported
- broadcasting to `out` of different shape is currently not supported
- when input is plain python numerics, the result will not be stored in the `out` param
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-inf, 0., 1., 4.])
"""
return _unary_func_helper(x, _npi.log2, _np.log2, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def log1p(x, out=None, **kwargs):
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None
A location into which the result is stored. If provided, it
must have the same shape as the input. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output and input must be the same.
Returns
-------
y : ndarray or scalar
Natural logarithm of 1 + x, element-wise. This is a scalar
if x is a scalar.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
Complex-valued input is not supported.
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> a = np.array([3, 4, 5])
>>> np.log1p(a)
array([1.3862944, 1.609438 , 1.7917595])
"""
return _unary_func_helper(x, _npi.log1p, _np.log1p, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def radians(x, out=None, **kwargs):
"""
Convert angles from degrees to radians.
Parameters
----------
x : ndarray or scalar
Input array in degrees.
out : ndarray or None
A location into which the result is stored.
If provided, it must have the same shape and type as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The corresponding radian values. This is a scalar if x is a scalar.
Notes
-----
This function differs from the original `numpy.radians
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.radians.html>`_ in
the following way(s):
- only ndarray or scalar is accepted as valid input, tuple of ndarray is not supported
- broadcasting to `out` of different shape is currently not supported
- when input is plain python numerics, the result will not be stored in the `out` param
Examples
--------
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([0. , 0.5235988, 1.0471976, 1.5707964, 2.0943952, 2.6179938,
3.1415927, 3.6651914, 4.1887903, 4.712389 , 5.2359877, 5.7595863],
dtype=float32)
"""
return _unary_func_helper(x, _npi.radians, _np.radians, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def deg2rad(x, out=None, **kwargs):
r"""
Convert angles from degrees to radians.
Parameters
----------
x : ndarray or scalar
Angles in degrees.
out : ndarray or None, optional
A location into which the result is stored. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
The corresponding angle in radians.
This is a scalar if `x` is a scalar.
Notes
-----
"deg2rad(x)" is "x * pi / 180".
This function differs from the original numpy.deg2rad in the following aspects:
- Only float32 and float64 are supported.
- `out` must be the same size as the input.
Examples
--------
>>> np.deg2rad(180)
3.1415927
"""
return _unary_func_helper(x, _npi.deg2rad, _np.deg2rad, out=out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def reciprocal(x, out=None, **kwargs):
r"""
Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : ndarray or scalar
The values whose reciprocals are required.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
Output array is same shape and type as x. This is a scalar if x is a scalar.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> x = np.array([1, 2., 3.33])
>>> np.reciprocal(x)
array([1. , 0.5 , 0.3003003])
Notes
-----
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division. For
integer zero the result is an overflow.
The output `ndarray` has the same `ctx` as the input `ndarray`.
This function differs from the original `numpy.reciprocal
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.reciprocal.html>`_ in
the following aspects:
- Only support ndarray and scalar now.
- `where` argument is not supported.
"""
return _unary_func_helper(x, _npi.reciprocal, _np.reciprocal, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def square(x, out=None, **kwargs):
r"""
Return the element-wise square of the input.
Parameters
----------
x : ndarray or scalar
The values whose squares are required.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
Output array is same shape and type as x. This is a scalar if x is a scalar.
Examples
--------
>>> np.square(2.)
4.0
>>> x = np.array([1, 2., -1])
>>> np.square(x)
array([1., 4., 1.])
Notes
-----
The output `ndarray` has the same `ctx` as the input `ndarray`.
This function differs from the original `numpy.square
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.square.html>`_ in
the following aspects:
- Only support ndarray and scalar now.
- `where` argument is not supported.
- Complex input is not supported.
"""
return _unary_func_helper(x, _npi.square, _np.square, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def negative(x, out=None, **kwargs):
r"""
Numerical negative, element-wise.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored.
Returns
-------
y : ndarray or scalar
Returned array or scalar: y = -x. This is a scalar if x is a scalar.
Examples
--------
>>> np.negative(1)
-1
"""
return _unary_func_helper(x, _npi.negative, _np.negative, out=out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def fix(x, out=None, **kwargs):
r"""
Round an array of floats element-wise to nearest integer towards zero.
The rounded values are returned as floats.
Parameters
----------
x : ndarray
An array of floats to be rounded.
out : ndarray, optional
Output array.
Returns
-------
y : ndarray of floats
The rounded values, with the same shape as `x`.
Examples
--------
>>> np.fix(3.14)
3.0
"""
return _unary_func_helper(x, _npi.fix, _np.fix, out=out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def tan(x, out=None, **kwargs):
r"""
Compute tangent element-wise.
Equivalent to np.sin(x)/np.cos(x) element-wise.
Parameters
----------
x : ndarray
Input array.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided,
it must have a shape that the inputs broadcast to. If not provided or None,
a freshly-allocated array is returned. A tuple (possible only as a keyword argument)
must have length equal to the number of outputs.
where : ndarray, optional
Values of True indicate to calculate the ufunc at that position,
values of False indicate to leave the value in the output alone.
Returns
-------
y : ndarray
The corresponding tangent values. This is a scalar if x is a scalar.
Examples
--------
>>> np.tan(0.5)
0.5463024898437905
"""
return _unary_func_helper(x, _npi.tan, _np.tan, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def ceil(x, out=None, **kwargs):
r"""
Return the ceiling of the input, element-wise.
The ceil of the ndarray `x` is the smallest integer `i`, such that
`i >= x`. It is often denoted as :math:`\lceil x \rceil`.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None
A location into which the result is stored. If provided, it
must have the same shape as the input. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output and input must be the same.
Returns
-------
y : ndarray or scalar
The ceiling of each element in `x`, with `float` dtype.
This is a scalar if `x` is a scalar.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
>>> # if you use the out parameter, x and out must be ndarrays.
>>> a = np.array(1)
>>> np.ceil(np.array(3.5), a)
array(4.)
>>> a
array(4.)
"""
return _unary_func_helper(x, _npi.ceil, _np.ceil, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def floor(x, out=None, **kwargs):
r"""
Return the floor of the input, element-wise.
The floor of the ndarray `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\lfloor x \rfloor`.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None
A location into which the result is stored. If provided, it
must have the same shape as the input. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output and input must be the same.
Returns
-------
y : ndarray or scalar
The floor of each element in `x`, with `float` dtype.
This is a scalar if `x` is a scalar.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
>>> # if you use the out parameter, x and out must be ndarrays.
>>> a = np.array(1)
>>> np.floor(np.array(3.5), a)
array(3.)
>>> a
array(3.)
"""
return _unary_func_helper(x, _npi.floor, _np.floor, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def bitwise_not(x, out=None, **kwargs):
r"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
Parameters
----------
x : array_like
Only integer and boolean types are handled.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
out : ndarray or scalar
Result.
This is a scalar if `x` is a scalar.
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> x = np.invert(np.array(13, dtype=np.uint8))
>>> x
242
>>> np.binary_repr(x, width=8)
'11110010'
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
"""
return _unary_func_helper(x, _npi.bitwise_not, _np.bitwise_not, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def invert(x, out=None, **kwargs):
r"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
Parameters
----------
x : array_like
Only integer and boolean types are handled.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
out : ndarray or scalar
Result.
This is a scalar if `x` is a scalar.
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> x = np.invert(np.array(13, dtype=np.uint8))
>>> x
242
>>> np.binary_repr(x, width=8)
'11110010'
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
"""
return _unary_func_helper(x, _npi.bitwise_not, _np.bitwise_not, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def trunc(x, out=None, **kwargs):
r"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : ndarray or scalar
Input data.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
y : ndarray or scalar
The truncated value of each element in `x`.
This is a scalar if `x` is a scalar.
Notes
-----
This function differs from the original numpy.trunc in the following aspects:
- Do not support `where`, a parameter in numpy which indicates where to calculate.
- Cannot cast type automatically. Dtype of `out` must be same as the expected one.
- Cannot broadcast automatically. Shape of `out` must be same as the expected one.
- If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
"""
return _unary_func_helper(x, _npi.trunc, _np.trunc, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def logical_not(x, out=None, **kwargs):
r"""
Compute the truth value of NOT x element-wise.
Parameters
----------
x : ndarray or scalar
Logical NOT is applied to the elements of `x`.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
y : bool or ndarray of bool
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
This is a scalar if `x` is a scalar.
Notes
-----
This function differs from the original numpy.logical_not in the following aspects:
- Do not support `where`, a parameter in numpy which indicates where to calculate.
- Cannot cast type automatically. Dtype of `out` must be same as the expected one.
- Cannot broadcast automatically. Shape of `out` must be same as the expected one.
- If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> x= np.array([True, False, 0, 1])
>>> np.logical_not(x)
array([False, True, True, False])
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([False, False, False, True, True])
"""
return _unary_func_helper(x, _npi.logical_not, _np.logical_not, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arcsinh(x, out=None, **kwargs):
r"""
Inverse hyperbolic sine, element-wise.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
arcsinh : ndarray
Array of the same shape as `x`.
This is a scalar if `x` is a scalar.
Notes
-----
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
This function differs from the original numpy.arcsinh in the following aspects:
- Do not support `where`, a parameter in numpy which indicates where to calculate.
- Do not support complex-valued input.
- Cannot cast type automatically. DType of `out` must be same as the expected one.
- Cannot broadcast automatically. Shape of `out` must be same as the expected one.
- If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> a = np.array([3.2, 5.0])
>>> np.arcsinh(a)
array([1.8798637, 2.3124382])
>>> np.arcsinh(1)
0.8813736
"""
return _unary_func_helper(x, _npi.arcsinh, _np.arcsinh, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arccosh(x, out=None, **kwargs):
r"""
Inverse hyperbolic cosine, element-wise.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
arccosh : ndarray
Array of the same shape as `x`.
This is a scalar if `x` is a scalar.
Notes
-----
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
This function differs from the original numpy.arccosh in the following aspects:
- Do not support `where`, a parameter in numpy which indicates where to calculate.
- Do not support complex-valued input.
- Cannot cast type automatically. Dtype of `out` must be same as the expected one.
- Cannot broadcast automatically. Shape of `out` must be same as the expected one.
- If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> a = np.array([3.2, 5.0])
>>> np.arccosh(a)
array([1.8309381, 2.2924316])
>>> np.arccosh(1)
0.0
"""
return _unary_func_helper(x, _npi.arccosh, _np.arccosh, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def arctanh(x, out=None, **kwargs):
r"""
Inverse hyperbolic tangent, element-wise.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
arctanh : ndarray
Array of the same shape as `x`.
This is a scalar if `x` is a scalar.
Notes
-----
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tanh(z) = x`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
This function differs from the original numpy.arctanh in the following aspects:
- Do not support `where`, a parameter in numpy which indicates where to calculate.
- Do not support complex-valued input.
- Cannot cast type automatically. Dtype of `out` must be same as the expected one.
- Cannot broadcast automatically. Shape of `out` must be same as the expected one.
- If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> a = np.array([0.0, -0.5])
>>> np.arctanh(a)
array([0., -0.54930615])
>>> np.arctanh(0.0)
0.0
"""
return _unary_func_helper(x, _npi.arctanh, _np.arctanh, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
def tile(A, reps):
r"""
Construct an array by repeating A the number of times given by reps.
If `reps` has length ``d``, the result will have dimension of
``max(d, A.ndim)``.
If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new
axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,
or shape (1, 1, 3) for 3-D replication. If this is not the desired
behavior, promote `A` to d-dimensions manually before calling this
function.
If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.
Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
(1, 1, 2, 2).
Parameters
----------
A : ndarray or scalar
An input array or a scalar to repeat.
reps : a single integer or tuple of integers
The number of repetitions of `A` along each axis.
Returns
-------
c : ndarray
The tiled output array.
Examples
--------
>>> a = np.array([0, 1, 2])
>>> np.tile(a, 2)
array([0., 1., 2., 0., 1., 2.])
>>> np.tile(a, (2, 2))
array([[0., 1., 2., 0., 1., 2.],
[0., 1., 2., 0., 1., 2.]])
>>> np.tile(a, (2, 1, 2))
array([[[0., 1., 2., 0., 1., 2.]],
[[0., 1., 2., 0., 1., 2.]]])
>>> b = np.array([[1, 2], [3, 4]])
>>> np.tile(b, 2)
array([[1., 2., 1., 2.],
[3., 4., 3., 4.]])
>>> np.tile(b, (2, 1))
array([[1., 2.],
[3., 4.],
[1., 2.],
[3., 4.]])
>>> c = np.array([1,2,3,4])
>>> np.tile(c,(4,1))
array([[1., 2., 3., 4.],
[1., 2., 3., 4.],
[1., 2., 3., 4.],
[1., 2., 3., 4.]])
Scalar as input:
>>> np.tile(2, 3)
array([2, 2, 2]) # repeating integer `2`
"""
return _unary_func_helper(A, _npi.tile, _np.tile, reps=reps)
# pylint: disable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def split(ary, indices_or_sections, axis=0):
"""
Split an array into multiple sub-arrays.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int or 1-D python tuple, list or set.
If `indices_or_sections` is an integer, N, the array will be divided
into N equal arrays along `axis`. If such a split is not possible,
an error is raised.
If `indices_or_sections` is a 1-D array of sorted integers, the entries
indicate where along `axis` the array is split. For example,
``[2, 3]`` would, for ``axis=0``, result in
- ary[:2]
- ary[2:3]
- ary[3:]
If an index exceeds the dimension of the array along `axis`,
an empty sub-array is returned correspondingly.
axis : int, optional
The axis along which to split, default is 0.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
Raises
------
ValueError
If `indices_or_sections` is given as an integer, but
a split does not result in equal division.
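Examples
--------
(Illustrative sketch; outputs assume the default float32 dtype.)
>>> x = np.arange(9.0)
>>> np.split(x, 3)
[array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]
>>> np.split(x, [3, 5, 6])
[array([0., 1., 2.]), array([3., 4.]), array([5.]), array([6., 7., 8.])]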
"""
axis_size = ary.shape[axis]
if isinstance(indices_or_sections, integer_types):
sections = indices_or_sections
if axis_size % sections:
raise ValueError('array split does not result in an equal division')
section_size = int(axis_size / sections)
indices = [i * section_size for i in range(sections)]
elif isinstance(indices_or_sections, (list, set, tuple)):
indices = [0] + list(indices_or_sections)
else:
raise ValueError('indices_or_sections must be either int, or tuple / list / set of ints')
ret = _npi.split(ary, indices, axis, False)
assert isinstance(ret, list), 'Output of split should be list,' \
' got a return type {}'.format(type(ret))
return ret
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def array_split(ary, indices_or_sections, axis=0):
"""Split an array into multiple sub-arrays.
If `indices_or_sections` is an integer, N, the array will be divided
into N equal arrays along `axis`. If such a split is not possible,
the split is made as even as possible: for an array of length l that
should be split into n sections, it returns l % n sub-arrays of size
l//n + 1 and the rest of size l//n.
If `indices_or_sections` is a 1-D array of sorted integers, the entries
indicate where along `axis` the array is split. For example,
``[2, 3]`` would, for ``axis=0``, result in
- ary[:2]
- ary[2:3]
- ary[3:]
If an index exceeds the dimension of the array along `axis`,
an empty sub-array is returned correspondingly.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int or 1-D Python tuple, list or set.
Param used to determine the number and size of the subarray.
axis : int, optional
The axis along which to split, default is 0.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
Examples
--------
>>> x = np.arange(9.0)
>>> np.array_split(x, 3)
[array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]
>>> np.array_split(x, [3, 5, 6, 8])
[array([0., 1., 2.]), array([3., 4.]), array([5.]), array([6., 7.]), array([])]
>>> x = np.arange(8.0)
>>> np.array_split(x, 3)
[array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])]
>>> x = np.arange(7.0)
>>> np.array_split(x, 3)
[array([0., 1., 2.]), array([3., 4.]), array([5., 6.])]
"""
indices = []
sections = 0
if isinstance(indices_or_sections, integer_types):
sections = indices_or_sections
elif isinstance(indices_or_sections, (list, set, tuple)):
indices = [0] + list(indices_or_sections)
else:
raise ValueError('indices_or_sections must be either int, or tuple / list / set of ints')
ret = _npi.split(ary, indices, axis, False, sections)
if not isinstance(ret, list):
return [ret]
return ret
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def hsplit(ary, indices_or_sections):
"""Split an array into multiple sub-arrays horizontally (column-wise).
This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one
dimension, and with ``axis=1`` otherwise.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int, list of ints or tuple of ints.
If `indices_or_sections` is an integer, N, the array will be divided
into N equal arrays along `axis`. If such a split is not possible,
an error is raised.
If `indices_or_sections` is a list of sorted integers, the entries
indicate where along `axis` the array is split.
If an index exceeds the dimension of the array along `axis`,
an error is raised, so each index must be less than or equal to
the dimension of the array along `axis`.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
Notes
------
- If `indices_or_sections` is given as an integer, but a split
does not result in equal division, a ValueError is raised.
- If `indices_or_sections` is the integer 1, an error is raised,
because a single output from split is not supported yet.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(16.0).reshape(4, 4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])
>>> np.hsplit(x, 2)
[array([[ 0., 1.],
[ 4., 5.],
[ 8., 9.],
[12., 13.]]),
array([[ 2., 3.],
[ 6., 7.],
[10., 11.],
[14., 15.]])]
>>> np.hsplit(x, [3, 6])
[array([[ 0., 1., 2.],
[ 4., 5., 6.],
[ 8., 9., 10.],
[12., 13., 14.]]),
array([[ 3.],
[ 7.],
[11.],
[15.]]),
array([], shape=(4, 0), dtype=float32)]
With a higher dimensional array the split is still along the second axis.
>>> x = np.arange(8.0).reshape(2, 2, 2)
>>> x
array([[[ 0., 1.],
[ 2., 3.]],
[[ 4., 5.],
[ 6., 7.]]])
>>> np.hsplit(x, 2)
[array([[[ 0., 1.]],
[[ 4., 5.]]]),
array([[[ 2., 3.]],
[[ 6., 7.]]])]
If ``ary`` has one dimension, the split is along ``axis=0``.
>>> x = np.arange(4)
>>> x
array([0., 1., 2., 3.])
>>> np.hsplit(x, 2)
[array([0., 1.]), array([2., 3.])]
An empty sub-array can be produced by passing a repeated index:
>>> np.hsplit(x, [2, 2])
[array([0., 1.]), array([], dtype=float32), array([2., 3.])]
"""
if len(ary.shape) < 1:
raise ValueError('hsplit only works on arrays of 1 or more dimensions')
indices = []
sections = 0
if isinstance(indices_or_sections, integer_types):
sections = indices_or_sections
elif isinstance(indices_or_sections, (list, set, tuple)):
indices = [0] + list(indices_or_sections)
else:
raise ValueError('indices_or_sections must be either int, or tuple / list / set of ints')
ret = _npi.hsplit(ary, indices, 1, False, sections)
if not isinstance(ret, list):
return [ret]
return ret
# pylint: enable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def vsplit(ary, indices_or_sections):
r"""
vsplit(ary, indices_or_sections)
Split an array into multiple sub-arrays vertically (row-wise).
``vsplit`` is equivalent to ``split`` with `axis=0` (default): the array is always split
along the first axis regardless of the array dimension.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int or 1 - D Python tuple, list or set.
If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays
along axis 0. If such a split is not possible, an error is raised.
If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where
along axis 0 the array is split. For example, ``[2, 3]`` would result in
- ary[:2]
- ary[2:3]
- ary[3:]
If an index exceeds the dimension of the array along axis 0, an error will be thrown.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Notes
-------
This function differs from the original `numpy.vsplit
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.vsplit.html>`_ in
the following aspects:
- Currently parameter ``indices_or_sections`` does not support ndarray, but supports scalar,
tuple and list.
- In ``indices_or_sections``, if an index exceeds the dimension of the array along axis 0,
an error will be thrown.
Examples
--------
>>> x = np.arange(16.0).reshape(4, 4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
>>> np.vsplit(x, 2)
[array([[0., 1., 2., 3.],
[4., 5., 6., 7.]]), array([[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])]
With a higher dimensional array the split is still along the first axis.
>>> x = np.arange(8.0).reshape(2, 2, 2)
>>> x
array([[[ 0., 1.],
[ 2., 3.]],
[[ 4., 5.],
[ 6., 7.]]])
>>> np.vsplit(x, 2)
[array([[[0., 1.],
[2., 3.]]]), array([[[4., 5.],
[6., 7.]]])]
"""
if len(ary.shape) < 2:
raise ValueError("vsplit only works on arrays of 2 or more dimensions")
return split(ary, indices_or_sections, 0)
# pylint: disable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def dsplit(ary, indices_or_sections):
"""
Split array into multiple sub-arrays along the 3rd axis (depth).
Please refer to the `split` documentation. `dsplit` is equivalent
to `split` with ``axis=2``, the array is always split along the third
axis provided the array dimension is greater than or equal to 3.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int or 1 - D Python tuple, list or set.
If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays
along axis 2. If such a split is not possible, an error is raised.
If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where
along axis 2 the array is split. For example, ``[2, 3]`` would result in
- ary[:, :, :2]
- ary[:, :, 2:3]
- ary[:, :, 3:]
If an index exceeds the dimension of the array along axis 2, an error will be thrown.
Examples
--------
>>> x = np.arange(16.0).reshape(2, 2, 4)
>>> x
array([[[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.]],
[[ 8., 9., 10., 11.],
[12., 13., 14., 15.]]])
>>> np.dsplit(x, 2)
[array([[[ 0., 1.],
[ 4., 5.]],
[[ 8., 9.],
[12., 13.]]]), array([[[ 2., 3.],
[ 6., 7.]],
[[10., 11.],
[14., 15.]]])]
>>> np.dsplit(x, np.array([3, 6]))
[array([[[ 0., 1., 2.],
[ 4., 5., 6.]],
[[ 8., 9., 10.],
[12., 13., 14.]]]),
array([[[ 3.],
[ 7.]],
[[11.],
[15.]]]),
array([], shape=(2, 2, 0), dtype=float32)]
"""
if len(ary.shape) < 3:
raise ValueError('dsplit only works on arrays of 3 or more dimensions')
return split(ary, indices_or_sections, 2)
# pylint: enable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def concatenate(seq, axis=0, out=None):
"""
Join a sequence of arrays along an existing axis.
Parameters
----------
a1, a2, ... : sequence of ndarray
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. If axis is None,
arrays are flattened before use. Default is 0.
out : ndarray, optional
If provided, the destination to place the result. The shape must be
correct, matching that of what concatenate would have returned if no
out argument were specified.
Returns
-------
res : ndarray
The concatenated array.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> b = np.array([[5, 6]])
>>> np.concatenate((a, b), axis=0)
array([[1., 2.],
[3., 4.],
[5., 6.]])
>>> np.concatenate((a, b), axis=None)
array([1., 2., 3., 4., 5., 6.])
>>> np.concatenate((a, b.T), axis=1)
array([[1., 2., 5.],
[3., 4., 6.]])
"""
return _npi.concatenate(*seq, axis=axis, out=out)
@set_module('mxnet.ndarray.numpy')
def append(arr, values, axis=None): # pylint: disable=redefined-outer-name
"""
Append values to the end of an array.
Parameters
----------
arr : ndarray
Values are appended to a copy of this array.
values : ndarray
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If
`axis` is not specified, `values` can be any shape and will be
flattened before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not
given, both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that
`append` does not occur in-place: a new array is allocated and
filled. If `axis` is None, `out` is a flattened array.
Examples
--------
>>> np.append(np.array([1, 2, 3]), np.array([[4, 5, 6],[7, 8, 9]]))
array([1., 2., 3., 4., 5., 6., 7., 8., 9.])
When `axis` is specified, `values` must have the correct shape.
>>> np.append(np.array([[1, 2, 3], [4, 5, 6]]), np.array([[7, 8, 9]]), axis=0)
array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]])
"""
return _npi.concatenate(arr, values, axis=axis, out=None)
@set_module('mxnet.ndarray.numpy')
def stack(arrays, axis=0, out=None):
"""Join a sequence of arrays along a new axis.
The axis parameter specifies the index of the new axis in the dimensions of the result.
For example, if `axis=0` it will be the first dimension and if `axis=-1` it will be the last dimension.
Parameters
----------
arrays : sequence of ndarray
Each array must have the same shape.
axis : int, optional
The axis in the result array along which the input arrays are stacked.
out : ndarray, optional
If provided, the destination to place the result. The shape must be correct,
matching that of what stack would have returned if no out argument were specified.
Returns
-------
stacked : ndarray
The stacked array has one more dimension than the input arrays.
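Examples
--------
(Illustrative sketch; outputs assume the default float32 dtype.)
>>> arrays = [np.array([1, 2]), np.array([3, 4])]
>>> np.stack(arrays, axis=0)
array([[1., 2.],
       [3., 4.]])
>>> np.stack(arrays, axis=1)
array([[1., 3.],
       [2., 4.]])
"""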
def get_list(arrays):
if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
raise ValueError("expected iterable for arrays but got {}".format(type(arrays)))
return [arr for arr in arrays]
arrays = get_list(arrays)
return _npi.stack(*arrays, axis=axis, out=out)
@set_module('mxnet.ndarray.numpy')
def vstack(arrays, out=None):
r"""Stack arrays in sequence vertically (row wise).
This is equivalent to concatenation along the first axis after 1-D arrays
of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
`vsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate` and `stack`
provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of ndarrays
The arrays must have the same shape along all but the first axis.
1-D arrays must have the same length.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays, will be at least 2-D.
Examples
--------
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.vstack((a, b))
array([[1., 2., 3.],
[2., 3., 4.]])
>>> a = np.array([[1], [2], [3]])
>>> b = np.array([[2], [3], [4]])
>>> np.vstack((a, b))
array([[1.],
[2.],
[3.],
[2.],
[3.],
[4.]])
"""
def get_list(arrays):
if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
raise ValueError("expected iterable for arrays but got {}".format(type(arrays)))
return [arr for arr in arrays]
arrays = get_list(arrays)
return _npi.vstack(*arrays)
@set_module('mxnet.ndarray.numpy')
def row_stack(arrays):
r"""Stack arrays in sequence vertically (row wise).
This is equivalent to concatenation along the first axis after 1-D arrays
of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
`vsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate` and `stack`
provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of ndarrays
The arrays must have the same shape along all but the first axis.
1-D arrays must have the same length.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays, will be at least 2-D.
Examples
--------
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.vstack((a, b))
array([[1., 2., 3.],
[2., 3., 4.]])
>>> a = np.array([[1], [2], [3]])
>>> b = np.array([[2], [3], [4]])
>>> np.vstack((a, b))
array([[1.],
[2.],
[3.],
[2.],
[3.],
[4.]])
"""
def get_list(arrays):
if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
raise ValueError("expected iterable for arrays but got {}".format(type(arrays)))
return [arr for arr in arrays]
arrays = get_list(arrays)
return _npi.vstack(*arrays)
@set_module('mxnet.ndarray.numpy')
def column_stack(tup):
"""
Stack 1-D arrays as columns into a 2-D array.
Take a sequence of 1-D arrays and stack them as columns
to make a single 2-D array. 2-D arrays are stacked as-is,
just like with `hstack`. 1-D arrays are turned into 2-D columns
first.
Returns
--------
stacked : 2-D array
The array formed by stacking the given arrays.
See Also
--------
stack, hstack, vstack, concatenate
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.column_stack((a,b))
array([[1., 2.],
[2., 3.],
[3., 4.]])
"""
return _npi.column_stack(*tup)
@set_module('mxnet.ndarray.numpy')
def hstack(arrays):
"""
Stack arrays in sequence horizontally (column wise).
This is equivalent to concatenation along the second axis,
except for 1-D arrays where it concatenates along the first axis.
Rebuilds arrays divided by hsplit.
This function makes most sense for arrays with up to 3 dimensions.
For instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions concatenate,
stack and block provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of ndarrays
The arrays must have the same shape along all but the second axis, except 1-D arrays which can be any length.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays.
Examples
--------
>>> from mxnet import np,npx
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.hstack((a,b))
array([1., 2., 3., 2., 3., 4.])
>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[2],[3],[4]])
>>> np.hstack((a,b))
array([[1., 2.],
[2., 3.],
[3., 4.]])
"""
return _npi.hstack(*arrays)
@set_module('mxnet.ndarray.numpy')
def dstack(arrays):
"""
Stack arrays in sequence depth wise (along third axis).
This is equivalent to concatenation along the third axis after 2-D arrays
of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape
`(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by
`dsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate`, `stack` and
`block` provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of arrays
The arrays must have the same shape along all but the third axis.
1-D or 2-D arrays must have the same shape.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays, will be at least 3-D.
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.dstack((a,b))
array([[[1, 2],
[2, 3],
[3, 4]]])
>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[2],[3],[4]])
>>> np.dstack((a,b))
array([[[1, 2]],
[[2, 3]],
[[3, 4]]])
"""
return _npi.dstack(*arrays)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def maximum(x1, x2, out=None, **kwargs):
"""
Returns element-wise maximum of the input arrays with broadcasting.
Parameters
----------
x1, x2 : scalar or mxnet.numpy.ndarray
The arrays holding the elements to be compared. They must have the same shape,
or shapes that can be broadcast to a single shape.
Returns
-------
out : mxnet.numpy.ndarray or scalar
The maximum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
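Examples
--------
(Illustrative sketch; outputs assume the default float32 dtype.)
>>> np.maximum(np.array([2, 3, 4]), np.array([1, 5, 2]))
array([2., 5., 4.])
>>> np.maximum(np.array([1, 2, 3]), 5)
array([5., 5., 5.])
"""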
return _ufunc_helper(x1, x2, _npi.maximum, _np.maximum, _npi.maximum_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def minimum(x1, x2, out=None, **kwargs):
"""
Returns element-wise minimum of the input arrays with broadcasting.
Parameters
----------
x1, x2 : scalar or mxnet.numpy.ndarray
The arrays holding the elements to be compared. They must have the same shape,
or shapes that can be broadcast to a single shape.
Returns
-------
out : mxnet.numpy.ndarray or scalar
The minimum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
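Examples
--------
(Illustrative sketch; outputs assume the default float32 dtype.)
>>> np.minimum(np.array([2, 3, 4]), np.array([1, 5, 2]))
array([1., 3., 2.])
>>> np.minimum(np.array([1, 2, 3]), 5)
array([1., 2., 3.])
"""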
return _ufunc_helper(x1, x2, _npi.minimum, _np.minimum, _npi.minimum_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
def swapaxes(a, axis1, axis2):
"""Interchange two axes of an array.
Parameters
----------
a : ndarray
Input array.
axis1 : int
First axis.
axis2 : int
Second axis.
Returns
-------
a_swapped : ndarray
Swapped array. This is always a copy of the input array.
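Examples
--------
(Illustrative sketch; outputs assume the default float32 dtype.)
>>> x = np.array([[1, 2, 3]])
>>> np.swapaxes(x, 0, 1)
array([[1.],
       [2.],
       [3.]])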
"""
return _npi.swapaxes(a, dim1=axis1, dim2=axis2)
@set_module('mxnet.ndarray.numpy')
def clip(a, a_min, a_max, out=None):
"""clip(a, a_min, a_max, out=None)
Clip (limit) the values in an array.
Given an interval, values outside the interval are clipped to
the interval edges. For example, if an interval of ``[0, 1]``
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
Parameters
----------
a : ndarray
Array containing elements to clip.
a_min : scalar or `None`
Minimum value. If `None`, clipping is not performed on lower
interval edge. Not more than one of `a_min` and `a_max` may be
`None`.
a_max : scalar or `None`
Maximum value. If `None`, clipping is not performed on upper
interval edge. Not more than one of `a_min` and `a_max` may be
`None`.
out : ndarray, optional
The results will be placed in this array. It may be the input
array for in-place clipping. `out` must be of the right shape
to hold the output. Its type is preserved.
Returns
-------
clipped_array : ndarray
An array with the elements of `a`, but where values
< `a_min` are replaced with `a_min`, and those > `a_max`
with `a_max`.
Notes
-----
ndarray `a_min` and `a_max` are not supported.
Examples
--------
>>> a = np.arange(10)
>>> np.clip(a, 1, 8)
array([1., 1., 2., 3., 4., 5., 6., 7., 8., 8.], dtype=float32)
>>> a
array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.], dtype=float32)
>>> np.clip(a, 3, 6, out=a)
array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.], dtype=float32)
"""
if a_min is None and a_max is None:
raise ValueError('array_clip: must set either max or min')
if a_min is None:
a_min = float('-inf')
if a_max is None:
a_max = float('inf')
return _npi.clip(a, a_min, a_max, out=out)
@set_module('mxnet.ndarray.numpy')
def argmax(a, axis=None, out=None):
r"""
Returns the indices of the maximum values along an axis.
Parameters
----------
a : ndarray
Input array. Only support ndarrays of dtype `float16`, `float32`, and `float64`.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
index_array : ndarray of indices whose dtype is same as the input ndarray.
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
Notes
-----
In case of multiple occurrences of the maximum values, the indices
corresponding to the first occurrence are returned.
This function differs from the original `numpy.argmax
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.html>`_ in
the following aspects:
- Input type does not support Python native iterables(list, tuple, ...).
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10., 11., 12.],
[13., 14., 15.]])
>>> np.argmax(a)
array(5.)
>>> np.argmax(a, axis=0)
array([1., 1., 1.])
>>> np.argmax(a, axis=1)
array([2., 2.])
>>> b = np.arange(6)
>>> b[1] = 5
>>> b
array([0., 5., 2., 3., 4., 5.])
>>> np.argmax(b) # Only the first occurrence is returned.
array(1.)
Specify ``out`` ndarray:
>>> a = np.arange(6).reshape(2,3) + 10
>>> b = np.zeros((2,))
>>> np.argmax(a, axis=1, out=b)
array([2., 2.])
>>> b
array([2., 2.])
"""
return _npi.argmax(a, axis=axis, keepdims=False, out=out)
@set_module('mxnet.ndarray.numpy')
def argmin(a, axis=None, out=None):
r"""
Returns the indices of the minimum values along an axis.
Parameters
----------
a : ndarray
Input array. Only support ndarrays of dtype `float16`, `float32`, and `float64`.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : ndarray or None, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of indices whose dtype is same as the input ndarray.
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
Notes
-----
In case of multiple occurrences of the minimum values, the indices
corresponding to the first occurrence are returned.
This function differs from the original `numpy.argmin
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmin.html>`_ in
the following aspects:
- Input type does not support Python native iterables(list, tuple, ...).
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10., 11., 12.],
[13., 14., 15.]])
>>> np.argmin(a)
array(0.)
>>> np.argmin(a, axis=0)
array([0., 0., 0.])
>>> np.argmin(a, axis=1)
array([0., 0.])
>>> b = np.arange(6)
>>> b[2] = 0
>>> b
array([0., 1., 0., 3., 4., 5.])
>>> np.argmin(b)  # Only the first occurrence is returned.
array(0.)
Specify ``out`` ndarray:
>>> a = np.arange(6).reshape(2,3) + 10
>>> b = np.zeros((2,))
>>> np.argmin(a, axis=1, out=b)
array([0., 0.])
>>> b
array([0., 0.])
"""
return _npi.argmin(a, axis=axis, keepdims=False, out=out)
@set_module('mxnet.ndarray.numpy')
def average(a, axis=None, weights=None, returned=False, out=None):
"""
Compute the weighted average along the specified axis.
Parameters
--------
a : ndarray
Array containing data to be averaged.
axis : None or int or tuple of ints, optional
Axis or axes along which to average a.
The default, axis=None, will average over
all of the elements of the input array.
If axis is negative it counts from the last to the first axis.
New in version 1.7.0.
If axis is a tuple of ints, averaging is
performed on all of the axes specified in the tuple
instead of a single axis or all the axes as before.
weights : ndarray, optional
An array of weights associated with the values in a; it must have the same dtype as a.
Each value in a contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of a along the given axis) or of the same shape as a.
If weights=None, then all data in a are assumed to have a weight equal to one.
The 1-D calculation is: avg = sum(a * weights) / sum(weights)
The only constraint on weights is that sum(weights) must not be 0.
returned : bool, optional
Default is False.
If True, the tuple (average, sum_of_weights) is returned,
otherwise only the average is returned.
If weights=None, sum_of_weights is equivalent to
the number of elements over which the average is taken.
out : ndarray, optional
If provided, the calculation is done into this array.
Returns
--------
retval, [sum_of_weights] : ndarray
Return the average along the specified axis.
When returned is True, return a tuple with the average as the first element
and the sum of the weights as the second element. sum_of_weights is of the same type as retval.
If a is integral, the result dtype will be float32, otherwise it will be the same as dtype of a.
Raises
--------
MXNetError
- When all weights along axis sum to zero.
- When the length of 1D weights is not the same as the shape of a along axis.
- When 1D weights are given but the axis is not specified or is not an int.
- When the shape of weights and a differ, but weights are not 1D.
See also
--------
mean
Notes
--------
This function differs from the original `numpy.average
<https://numpy.org/devdocs/reference/generated/numpy.average.html>`_ in
the following way(s):
- Does not guarantee the same behavior with numpy when given float16 dtype and overflow happens
- Does not support complex dtype
- The dtypes of a and weights must be the same
- Integral a results in float32 returned dtype, not float64
Examples
--------
>>> data = np.arange(1, 5)
>>> data
array([1., 2., 3., 4.])
>>> np.average(data)
array(2.5)
>>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1))
array(4.)
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0., 1.],
[2., 3.],
[4., 5.]])
>>> weights = np.array([0.25, 0.75])
>>> weights
array([0.25, 0.75])
>>> np.average(data, axis=1, weights=weights)
array([0.75, 2.75, 4.75])
"""
if weights is None:
return _npi.average(a, axis=axis, weights=None, returned=returned, weighted=False, out=out)
else:
return _npi.average(a, axis=axis, weights=weights, returned=returned, out=out)
@set_module('mxnet.ndarray.numpy')
def mean(a, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""
mean(a, axis=None, dtype=None, out=None, keepdims=False)
Compute the arithmetic mean along the specified axis.
Returns the average of the array elements.
The average is taken over the flattened array by default, otherwise over the specified axis.
Parameters
----------
a : ndarray
ndarray containing numbers whose mean is desired.
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to compute the mean of the flattened array.
If this is a tuple of ints, a mean is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the mean. For integer inputs, the default is float32;
for floating point inputs, it is the same as the input dtype.
out : ndarray, optional
Alternate output array in which to place the result. The default is None; if provided,
it must have the same shape and type as the expected output
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the result
as dimensions with size one. With this option, the result will broadcast correctly
against the input array.
If the default value is passed, then keepdims will not be passed through to the mean
method of sub-classes of ndarray, however any non-default value will be. If the sub-class
method does not implement keepdims any exceptions will be raised.
Returns
-------
m : ndarray, see dtype parameter above
If out=None, returns a new array containing the mean values,
otherwise a reference to the output array is returned.
Notes
-----
This function differs from the original `numpy.mean
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html>`_ in
the following way(s):
- only ndarray is accepted as valid input, python iterables or scalar is not supported
- default data type for integer input is float32
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.mean(a)
array(2.5)
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0,:] = 1.0
>>> a[1,:] = 0.1
>>> np.mean(a)
array(0.55)
>>> np.mean(a, dtype=np.float64)
array(0.55)
"""
return _npi.mean(a, axis=axis, dtype=dtype, keepdims=keepdims, out=out)
@set_module('mxnet.ndarray.numpy')
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: disable=too-many-arguments
"""
Compute the standard deviation along the specified axis.
Returns the standard deviation, a measure of the spread of a distribution,
of the array elements. The standard deviation is computed for the
flattened array by default, otherwise over the specified axis.
Parameters
----------
a : ndarray
Calculate the standard deviation of these values.
axis : None or int or tuple of ints, optional
Axis or axes along which the standard deviation is computed. The
default is to compute the standard deviation of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a standard deviation is performed over
multiple axes, instead of a single axis or all the axes as before.
dtype : dtype, optional
Type to use in computing the standard deviation. For arrays of
integer type the default is float32, for arrays of float types it is
the same as the array type.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type (of the calculated
values) will be cast if necessary.
ddof : int, optional
Means Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
By default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `std` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
Returns
-------
standard_deviation : ndarray, see dtype parameter above.
If `out` is None, return a new array containing the standard deviation,
otherwise return a reference to the output array.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.std(a)
1.1180339887498949 # may vary
>>> np.std(a, axis=0)
array([1., 1.])
>>> np.std(a, axis=1)
array([0.5, 0.5])
In single precision, std() can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.std(a)
array(0.45)
>>> np.std(a, dtype=np.float64)
array(0.45, dtype=float64)
"""
return _npi.std(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)
@set_module('mxnet.ndarray.numpy')
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: disable=too-many-arguments
"""
Compute the variance along the specified axis.
Returns the variance of the array elements, a measure of the spread of a
distribution. The variance is computed for the flattened array by
default, otherwise over the specified axis.
Parameters
----------
a : ndarray
Array containing numbers whose variance is desired. If `a` is not an
array, a conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which the variance is computed. The default is to
compute the variance of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a variance is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the variance. For arrays of integer type
the default is `float32`; for arrays of float types it is the same as
the array type.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output, but the type is cast if
necessary.
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in the calculation is
``N - ddof``, where ``N`` represents the number of elements. By
default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `var` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
Returns
-------
variance : ndarray, see dtype parameter above
If ``out=None``, returns a new array containing the variance;
otherwise, a reference to the output array is returned.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.var(a)
array(1.25)
>>> np.var(a, axis=0)
array([1., 1.])
>>> np.var(a, axis=1)
array([0.25, 0.25])
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.var(a)
array(0.2025)
>>> np.var(a, dtype=np.float64)
array(0.2025, dtype=float64)
>>> ((1-0.55)**2 + (0.1-0.55)**2)/2
0.2025
"""
return _npi.var(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)
# pylint: disable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
def indices(dimensions, dtype=_np.int32, ctx=None):
"""Return an array representing the indices of a grid.
Compute an array where the subarrays contain index values 0,1,...
varying only along the corresponding axis.
Parameters
----------
dimensions : sequence of ints
The shape of the grid.
dtype : data-type, optional
The desired data-type for the array. Default is `int32`.
ctx : device context, optional
Device context on which the memory is allocated. Default is
`mxnet.context.current_context()`.
Returns
-------
grid : ndarray
The array of grid indices,
``grid.shape = (len(dimensions),) + tuple(dimensions)``.
Notes
-----
The output shape is obtained by prepending the number of dimensions
in front of the tuple of dimensions, i.e. if `dimensions` is a tuple
``(r0, ..., rN-1)`` of length ``N``, the output shape is
``(N,r0,...,rN-1)``.
The subarrays ``grid[k]`` contains the N-D array of indices along the
``k-th`` axis. Explicitly::
grid[k,i0,i1,...,iN-1] = ik
Examples
--------
>>> grid = np.indices((2, 3))
>>> grid.shape
(2, 2, 3)
>>> grid[0] # row indices
array([[0, 0, 0],
       [1, 1, 1]], dtype=int32)
>>> grid[1] # column indices
array([[0, 1, 2],
       [0, 1, 2]], dtype=int32)
The indices can be used as an index into an array.
>>> x = np.arange(20).reshape(5, 4)
>>> row, col = np.indices((2, 3))
>>> x[row, col]
array([[0., 1., 2.],
[4., 5., 6.]])
Note that it would be more straightforward in the above example to
extract the required elements directly with ``x[:2, :3]``.
"""
if isinstance(dimensions, (tuple, list)):
if ctx is None:
ctx = current_context()
return _npi.indices(dimensions=dimensions, dtype=dtype, ctx=ctx)
else:
raise ValueError("The dimensions must be sequence of ints")
# pylint: enable=redefined-outer-name
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def copysign(x1, x2, out=None, **kwargs):
r"""
Change the sign of x1 to that of x2, element-wise.
If `x2` is a scalar, its sign will be copied to all elements of `x1`.
Parameters
----------
x1 : ndarray or scalar
Values to change the sign of.
x2 : ndarray or scalar
The sign of `x2` is copied to `x1`.
out : ndarray or None, optional
A location into which the result is stored. It must be of the
right shape and right type to hold the output. If not provided
or `None`,a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
The values of `x1` with the sign of `x2`.
This is a scalar if both `x1` and `x2` are scalars.
Notes
-------
This function differs from the original `numpy.copysign
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.copysign.html>`_ in
the following aspects:
- ``where`` param is not supported.
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> a = np.array([-1, 0, 1])
>>> np.copysign(a, -1.1)
array([-1., -0., -1.])
>>> np.copysign(a, np.arange(3)-1)
array([-1., 0., 1.])
"""
return _ufunc_helper(x1, x2, _npi.copysign, _np.copysign, _npi.copysign_scalar, _npi.rcopysign_scalar, out)
@set_module('mxnet.ndarray.numpy')
def ravel(x, order='C'):
r"""
ravel(x)
Return a contiguous flattened array.
A 1-D array, containing the elements of the input, is returned. A copy is
made only if needed.
Parameters
----------
x : ndarray
Input array. The elements in `x` are read in row-major, C-style order and
packed as a 1-D array.
order : `C`, optional
Only support row-major, C-style order.
Returns
-------
y : ndarray
y is an array of the same subtype as `x`, with shape ``(x.size,)``.
Note that matrices are special cased for backward compatibility, if `x`
is a matrix, then y is a 1-D ndarray.
Notes
-----
This function differs from the original numpy.ravel in the following aspects:
- Only support row-major, C-style order.
Examples
--------
It is equivalent to ``reshape(x, -1)``.
>>> x = np.array([[1, 2, 3], [4, 5, 6]])
>>> print(np.ravel(x))
[1. 2. 3. 4. 5. 6.]
>>> print(x.reshape(-1))
[1. 2. 3. 4. 5. 6.]
>>> print(np.ravel(x.T))
[1. 4. 2. 5. 3. 6.]
"""
if order == 'F':
raise NotImplementedError('order {} is not supported'.format(order))
if isinstance(x, numeric_types):
return _np.reshape(x, -1)
elif isinstance(x, NDArray):
return _npi.reshape(x, -1)
else:
raise TypeError('type {} not supported'.format(str(type(x))))
def unravel_index(indices, shape, order='C'): # pylint: disable=redefined-outer-name
"""
Converts a flat index or array of flat indices into a tuple of coordinate arrays.
Parameters:
-------------
indices : array_like
An integer array whose elements are indices into the flattened version of an array of dimensions shape.
Before version 1.6.0, this function accepted just one index value.
shape : tuple of ints
The shape of the array to use for unraveling indices.
Returns:
-------------
unraveled_coords : tuple of ndarray
Each array in the tuple has the same shape as the indices array and
gives the coordinates along one dimension of the unravelled indices.
Examples:
-------------
>>> np.unravel_index([22, 41, 37], (7,6))
([3. 6. 6.]
[4. 5. 1.])
>>> np.unravel_index(1621, (6,7,8,9))
(3, 1, 4, 1)
"""
if order == 'C':
if isinstance(indices, numeric_types):
return _np.unravel_index(indices, shape)
ret = _npi.unravel_index_fallback(indices, shape=shape)
ret_list = []
for item in ret:
ret_list += [item]
return tuple(ret_list)
else:
raise NotImplementedError('Do not support column-major (Fortran-style) order at this moment')
def diag_indices_from(arr):
"""
This returns a tuple of indices that can be used to access the main diagonal of an array
a with a.ndim >= 2 dimensions and shape (n, n, ..., n). For a.ndim = 2 this is
the usual diagonal, for a.ndim > 2 this is the set of indices to access
a[i, i, ..., i] for i = [0..n-1].
Parameters:
-------------
arr : ndarray
Input array for accessing the main diagonal. All dimensions
should have equal length.
Return:
-------------
diag: tuple of ndarray
indices of the main diagonal.
Examples:
-------------
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> idx = np.diag_indices_from(a)
>>> idx
(array([0, 1, 2, 3]), array([0, 1, 2, 3]))
>>> a[idx] = 100
>>> a
array([[100, 1, 2, 3],
[ 4, 100, 6, 7],
[ 8, 9, 100, 11],
[ 12, 13, 14, 100]])
"""
return tuple(_npi.diag_indices_from(arr))
@set_module('mxnet.ndarray.numpy')
def hanning(M, dtype=_np.float32, ctx=None):
r"""Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
dtype : str or numpy.dtype, optional
An optional value type. Default is `float32`. Note that you need to
select numpy.float32 or float64 in this operator.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
blackman, hamming
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The Hanning was named for Julius von Hann, an Austrian meteorologist.
It is also known as the Cosine Bell. Some authors prefer that it be
called a Hann window, to help avoid confusion with the very similar
Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hanning(12)
array([0. , 0.07937324, 0.29229254, 0.5711574 , 0.8274304 ,
0.9797465 , 0.97974646, 0.82743025, 0.5711573 , 0.29229245,
0.07937312, 0. ])
Plot the window and its frequency response:
>>> import matplotlib.pyplot as plt
>>> window = np.hanning(51)
>>> plt.plot(window.asnumpy())
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
Text(0.5, 1.0, 'Hann window')
>>> plt.ylabel("Amplitude")
Text(0, 0.5, 'Amplitude')
>>> plt.xlabel("Sample")
Text(0.5, 0, 'Sample')
>>> plt.show()
"""
if ctx is None:
ctx = current_context()
return _npi.hanning(M, dtype=dtype, ctx=ctx)
@set_module('mxnet.ndarray.numpy')
def hamming(M, dtype=_np.float32, ctx=None):
r"""Return the hamming window.
The hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
dtype : str or numpy.dtype, optional
An optional value type. Default is `float32`. Note that you need to
select numpy.float32 or float64 in this operator.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
blackman, hanning
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey
and is described in Blackman and Tukey. It was recommended for
smoothing the truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
https://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([0.08000001, 0.15302339, 0.34890914, 0.6054648 , 0.841236 ,
0.9813669 , 0.9813668 , 0.8412359 , 0.6054647 , 0.34890908,
0.15302327, 0.08000001])
Plot the window and its frequency response:
>>> import matplotlib.pyplot as plt
>>> window = np.hamming(51)
>>> plt.plot(window.asnumpy())
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("hamming window")
Text(0.5, 1.0, 'hamming window')
>>> plt.ylabel("Amplitude")
Text(0, 0.5, 'Amplitude')
>>> plt.xlabel("Sample")
Text(0.5, 0, 'Sample')
>>> plt.show()
"""
if ctx is None:
ctx = current_context()
return _npi.hamming(M, dtype=dtype, ctx=ctx)
@set_module('mxnet.ndarray.numpy')
def blackman(M, dtype=_np.float32, ctx=None):
r"""Return the Blackman window.
The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
dtype : str or numpy.dtype, optional
An optional value type. Default is `float32`. Note that you need to
select numpy.float32 or float64 in this operator.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
hamming, hanning
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \cos(2\pi n/{M-1}) + 0.08 \cos(4\pi n/{M-1})
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> np.blackman(12)
array([-1.4901161e-08, 3.2606423e-02, 1.5990365e-01, 4.1439798e-01,
7.3604530e-01, 9.6704686e-01, 9.6704674e-01, 7.3604506e-01,
4.1439781e-01, 1.5990359e-01, 3.2606363e-02, -1.4901161e-08])
Plot the window and its frequency response:
>>> import matplotlib.pyplot as plt
>>> window = np.blackman(51)
>>> plt.plot(window.asnumpy())
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("blackman window")
Text(0.5, 1.0, 'blackman window')
>>> plt.ylabel("Amplitude")
Text(0, 0.5, 'Amplitude')
>>> plt.xlabel("Sample")
Text(0.5, 0, 'Sample')
>>> plt.show()
"""
if ctx is None:
ctx = current_context()
return _npi.blackman(M, dtype=dtype, ctx=ctx)
@set_module('mxnet.ndarray.numpy')
def flip(m, axis=None, out=None):
r"""
flip(m, axis=None, out=None)
Reverse the order of elements in an array along the given axis.
The shape of the array is preserved, but the elements are reordered.
Parameters
----------
m : ndarray or scalar
Input array.
axis : None or int or tuple of ints, optional
Axis or axes along which to flip over. The default,
axis=None, will flip over all of the axes of the input array.
If axis is negative it counts from the last to the first axis.
If axis is a tuple of ints, flipping is performed on all of the axes
specified in the tuple.
out : ndarray or scalar, optional
Alternative output array in which to place the result. It must have
the same shape and type as the expected output.
Returns
-------
out : ndarray or scalar
A view of `m` with the entries of axis reversed. Since a view is
returned, this operation is done in constant time.
Examples
--------
>>> A = np.arange(8).reshape((2,2,2))
>>> A
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.flip(A, 0)
array([[[4, 5],
[6, 7]],
[[0, 1],
[2, 3]]])
>>> np.flip(A, 1)
array([[[2, 3],
[0, 1]],
[[6, 7],
[4, 5]]])
>>> np.flip(A)
array([[[7, 6],
[5, 4]],
[[3, 2],
[1, 0]]])
>>> np.flip(A, (0, 2))
array([[[5, 4],
[7, 6]],
[[1, 0],
[3, 2]]])
"""
from ...numpy import ndarray
if isinstance(m, numeric_types):
return _np.flip(m, axis)
elif isinstance(m, ndarray):
return _npi.flip(m, axis, out=out)
else:
raise TypeError('type {} not supported'.format(str(type(m))))
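# Illustrative sketch (comments only): for ndarray inputs, `flip` matches
# plain slice reversal along the chosen axes. With A = np.arange(8).reshape((2, 2, 2)):
#     np.flip(A, 0)       # same values as A[::-1, :, :]
#     np.flip(A, (0, 2))  # same values as A[::-1, :, ::-1]
#     np.flip(A)          # same values as A[::-1, ::-1, ::-1]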
@set_module('mxnet.ndarray.numpy')
def flipud(m):
r"""
flipud(*args, **kwargs)
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``m[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag(np.array([1.0, 2, 3]))
>>> A
array([[1., 0., 0.],
[0., 2., 0.],
[0., 0., 3.]])
>>> np.flipud(A)
array([[0., 0., 3.],
[0., 2., 0.],
[1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A) == A[::-1,...])
array(True)
>>> np.flipud(np.array([1,2]))
array([2., 1.])
"""
return flip(m, 0)
@set_module('mxnet.ndarray.numpy')
def fliplr(m):
r"""
fliplr(*args, **kwargs)
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to m[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag(np.array([1.,2.,3.]))
>>> A
array([[1., 0., 0.],
[0., 2., 0.],
[0., 0., 3.]])
>>> np.fliplr(A)
array([[0., 0., 1.],
[0., 2., 0.],
[3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A) == A[:,::-1,...])
array(True)
"""
return flip(m, 1)
@set_module('mxnet.ndarray.numpy')
def around(x, decimals=0, out=None, **kwargs):
r"""
around(x, decimals=0, out=None)
Evenly round to the given number of decimals.
Parameters
----------
x : ndarray or scalar
Input data.
decimals : int, optional
Number of decimal places to round to (default: 0). If
decimals is negative, it specifies the number of positions to
the left of the decimal point.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape and type as the expected output.
Returns
-------
rounded_array : ndarray or scalar
An array of the same type as `x`, containing the rounded values.
A reference to the result is returned.
Notes
-----
For values exactly halfway between rounded decimal values, NumPy
rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
-0.5 and 0.5 round to 0.0, etc.
    This function differs from the original numpy.around in the following aspects:
    - Cannot cast types automatically. The dtype of `out` must be the same as the expected one.
    - Does not support complex-valued numbers.
Examples
--------
>>> np.around([0.37, 1.64])
array([ 0., 2.])
>>> np.around([0.37, 1.64], decimals=1)
array([ 0.4, 1.6])
>>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
array([ 0., 2., 2., 4., 4.])
>>> np.around([1, 2, 3, 11], decimals=1) # ndarray of ints is returned
array([ 1, 2, 3, 11])
>>> np.around([1, 2, 3, 11], decimals=-1)
array([ 0, 0, 0, 10])
"""
from ...numpy import ndarray
if isinstance(x, numeric_types):
return _np.around(x, decimals, **kwargs)
elif isinstance(x, ndarray):
return _npi.around(x, decimals, out=out, **kwargs)
else:
raise TypeError('type {} not supported'.format(str(type(x))))
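# Illustrative sketch (comments only): `around` uses round-half-to-even
# ("banker's rounding"), so ties do not always round up:
#     np.around(np.array([0.5, 1.5, 2.5]))          # -> array([0., 2., 2.])
# and negative `decimals` rounds to the left of the decimal point, where the
# same tie-breaking applies:
#     np.around(np.array([15., 25.]), decimals=-1)  # -> array([20., 20.])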
@set_module('mxnet.ndarray.numpy')
def round(x, decimals=0, out=None, **kwargs):
r"""
round(a, decimals=0, out=None)
Round an array to the given number of decimals.
See Also
--------
around : equivalent function; see for details.
"""
from ...numpy import ndarray
if isinstance(x, numeric_types):
return _np.around(x, decimals, **kwargs)
elif isinstance(x, ndarray):
return _npi.around(x, decimals, out=out, **kwargs)
else:
raise TypeError('type {} not supported'.format(str(type(x))))
@set_module('mxnet.ndarray.numpy')
def round_(x, decimals=0, out=None, **kwargs):
r"""
round_(a, decimals=0, out=None)
Round an array to the given number of decimals.
See Also
--------
around : equivalent function; see for details.
"""
from ...numpy import ndarray
if isinstance(x, numeric_types):
return _np.around(x, decimals, **kwargs)
elif isinstance(x, ndarray):
return _npi.around(x, decimals, out=out, **kwargs)
else:
raise TypeError('type {} not supported'.format(str(type(x))))
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def arctan2(x1, x2, out=None, **kwargs):
r"""
Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
the signed angle in radians between the ray ending at the origin and
passing through the point (1,0), and the ray ending at the origin and
passing through the point (`x2`, `x1`). (Note the role reversal: the
"`y`-coordinate" is the first function parameter, the "`x`-coordinate"
is the second.) By IEEE convention, this function is defined for
`x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
Notes for specific values).
This function is not defined for complex-valued arguments; for the
so-called argument of complex values, use `angle`.
Parameters
----------
x1 : ndarray or scalar
`y`-coordinates.
x2 : ndarray or scalar
`x`-coordinates. `x2` must be broadcastable to match the shape of
`x1` or vice versa.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Array of angles in radians, in the range ``[-pi, pi]``. This is a scalar if
`x1` and `x2` are scalars.
Notes
-----
*arctan2* is identical to the `atan2` function of the underlying
C library. The following special values are defined in the C
standard: [1]_
====== ====== ================
`x1` `x2` `arctan2(x1,x2)`
====== ====== ================
+/- 0 +0 +/- 0
+/- 0 -0 +/- pi
> 0 +/-inf +0 / +pi
< 0 +/-inf -0 / -pi
+/-inf +inf +/- (pi/4)
+/-inf -inf +/- (3*pi/4)
====== ====== ================
Note that +0 and -0 are distinct floating point numbers, as are +inf
and -inf.
    This function differs from the original numpy.arctan2 in the following aspects:
    - Only supports float16, float32 and float64.
References
----------
.. [1] ISO/IEC standard 9899:1999, "Programming language C."
Examples
--------
Consider four points in different quadrants:
>>> x = np.array([-1, +1, +1, -1])
>>> y = np.array([-1, -1, +1, +1])
>>> np.arctan2(y, x) * 180 / np.pi
array([-135., -45., 45., 135.])
Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:
>>> x = np.array([1, -1])
>>> y = np.array([0, 0])
>>> np.arctan2(x, y)
array([ 1.5707964, -1.5707964])
"""
return _ufunc_helper(x1, x2, _npi.arctan2, _np.arctan2,
_npi.arctan2_scalar, _npi.rarctan2_scalar, out=out)
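# Illustrative sketch (comments only): unlike `arctan(y / x)`, `arctan2(y, x)`
# keeps the quadrant, so points with the same slope stay distinguishable:
#     np.arctan2(np.array([1.]), np.array([1.]))    # ->  pi/4   (first quadrant)
#     np.arctan2(np.array([-1.]), np.array([-1.]))  # -> -3*pi/4 (third quadrant)
# even though (1 / 1) == (-1 / -1).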
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def hypot(x1, x2, out=None, **kwargs):
r"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
Parameters
----------
x1, x2 : ndarray
Leg of the triangle(s).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
z : ndarray
The hypotenuse of the triangle(s).
This is a scalar if both `x1` and `x2` are scalars.
Notes
-----
    This function differs from the original numpy.hypot in the following aspects:
    - Only supports float16, float32 and float64.
Examples
--------
>>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> np.hypot(3*np.ones((3, 3)), [4])
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
"""
return _ufunc_helper(x1, x2, _npi.hypot, _np.hypot, _npi.hypot_scalar, None, out)
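# Illustrative sketch (comments only): `hypot` is element-wise
# sqrt(x1**2 + x2**2), with scalars broadcast against arrays, e.g.
#     np.hypot(np.array([3., 5.]), np.array([4., 12.]))  # -> array([ 5., 13.])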
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def bitwise_and(x1, x2, out=None, **kwargs):
r"""
    Compute the bit-wise AND of two arrays element-wise.
Parameters
----------
x1, x2 : ndarray or scalar
Only integer and boolean types are handled. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which becomes the shape of the output).
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape that the
inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Result.
Examples
--------
>>> np.bitwise_and(13, 17)
1
>>> np.bitwise_and(14, 13)
12
>>> np.bitwise_and(np.array([14,3], dtype='int32'), 13)
array([12, 1], dtype=int32)
>>> np.bitwise_and(np.array([11,7], dtype='int32'), np.array([4,25], dtype='int32'))
array([0, 1], dtype=int32)
>>> np.bitwise_and(np.array([2,5,255], dtype='int32'), np.array([3,14,16], dtype='int32'))
array([ 2, 4, 16], dtype=int32)
>>> np.bitwise_and(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
array([False, True])
"""
return _ufunc_helper(x1, x2, _npi.bitwise_and, _np.bitwise_and, _npi.bitwise_and_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def bitwise_xor(x1, x2, out=None, **kwargs):
r"""
Compute the bit-wise XOR of two arrays element-wise.
Parameters
----------
x1, x2 : ndarray or scalar
Only integer and boolean types are handled. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which becomes the shape of the output).
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape that the
inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Result.
Examples
--------
>>> np.bitwise_xor(13, 17)
28
>>> np.bitwise_xor(31, 5)
26
>>> np.bitwise_xor(np.array([31,3], dtype='int32'), 5)
array([26, 6])
>>> np.bitwise_xor(np.array([31,3], dtype='int32'), np.array([5,6], dtype='int32'))
array([26, 5])
>>> np.bitwise_xor(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
array([ True, False])
"""
return _ufunc_helper(x1, x2, _npi.bitwise_xor, _np.bitwise_xor, _npi.bitwise_xor_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def bitwise_or(x1, x2, out=None, **kwargs):
r"""
Compute the bit-wise OR of two arrays element-wise.
Parameters
----------
x1, x2 : ndarray or scalar
Only integer and boolean types are handled. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which becomes the shape of the output).
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape that the
inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Result.
Examples
--------
>>> np.bitwise_or(13, 17)
29
>>> np.bitwise_or(31, 5)
31
>>> np.bitwise_or(np.array([31,3], dtype='int32'), 5)
array([31, 7])
>>> np.bitwise_or(np.array([31,3], dtype='int32'), np.array([5,6], dtype='int32'))
array([31, 7])
>>> np.bitwise_or(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
array([ True, True])
"""
return _ufunc_helper(x1, x2, _npi.bitwise_or, _np.bitwise_or, _npi.bitwise_or_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
@wrap_np_binary_func
def ldexp(x1, x2, out=None, **kwargs):
"""
Returns x1 * 2**x2, element-wise.
The mantissas `x1` and twos exponents `x2` are used to construct
floating point numbers ``x1 * 2**x2``.
Parameters
----------
x1 : ndarray or scalar
Array of multipliers.
x2 : ndarray or scalar, int
Array of twos exponents.
out : ndarray, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not, a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
The result of ``x1 * 2**x2``.
This is a scalar if both `x1` and `x2` are scalars.
Notes
-----
    Complex dtypes are not supported; they will raise a TypeError.
    Unlike numpy, we allow `x2` to be float as well as int.
`ldexp` is useful as the inverse of `frexp`, if used by itself it is
more clear to simply use the expression ``x1 * 2**x2``.
Examples
--------
>>> np.ldexp(5, np.arange(4))
array([ 5., 10., 20., 40.])
"""
return _ufunc_helper(x1, x2, _npi.ldexp, _np.ldexp, _npi.ldexp_scalar, _npi.rldexp_scalar, out)
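# Illustrative sketch (comments only): `ldexp(x1, x2)` is just x1 * 2**x2
# evaluated element-wise, so the docstring example decomposes as
#     5 * 2**0, 5 * 2**1, 5 * 2**2, 5 * 2**3  ->  5., 10., 20., 40.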
@set_module('mxnet.ndarray.numpy')
def inner(a, b):
r"""
Inner product of two arrays.
Ordinary inner product of vectors for 1-D arrays (without complex
conjugation), in higher dimensions a sum product over the last axes.
Parameters
----------
a, b : ndarray
If `a` and `b` are nonscalar, their last dimensions must match.
Returns
-------
out : ndarray
`out.shape = a.shape[:-1] + b.shape[:-1]`
Raises
------
ValueError
If the last dimension of `a` and `b` has different size.
See Also
--------
tensordot : Sum products over arbitrary axes.
dot : Generalised matrix product, using second last dimension of `b`.
einsum : Einstein summation convention.
Notes
-----
For vectors (1-D arrays) it computes the ordinary inner-product::
np.inner(a, b) = sum(a[:]*b[:])
More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::
np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
or explicitly::
np.inner(a, b)[i0,...,ir-1,j0,...,js-1]
= sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])
In addition `a` or `b` may be scalars, in which case::
np.inner(a,b) = a*b
Examples
--------
Ordinary inner product for vectors:
>>> a = np.array([1,2,3])
>>> b = np.array([0,1,0])
>>> np.inner(a, b)
2
A multidimensional example:
>>> a = np.arange(24).reshape((2,3,4))
>>> b = np.arange(4)
>>> np.inner(a, b)
array([[ 14, 38, 62],
[ 86, 110, 134]])
"""
return tensordot(a, b, [-1, -1])
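# Illustrative sketch (comments only): the `tensordot(a, b, [-1, -1])` call
# above sums over the *last* axis of both operands, which is exactly the
# inner-product contraction; e.g. for a.shape == (2, 3, 4) and b.shape == (4,),
# the result has shape a.shape[:-1] + b.shape[:-1] == (2, 3).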
@set_module('mxnet.ndarray.numpy')
def outer(a, b):
r"""
Compute the outer product of two vectors.
Given two vectors, ``a = [a0, a1, ..., aM]`` and
``b = [b0, b1, ..., bN]``,
the outer product [1]_ is::
[[a0*b0 a0*b1 ... a0*bN ]
[a1*b0 .
[ ... .
[aM*b0 aM*bN ]]
Parameters
----------
a : (M,) ndarray
First input vector. Input is flattened if
not already 1-dimensional.
b : (N,) ndarray
Second input vector. Input is flattened if
not already 1-dimensional.
Returns
-------
out : (M, N) ndarray
``out[i, j] = a[i] * b[j]``
See also
--------
inner
einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent.
ufunc.outer : A generalization to N dimensions and other operations.
``np.multiply.outer(a.ravel(), b.ravel())`` is the equivalent.
References
----------
.. [1] : G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd
ed., Baltimore, MD, Johns Hopkins University Press, 1996,
pg. 8.
Examples
--------
Make a (*very* coarse) grid for computing a Mandelbrot set:
>>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))
>>> rl
array([[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.]])
"""
return tensordot(a.flatten(), b.flatten(), 0)
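# Illustrative sketch (comments only): `tensordot(..., 0)` with zero summed
# axes is a pure outer product of the flattened inputs, e.g.
#     np.outer(np.array([1., 2.]), np.array([3., 4.]))
#     # -> array([[3., 4.],
#     #           [6., 8.]])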
@set_module('mxnet.ndarray.numpy')
def vdot(a, b):
r"""
Return the dot product of two vectors.
Note that `vdot` handles multidimensional arrays differently than `dot`:
it does *not* perform a matrix product, but flattens input arguments
to 1-D vectors first. Consequently, it should only be used for vectors.
Parameters
----------
a : ndarray
First argument to the dot product.
b : ndarray
Second argument to the dot product.
Returns
-------
output : ndarray
Dot product of `a` and `b`.
See Also
--------
dot : Return the dot product without using the complex conjugate of the
first argument.
Examples
--------
Note that higher-dimensional arrays are flattened!
>>> a = np.array([[1, 4], [5, 6]])
>>> b = np.array([[4, 1], [2, 2]])
>>> np.vdot(a, b)
30
>>> np.vdot(b, a)
30
>>> 1*4 + 4*1 + 5*2 + 6*2
30
"""
return tensordot(a.flatten(), b.flatten(), 1)
@set_module('mxnet.ndarray.numpy')
def equal(x1, x2, out=None):
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
    >>> np.equal(np.ones((2, 1)), np.zeros((1, 3)))
array([[False, False, False],
[False, False, False]])
>>> np.equal(1, np.ones(1))
array([ True])
"""
return _ufunc_helper(x1, x2, _npi.equal, _np.equal, _npi.equal_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
def not_equal(x1, x2, out=None):
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
    >>> np.not_equal(np.ones((2, 1)), np.zeros((1, 3)))
array([[ True, True, True],
[ True, True, True]])
>>> np.not_equal(1, np.ones(1))
array([False])
"""
return _ufunc_helper(x1, x2, _npi.not_equal, _np.not_equal, _npi.not_equal_scalar, None, out)
@set_module('mxnet.ndarray.numpy')
def greater(x1, x2, out=None):
"""
Return the truth value of (x1 > x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
    equal, not_equal, greater_equal, less, less_equal
Examples
--------
    >>> np.greater(np.ones((2, 1)), np.zeros((1, 3)))
array([[ True, True, True],
[ True, True, True]])
>>> np.greater(1, np.ones(1))
array([False])
"""
return _ufunc_helper(x1, x2, _npi.greater, _np.greater, _npi.greater_scalar,
_npi.less_scalar, out)
@set_module('mxnet.ndarray.numpy')
def less(x1, x2, out=None):
"""
Return the truth value of (x1 < x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
    equal, not_equal, greater, greater_equal, less_equal
Examples
--------
    >>> np.less(np.ones((2, 1)), np.zeros((1, 3)))
    array([[False, False, False],
           [False, False, False]])
>>> np.less(1, np.ones(1))
array([False])
"""
return _ufunc_helper(x1, x2, _npi.less, _np.less, _npi.less_scalar, _npi.greater_scalar, out)
@set_module('mxnet.ndarray.numpy')
def greater_equal(x1, x2, out=None):
"""
Return the truth value of (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
    equal, not_equal, greater, less, less_equal
Examples
--------
    >>> np.greater_equal(np.ones((2, 1)), np.zeros((1, 3)))
array([[ True, True, True],
[ True, True, True]])
>>> np.greater_equal(1, np.ones(1))
array([True])
"""
return _ufunc_helper(x1, x2, _npi.greater_equal, _np.greater_equal, _npi.greater_equal_scalar,
_npi.less_equal_scalar, out)
@set_module('mxnet.ndarray.numpy')
def less_equal(x1, x2, out=None):
"""
Return the truth value of (x1 <= x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
    equal, not_equal, greater, greater_equal, less
Examples
--------
    >>> np.less_equal(np.ones((2, 1)), np.zeros((1, 3)))
array([[False, False, False],
[False, False, False]])
>>> np.less_equal(1, np.ones(1))
array([True])
"""
return _ufunc_helper(x1, x2, _npi.less_equal, _np.less_equal, _npi.less_equal_scalar,
_npi.greater_equal_scalar, out)
@set_module('mxnet.ndarray.numpy')
def rot90(m, k=1, axes=(0, 1)):
"""
Rotate an array by 90 degrees in the plane specified by axes.
Rotation direction is from the first towards the second axis.
Parameters
----------
m : ndarray
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
axes: (2,) array_like
The array is rotated in the plane defined by the axes.
Axes must be different.
Returns
-------
y : ndarray
A rotated view of `m`.
    Notes
    -----
rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1))
rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1))
Examples
--------
>>> m = np.array([[1,2],[3,4]], 'int')
>>> m
array([[1, 2],
[3, 4]], dtype=int64)
>>> np.rot90(m)
array([[2, 4],
[1, 3]], dtype=int64)
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]], dtype=int64)
>>> m = np.arange(8).reshape((2,2,2))
>>> np.rot90(m, 1, (1,2))
array([[[1., 3.],
[0., 2.]],
[[5., 7.],
[4., 6.]]])
"""
return _npi.rot90(m, k=k, axes=axes)
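# Illustrative sketch (comments only): a single 90-degree rotation in the
# (0, 1) plane is equivalent to transposing the two axes and then flipping
# the first one, so for a 2-D array m:
#     np.rot90(m)  # same values as np.flip(np.transpose(m), 0)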
@set_module('mxnet.ndarray.numpy')
def einsum(*operands, **kwargs):
r"""
einsum(subscripts, *operands, out=None, optimize=False)
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional,
linear algebraic array operations can be represented in a simple fashion.
In *implicit* mode `einsum` computes these values.
In *explicit* mode, `einsum` provides further flexibility to compute
other array operations that might not be considered classical Einstein
summation operations, by disabling, or forcing summation over specified
subscript labels.
See the notes and examples for clarification.
Parameters
----------
subscripts : str
Specifies the subscripts for summation as comma separated list of
subscript labels. An implicit (classical Einstein summation)
calculation is performed unless the explicit indicator '->' is
included as well as subscript labels of the precise output form.
operands : list of ndarray
These are the arrays for the operation.
out : ndarray, optional
If provided, the calculation is done into this array.
optimize : {False, True}, optional
Controls if intermediate optimization should occur. No optimization
will occur if False. Defaults to False.
Returns
-------
output : ndarray
The calculation based on the Einstein summation convention.
Notes
-----
The Einstein summation convention can be used to compute
many multi-dimensional, linear algebraic array operations. `einsum`
provides a succinct way of representing these.
A non-exhaustive list of these operations,
which can be computed by `einsum`, is shown below along with examples:
* Trace of an array, :py:func:`np.trace`.
* Return a diagonal, :py:func:`np.diag`.
* Array axis summations, :py:func:`np.sum`.
* Transpositions and permutations, :py:func:`np.transpose`.
* Matrix multiplication and dot product, :py:func:`np.matmul` :py:func:`np.dot`.
* Vector inner and outer products, :py:func:`np.inner` :py:func:`np.outer`.
* Broadcasting, element-wise and scalar multiplication, :py:func:`np.multiply`.
* Tensor contractions, :py:func:`np.tensordot`.
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to :py:func:`np.inner(a,b) <np.inner>`. If a label
appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
describes traditional matrix multiplication and is equivalent to
:py:func:`np.matmul(a,b) <np.matmul>`. Repeated subscript labels in one
operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
to :py:func:`np.trace(a) <np.trace>`.
In *implicit mode*, the chosen subscripts are important
since the axes of the output are reordered alphabetically. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose. Additionally,
``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
``np.einsum('ij,jh', a, b)`` returns the transpose of the
multiplication since subscript 'h' precedes subscript 'i'.
In *explicit mode* the output can be directly controlled by
specifying output subscript labels. This requires the
identifier '->' as well as the list of output subscript labels.
This feature increases the flexibility of the function since
summing can be disabled or forced when required. The call
``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <np.sum>`,
and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <np.diag>`.
The difference is that `einsum` does not allow broadcasting by default.
Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
order of the output subscript labels and therefore returns matrix
multiplication, unlike the example above in implicit mode.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, one can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view.
The ``optimize`` argument which will optimize the contraction order
of an einsum expression. For a contraction with three or more operands this
can greatly increase the computational efficiency at the cost of a larger
memory footprint during computation.
Typically a 'greedy' algorithm is applied which empirical tests have shown
returns the optimal path in the majority of cases. 'optimal' is not supported
for now.
This function differs from the original `numpy.einsum
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html>`_ in
the following way(s):
- Does not support 'optimal' strategy
- Does not support the alternative subscript like
`einsum(op0, sublist0, op1, sublist1, ..., [sublistout])`
- Does not produce view in any cases
Examples
--------
>>> a = np.arange(25).reshape(5,5)
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
Trace of a matrix:
>>> np.einsum('ii', a)
array(60.)
Extract the diagonal (requires explicit form):
>>> np.einsum('ii->i', a)
array([ 0., 6., 12., 18., 24.])
Sum over an axis (requires explicit form):
>>> np.einsum('ij->i', a)
array([ 10., 35., 60., 85., 110.])
>>> np.sum(a, axis=1)
array([ 10., 35., 60., 85., 110.])
For higher dimensional arrays summing a single axis can be done with ellipsis:
>>> np.einsum('...j->...', a)
array([ 10., 35., 60., 85., 110.])
Compute a matrix transpose, or reorder any number of axes:
>>> np.einsum('ji', c)
array([[0., 3.],
[1., 4.],
[2., 5.]])
>>> np.einsum('ij->ji', c)
array([[0., 3.],
[1., 4.],
[2., 5.]])
>>> np.transpose(c)
array([[0., 3.],
[1., 4.],
[2., 5.]])
Vector inner products:
>>> np.einsum('i,i', b, b)
array(30.)
Matrix vector multiplication:
>>> np.einsum('ij,j', a, b)
array([ 30., 80., 130., 180., 230.])
>>> np.dot(a, b)
array([ 30., 80., 130., 180., 230.])
>>> np.einsum('...j,j', a, b)
array([ 30., 80., 130., 180., 230.])
Broadcasting and scalar multiplication:
>>> np.einsum('..., ...', np.array(3), c)
array([[ 0., 3., 6.],
[ 9., 12., 15.]])
>>> np.einsum(',ij', np.array(3), c)
array([[ 0., 3., 6.],
[ 9., 12., 15.]])
>>> np.multiply(3, c)
array([[ 0., 3., 6.],
[ 9., 12., 15.]])
Vector outer product:
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0., 1., 2., 3., 4.],
[0., 2., 4., 6., 8.]])
Tensor contraction:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
array([[4400., 4730.],
[4532., 4874.],
[4664., 5018.],
[4796., 5162.],
[4928., 5306.]])
Example of ellipsis use:
>>> a = np.arange(6).reshape((3,2))
>>> b = np.arange(12).reshape((4,3))
>>> np.einsum('ki,jk->ij', a, b)
array([[10., 28., 46., 64.],
[13., 40., 67., 94.]])
>>> np.einsum('ki,...k->i...', a, b)
array([[10., 28., 46., 64.],
[13., 40., 67., 94.]])
>>> np.einsum('k...,jk', a, b)
array([[10., 28., 46., 64.],
[13., 40., 67., 94.]])
Chained array operations. For more complicated contractions, speed ups
might be achieved by repeatedly computing a 'greedy' path. Performance
improvements can be particularly significant with larger arrays:
>>> a = np.ones(64).reshape(2,4,8)
# Basic `einsum`: ~42.22ms (benchmarked on 3.4GHz Intel Xeon.)
>>> for iteration in range(500):
... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)
# Greedy `einsum` (faster optimal path approximation): ~0.117ms
>>> for iteration in range(500):
... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=True)
"""
# Grab non-einsum kwargs; do not optimize by default.
optimize_arg = kwargs.pop('optimize', False)
out = kwargs.pop('out', None)
subscripts = operands[0]
operands = operands[1:]
return _npi.einsum(*operands, subscripts=subscripts, out=out, optimize=int(optimize_arg))
@set_module('mxnet.ndarray.numpy')
def nonzero(a):
"""
Return the indices of the elements that are non-zero.
Returns a tuple of arrays, one for each dimension of `a`,
containing the indices of the non-zero elements in that
dimension. The values in `a` are always returned in
row-major, C-style order.
To group the indices by element, rather than dimension, use `argwhere`,
which returns a row for each non-zero element.
Parameters
----------
a : ndarray
Input array.
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
ndarray.nonzero :
Equivalent ndarray method.
Notes
-----
While the nonzero values can be obtained with ``a[nonzero(a)]``, it is
recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which
will correctly handle 0-d arrays.
Examples
--------
>>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])
>>> x
array([[3, 0, 0],
[0, 4, 0],
[5, 6, 0]], dtype=int32)
>>> np.nonzero(x)
(array([0, 1, 2, 2], dtype=int64), array([0, 1, 0, 1], dtype=int64))
>>> x[np.nonzero(x)]
array([3, 4, 5, 6])
>>> np.transpose(np.stack(np.nonzero(x)))
array([[0, 0],
[1, 1],
[2, 0],
[2, 1]], dtype=int64)
    A common use for ``nonzero`` is to find the indices of an array where
    a condition is True. Given an array `a`, the condition ``a > 3`` is a
    boolean array and, since False is interpreted as 0, ``np.nonzero(a > 3)``
    yields the indices of `a` where the condition is true.
>>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
>>> a > 3
array([[False, False, False],
[ True, True, True],
[ True, True, True]])
>>> np.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2], dtype=int64), array([0, 1, 2, 0, 1, 2], dtype=int64))
Using this result to index `a` is equivalent to using the mask directly:
>>> a[np.nonzero(a > 3)]
array([4, 5, 6, 7, 8, 9], dtype=int32)
>>> a[a > 3]
array([4, 5, 6, 7, 8, 9], dtype=int32)
``nonzero`` can also be called as a method of the array.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2], dtype=int64), array([0, 1, 2, 0, 1, 2], dtype=int64))
"""
out = _npi.nonzero(a).transpose()
return tuple([out[i] for i in range(len(out))])
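# Illustrative sketch (comments only): the implementation above computes an
# (num_nonzero, ndim) index matrix and transposes it, so tuple element i holds
# the i-th coordinate of every non-zero entry. Zipping the tuple back together
# recovers per-element coordinates:
#     rows, cols = np.nonzero(x)
#     coords = list(zip(rows.asnumpy(), cols.asnumpy()))  # [(r0, c0), (r1, c1), ...]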
@set_module('mxnet.ndarray.numpy')
def percentile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False): # pylint: disable=too-many-arguments
"""
Compute the q-th percentile of the data along the specified axis.
Returns the q-th percentile(s) of the array elements.
Parameters
----------
a : ndarray
Input array
q : ndarray
Percentile or sequence of percentiles to compute.
axis : {int, tuple of int, None}, optional
Axis or axes along which the percentiles are computed. The default is to
compute the percentile(s) along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result. It must have the same
shape and buffer length as the expected output, but the type (of the output)
will be cast if necessary.
overwrite_input : bool, optional (Not supported yet)
If True, then allow the input array a to be modified by intermediate calculations,
to save memory. In this case, the contents of the input a after this function
completes is undefined.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use when the
desired percentile lies between two data points i < j:
'linear': i + (j - i) * fraction, where fraction is the fractional part of the
index surrounded by i and j.
'lower': i.
'higher': j.
'nearest': i or j, whichever is nearest.
'midpoint': (i + j) / 2.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the result as
dimensions with size one. With this option, the result will broadcast
correctly against the original array a.
Returns
-------
percentile : scalar or ndarray
Output array.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.percentile(a, np.array(50))
array(3.5)
>>> np.percentile(a, np.array(50), axis=0)
array([6.5, 4.5, 2.5])
>>> np.percentile(a, np.array(50), axis=1)
array([7., 2.])
>>> np.percentile(a, np.array(50), axis=1, keepdims=True)
array([[7.],
[2.]])
>>> m = np.percentile(a, np.array(50), axis=0)
>>> out = np.zeros_like(m)
>>> np.percentile(a, np.array(50), axis=0, out=out)
array([6.5, 4.5, 2.5])
>>> m
array([6.5, 4.5, 2.5])
"""
if overwrite_input is not None:
raise NotImplementedError('overwrite_input is not supported yet')
if isinstance(q, numeric_types):
return _npi.percentile(a, axis=axis, interpolation=interpolation,
keepdims=keepdims, q_scalar=q, out=out)
return _npi.percentile(a, q, axis=axis, interpolation=interpolation,
keepdims=keepdims, q_scalar=None, out=out)
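# Illustrative sketch (comments only): with the default 'linear'
# interpolation, the q-th percentile of a sorted vector v of length N sits at
# fractional index p = q / 100 * (N - 1); with i = floor(p) the value is
#     v[i] + (v[i + 1] - v[i]) * (p - i)
# e.g. the 50th percentile of [1, 2, 3, 4, 7, 10] lands at p = 2.5,
# giving 3 + (4 - 3) * 0.5 = 3.5, matching the docstring example above.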
@set_module('mxnet.ndarray.numpy')
def quantile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False): # pylint: disable=too-many-arguments
"""
Compute the q-th quantile of the data along the specified axis.
New in version 1.15.0.
Parameters
----------
a : ndarray
Input array or object that can be converted to an array.
q : ndarray
Quantile or sequence of quantiles to compute, which must be between 0 and 1 inclusive.
axis : {int, tuple of int, None}, optional
Axis or axes along which the quantiles are computed.
The default is to compute the quantile(s) along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result.
It must have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use
when the desired quantile lies between two data points i < j:
linear: i + (j - i) * fraction, where fraction is the fractional part of the index surrounded by i and j.
lower: i.
higher: j.
nearest: i or j, whichever is nearest.
midpoint: (i + j) / 2.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the result as dimensions with size one.
With this option, the result will broadcast correctly against the original array a.
Returns
-------
quantile : ndarray
If q is a single quantile and axis=None, then the result is a scalar.
If multiple quantiles are given, first axis of the result corresponds to the quantiles.
The other axes are the axes that remain after the reduction of a.
If out is specified, that array is returned instead.
See also
--------
mean
Notes
-----
Given a vector V of length N, the q-th quantile of V is the value q of the way from the minimum
to the maximum in a sorted copy of V. The values and distances of the two nearest neighbors
as well as the interpolation parameter will determine the quantile if the normalized ranking
does not match the location of q exactly. This function is the same as the median if q=0.5,
the same as the minimum if q=0.0 and the same as the maximum if q=1.0.
This function differs from the original `numpy.quantile
<https://numpy.org/devdocs/reference/generated/numpy.quantile.html>`_ in
the following aspects:
    - `q` must be an ndarray even if it is a scalar
    - Does not support `overwrite_input`
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10., 7., 4.],
[3., 2., 1.]])
>>> q = np.array(0.5)
>>> q
array(0.5)
>>> np.quantile(a, q)
array(3.5)
>>> np.quantile(a, q, axis=0)
array([6.5, 4.5, 2.5])
>>> np.quantile(a, q, axis=1)
array([7., 2.])
>>> np.quantile(a, q, axis=1, keepdims=True)
array([[7.],
[2.]])
>>> m = np.quantile(a, q, axis=0)
>>> out = np.zeros_like(m)
>>> np.quantile(a, q, axis=0, out=out)
array([6.5, 4.5, 2.5])
>>> out
array([6.5, 4.5, 2.5])
"""
if overwrite_input is not None:
raise NotImplementedError('overwrite_input is not supported yet')
if isinstance(q, numeric_types):
return _npi.percentile(a, axis=axis, interpolation=interpolation,
keepdims=keepdims, q_scalar=q * 100, out=out)
return _npi.percentile(a, q * 100, axis=axis, interpolation=interpolation,
keepdims=keepdims, q_scalar=None, out=out)
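# Illustrative sketch (comments only): as the implementation above shows,
# `quantile` simply rescales q from [0, 1] to [0, 100] and defers to the
# percentile kernel, so np.quantile(a, np.array(0.5)) and
# np.percentile(a, np.array(50)) agree.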
@set_module('mxnet.ndarray.numpy')
def shares_memory(a, b, max_work=None):
"""
Determine if two arrays share memory
Parameters
----------
a, b : ndarray
Input arrays
Returns
-------
out : bool
See Also
--------
may_share_memory
Examples
--------
    >>> np.shares_memory(np.array([1,2]), np.array([5,8,9]))
False
This function differs from the original `numpy.shares_memory
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.shares_memory.html>`_ in
the following way(s):
- Does not support `max_work`, it is a dummy argument
    - Actually it is the same as `may_share_memory` in MXNet DeepNumPy
"""
return _npi.share_memory(a, b).item()
@set_module('mxnet.ndarray.numpy')
def may_share_memory(a, b, max_work=None):
"""
Determine if two arrays might share memory
A return of True does not necessarily mean that the two arrays
share any element. It just means that they *might*.
Only the memory bounds of a and b are checked by default.
Parameters
----------
a, b : ndarray
Input arrays
Returns
-------
out : bool
See Also
--------
shares_memory
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
>>> x = np.zeros([3, 4])
>>> np.may_share_memory(x[:,0], x[:,1])
True
This function differs from the original `numpy.may_share_memory
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.may_share_memory.html>`_ in
the following way(s):
- Does not support `max_work`, it is a dummy argument
    - Actually it is the same as `shares_memory` in MXNet DeepNumPy
"""
return _npi.share_memory(a, b).item()
@set_module('mxnet.ndarray.numpy')
def diff(a, n=1, axis=-1, prepend=None, append=None): # pylint: disable=redefined-outer-name
r"""
Calculate the n-th discrete difference along the given axis.
Parameters
----------
a : ndarray
Input array
n : int, optional
The number of times values are differenced. If zero, the input is returned as-is.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
prepend, append : ndarray, optional
Not supported yet
Returns
-------
diff : ndarray
The n-th differences.
The shape of the output is the same as a except along axis where the dimension is smaller by n.
The type of the output is the same as the type of the difference between any two elements of a.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
Notes
-----
Optional inputs `prepend` and `append` are not supported yet
"""
    if prepend is not None or append is not None:
raise NotImplementedError('prepend and append options are not supported yet')
return _npi.diff(a, n=n, axis=axis)
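# Illustrative sketch (comments only): for n == 1 this matches the slice
# identity diff(a)[i] == a[i + 1] - a[i], and higher orders iterate it:
#     np.diff(x, n=2)  # same values as np.diff(np.diff(x))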
@set_module('mxnet.ndarray.numpy')
def ediff1d(ary, to_end=None, to_begin=None):
"""
The differences between consecutive elements of an array.
Parameters
----------
ary : ndarray
If necessary, will be flattened before the differences are taken.
to_end : ndarray or scalar, optional
Number(s) to append at the end of the returned differences.
to_begin : ndarray or scalar, optional
Number(s) to prepend at the beginning of the returned differences.
Returns
-------
ediff1d : ndarray
The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.ediff1d(x)
array([ 1., 2., 3., -7.])
>>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
    array([-99., 1., 2., 3., -7., 88., 99.])
The returned array is always 1D.
>>> y = np.array([[1, 2, 4], [1, 6, 24]])
>>> np.ediff1d(y)
array([ 1., 2., -3., 5., 18.])
>>> np.ediff1d(x, to_begin=y)
array([ 1., 2., 4., 1., 6., 24., 1., 2., 3., -7.])
"""
from ...numpy import ndarray as np_ndarray
input_type = (isinstance(to_begin, np_ndarray), isinstance(to_end, np_ndarray))
# case 1: when both `to_begin` and `to_end` are arrays
if input_type == (True, True):
return _npi.ediff1d(ary, to_begin, to_end, to_begin_arr_given=True, to_end_arr_given=True,
to_begin_scalar=None, to_end_scalar=None)
# case 2: only `to_end` is array but `to_begin` is scalar/None
elif input_type == (False, True):
return _npi.ediff1d(ary, to_end, to_begin_arr_given=False, to_end_arr_given=True,
to_begin_scalar=to_begin, to_end_scalar=None)
# case 3: only `to_begin` is array but `to_end` is scalar/None
elif input_type == (True, False):
return _npi.ediff1d(ary, to_begin, to_begin_arr_given=True, to_end_arr_given=False,
to_begin_scalar=None, to_end_scalar=to_end)
# case 4: both `to_begin` and `to_end` are scalar/None
else:
return _npi.ediff1d(ary, to_begin_arr_given=False, to_end_arr_given=False,
to_begin_scalar=to_begin, to_end_scalar=to_end)
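# Illustrative sketch (comments only): the four branches above only decide
# whether `to_begin`/`to_end` travel to the kernel as array inputs or as
# scalar attributes; conceptually the result is always
#     concatenate([to_begin, flattened consecutive differences, to_end])
# with the missing pieces simply omitted.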
@set_module('mxnet.ndarray.numpy')
def resize(a, new_shape):
"""
Return a new array with the specified shape.
If the new array is larger than the original array, then the new
array is filled with repeated copies of `a`. Note that this behavior
is different from a.resize(new_shape) which fills with zeros instead
of repeated copies of `a`.
Parameters
----------
a : ndarray
Array to be resized.
new_shape : int or tuple of int
Shape of resized array.
Returns
-------
reshaped_array : ndarray
The new array is formed from the data in the old array, repeated
if necessary to fill out the required number of elements. The
data are repeated in the order that they are stored in memory.
See Also
--------
ndarray.resize : resize an array in-place.
Notes
-----
Warning: This functionality does **not** consider axes separately,
i.e. it does not apply interpolation/extrapolation.
It fills the return array with the required number of elements, taken
from `a` as they are laid out in memory, disregarding strides and axes.
(This is in case the new shape is smaller. For larger, see above.)
This functionality is therefore not suitable to resize images,
or data where each axis represents a separate and distinct entity.
Examples
--------
>>> a = np.array([[0, 1], [2, 3]])
>>> np.resize(a, (2, 3))
array([[0., 1., 2.],
[3., 0., 1.]])
>>> np.resize(a, (1, 4))
array([[0., 1., 2., 3.]])
>>> np.resize(a,(2, 4))
array([[0., 1., 2., 3.],
[0., 1., 2., 3.]])
"""
return _npi.resize_fallback(a, new_shape=new_shape)
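# Illustrative sketch (comments only): `resize` reads the source in memory
# order and cycles through it until the target size is filled, or truncates
# when the target is smaller, e.g.
#     np.resize(np.array([[0, 1], [2, 3]]), (3,))  # -> array([0., 1., 2.])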
@set_module('mxnet.ndarray.numpy')
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None, **kwargs):
"""
Replace NaN with zero and infinity with large finite numbers (default
behaviour) or with the numbers defined by the user using the `nan`,
`posinf` and/or `neginf` keywords.
If `x` is inexact, NaN is replaced by zero or by the user defined value in
`nan` keyword, infinity is replaced by the largest finite floating point
values representable by ``x.dtype`` or by the user defined value in
`posinf` keyword and -infinity is replaced by the most negative finite
floating point values representable by ``x.dtype`` or by the user defined
value in `neginf` keyword.
For complex dtypes, the above is applied to each of the real and
imaginary components of `x` separately.
If `x` is not inexact, then no replacements are made.
Parameters
----------
x : ndarray
Input data.
copy : bool, optional
Whether to create a copy of `x` (True) or to replace values
in-place (False). The in-place operation only occurs if
casting to an array does not require a copy.
Default is True.
nan : int, float, optional
Value to be used to fill NaN values. If no value is passed
then NaN values will be replaced with 0.0.
posinf : int, float, optional
Value to be used to fill positive infinity values. If no value is
passed then positive infinity values will be replaced with a very
large number.
neginf : int, float, optional
Value to be used to fill negative infinity values. If no value is
passed then negative infinity values will be replaced with a very
small (or negative) number.
.. versionadded:: 1.13
Returns
-------
out : ndarray
`x`, with the non-finite values replaced. If `copy` is False, this may
be `x` itself.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.nan_to_num(np.inf)
1.7976931348623157e+308
>>> np.nan_to_num(-np.inf)
-1.7976931348623157e+308
>>> np.nan_to_num(np.nan)
0.0
>>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])
>>> np.nan_to_num(x)
array([ 3.4028235e+38, -3.4028235e+38, 0.0000000e+00, -1.2800000e+02,
1.2800000e+02])
>>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333)
array([ 3.3333332e+07, 3.3333332e+07, -9.9990000e+03, -1.2800000e+02,
1.2800000e+02])
>>> y = np.array([[-1, 0, 1],[9999,234,-14222]],dtype="float64")/0
array([[-inf, nan, inf],
[ inf, inf, -inf]], dtype=float64)
>>> np.nan_to_num(y)
array([[-1.79769313e+308, 0.00000000e+000, 1.79769313e+308],
[ 1.79769313e+308, 1.79769313e+308, -1.79769313e+308]], dtype=float64)
>>> np.nan_to_num(y, nan=111111, posinf=222222)
array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],
[ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)
>>> y
array([[-inf, nan, inf],
[ inf, inf, -inf]], dtype=float64)
>>> np.nan_to_num(y, copy=False, nan=111111, posinf=222222)
array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],
[ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)
>>> y
array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],
[ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)
"""
if isinstance(x, numeric_types):
return _np.nan_to_num(x, copy, nan, posinf, neginf) # pylint: disable=too-many-function-args
elif isinstance(x, NDArray):
if x.dtype in ['int8', 'uint8', 'int32', 'int64']:
return x
if not copy:
return _npi.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf, out=x)
return _npi.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf, out=None)
else:
raise TypeError('type {} not supported'.format(str(type(x))))
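# Illustrative sketch (comments only): the dispatch above leaves integer
# arrays untouched (they cannot hold NaN/inf), routes `copy=False` through
# the in-place `out=x` path, and otherwise allocates a fresh result, e.g.
#     np.nan_to_num(np.array([1, 2], dtype='int32'))  # returned as-is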
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def isnan(x, out=None, **kwargs):
"""
Test element-wise for NaN and return result as a boolean array.
Parameters
----------
x : ndarray
Input array.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray or bool
True where x is NaN, false otherwise.
This is a scalar if x is a scalar.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
    This function differs from the original `numpy.isnan
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.isnan.html>`_ in
the following aspects:
- Does not support complex number for now
- Input type does not support Python native iterables(list, tuple, ...).
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
Examples
--------
>>> np.isnan(np.nan)
True
>>> np.isnan(np.inf)
False
>>> np.isnan(np.array([np.log(-1.),1.,np.log(0)]))
array([ True, False, False])
"""
return _unary_func_helper(x, _npi.isnan, _np.isnan, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def isinf(x, out=None, **kwargs):
"""
Test element-wise for positive or negative infinity.
Parameters
----------
x : ndarray
Input array.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray or bool
True where x is positive or negative infinity, false otherwise.
This is a scalar if x is a scalar.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
This means that Not a Number is not equivalent to infinity.
    This function differs from the original `numpy.isinf
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.isinf.html>`_ in
the following aspects:
- Does not support complex number for now
- Input type does not support Python native iterables(list, tuple, ...).
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
- ``out`` param does not support scalar input case.
Examples
--------
>>> np.isinf(np.inf)
True
>>> np.isinf(np.nan)
False
>>> np.isinf(np.array([np.inf, -np.inf, 1.0, np.nan]))
array([ True, True, False, False])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([True, True, True], dtype=np.bool_)
>>> np.isinf(x, y)
array([ True, False, True])
>>> y
array([ True, False, True])
"""
return _unary_func_helper(x, _npi.isinf, _np.isinf, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def isposinf(x, out=None, **kwargs):
"""
Test element-wise for positive infinity, return result as bool array.
Parameters
----------
x : ndarray
Input array.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray or bool
True where x is positive infinity, false otherwise.
This is a scalar if x is a scalar.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isposinf(np.inf)
True
>>> np.isposinf(-np.inf)
False
>>> np.isposinf(np.nan)
False
>>> np.isposinf(np.array([-np.inf, 0., np.inf]))
array([False, False, True])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([True, True, True], dtype=np.bool)
>>> np.isposinf(x, y)
array([False, False, True])
>>> y
array([False, False, True])
"""
return _unary_func_helper(x, _npi.isposinf, _np.isposinf, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def isneginf(x, out=None, **kwargs):
"""
Test element-wise for negative infinity, return result as bool array.
Parameters
----------
x : ndarray
Input array.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray or bool
True where x is negative infinity, false otherwise.
This is a scalar if x is a scalar.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isneginf(-np.inf)
True
>>> np.isneginf(np.inf)
False
>>> np.isneginf(float('-inf'))
True
>>> np.isneginf(np.array([-np.inf, 0., np.inf]))
array([ True, False, False])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([True, True, True], dtype=np.bool)
>>> np.isneginf(x, y)
array([ True, False, False])
>>> y
array([ True, False, False])
"""
return _unary_func_helper(x, _npi.isneginf, _np.isneginf, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
@wrap_np_unary_func
def isfinite(x, out=None, **kwargs):
"""
Test element-wise for finiteness (not infinity or not Not a Number).
Parameters
----------
x : ndarray
Input array.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray or bool
        True where x is finite (not infinity and not NaN), false otherwise.
This is a scalar if x is a scalar.
Notes
-----
Not a Number, positive infinity and negative infinity are considered to be non-finite.
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity.
But infinity is equivalent to positive infinity. Errors result if the second argument
is also supplied when x is a scalar input, or if first and second arguments have different shapes.
Examples
--------
>>> np.isfinite(1)
True
>>> np.isfinite(0)
True
>>> np.isfinite(np.nan)
False
>>> np.isfinite(np.inf)
False
>>> np.isfinite(-np.inf)
False
>>> np.isfinite(np.array([np.log(-1.),1.,np.log(0)]))
array([False, True, False])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([True, True, True], dtype=np.bool)
>>> np.isfinite(x, y)
array([False, True, False])
>>> y
array([False, True, False])
"""
return _unary_func_helper(x, _npi.isfinite, _np.isfinite, out=out, **kwargs)
@set_module('mxnet.ndarray.numpy')
def where(condition, x=None, y=None): # pylint: disable=too-many-return-statements
"""where(condition, [x, y])
Return elements chosen from `x` or `y` depending on `condition`.
.. note::
When only `condition` is provided, this function is a shorthand for
``np.asarray(condition).nonzero()``. The rest of this documentation
covers only the case where all three arguments are provided.
Parameters
----------
condition : ndarray
Where True, yield `x`, otherwise yield `y`.
x, y : ndarray
Values from which to choose. `x`, `y` and `condition` need to be
broadcastable to some shape. `x` and `y` must have the same dtype.
Returns
-------
out : ndarray
An array with elements from `x` where `condition` is True, and elements
from `y` elsewhere.
Notes
-----
If all the arrays are 1-D, `where` is equivalent to::
[xv if c else yv
for c, xv, yv in zip(condition, x, y)]
This function differs from the original `numpy.where
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.where.html>`_ in
the following way(s):
- If `condition` is a scalar, this operator returns x or y directly without broadcasting.
- If `condition` is ndarray, while both `x` and `y` are scalars,
the output dtype will be `float32`.
Examples
--------
>>> a = np.arange(10)
>>> a
array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.where(a < 5, a, 10*a)
array([ 0., 1., 2., 3., 4., 50., 60., 70., 80., 90.])
This can be used on multidimensional arrays too:
>>> cond = np.array([[True, False], [True, True]])
>>> x = np.array([[1, 2], [3, 4]])
>>> y = np.array([[9, 8], [7, 6]])
>>> np.where(cond, x, y)
array([[1., 8.],
[3., 4.]])
The shapes of x, y, and the condition are broadcast together:
>>> x, y = onp.ogrid[:3, :4]
>>> x = np.array(x)
>>> y = np.array(y)
>>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast
array([[10, 0, 0, 0],
[10, 11, 1, 1],
[10, 11, 12, 2]], dtype=int64)
>>> a = np.array([[0, 1, 2],
... [0, 2, 4],
... [0, 3, 6]])
>>> np.where(a < 4, a, -1) # -1 is broadcast
array([[ 0., 1., 2.],
[ 0., 2., -1.],
[ 0., 3., -1.]])
"""
if x is None and y is None:
return nonzero(condition)
else:
if isinstance(condition, numeric_types):
if condition != 0:
return x
else:
return y
else:
if isinstance(x, numeric_types) and isinstance(y, numeric_types):
return _npi.where_scalar2(condition, float(x), float(y), out=None)
elif isinstance(x, NDArray) and isinstance(y, NDArray):
return _npi.where(condition, x, y, out=None)
elif isinstance(y, NDArray):
return _npi.where_lscalar(condition, y, float(x), out=None)
elif isinstance(x, NDArray):
return _npi.where_rscalar(condition, x, float(y), out=None)
else:
raise TypeError('type {0} and {1} not supported'.format(str(type(x)), str(type(y))))
@set_module('mxnet.ndarray.numpy')
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If p is of length N, this function returns the value:
p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]
If x is a sequence, then p(x) is returned for each element of x.
If x is another polynomial then the composite polynomial p(x(t)) is returned.
Parameters
----------
p : ndarray
1D array of polynomial coefficients (including coefficients equal to zero)
from highest degree to the constant term.
x : ndarray
An array of numbers, at which to evaluate p.
Returns
-------
values : ndarray
Result array of polynomials
Notes
-----
This function differs from the original `numpy.polyval
<https://numpy.org/devdocs/reference/generated/numpy.polyval.html>`_ in
the following way(s):
- Does not support poly1d.
- X should be ndarray type even if it contains only one element.
Examples
--------
    >>> p = np.array([3, 0, 1])
    >>> p
    array([3., 0., 1.])
    >>> x = np.array([5])
    >>> x
    array([5.])
>>> np.polyval(p, x) # 3 * 5**2 + 0 * 5**1 + 1
array([76.])
    >>> x = np.array([5, 4])
    >>> x
    array([5., 4.])
>>> np.polyval(p, x)
array([76., 49.])
"""
from ...numpy import ndarray
if isinstance(p, ndarray) and isinstance(x, ndarray):
return _npi.polyval(p, x)
elif not isinstance(p, ndarray) and not isinstance(x, ndarray):
return _np.polyval(p, x)
else:
raise TypeError('type not supported')
@set_module('mxnet.ndarray.numpy')
def bincount(x, weights=None, minlength=0):
"""
Count number of occurrences of each value in array of non-negative ints.
Parameters
----------
x : ndarray
input array, 1 dimension, nonnegative ints.
    weights : ndarray, optional
        Input weights, same shape as x.
    minlength : int, optional
        A minimum number of bins for the output.
Returns
--------
out : ndarray
the result of binning the input array. The length of out is equal to amax(x)+1.
Raises
--------
    ValueError
If the input is not 1-dimensional, or contains elements with negative values,
or if minlength is negative
TypeError
If the type of the input is float or complex.
Examples
--------
>>> np.bincount(np.arange(5))
array([1, 1, 1, 1, 1])
>>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
array([1, 3, 1, 1, 0, 0, 0, 1])
>>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
>>> np.bincount(x).size == np.amax(x)+1
True
>>> np.bincount(np.arange(5, dtype=float))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: array cannot be safely cast to required type
>>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
>>> x = np.array([0, 1, 1, 2, 2, 2])
>>> np.bincount(x, weights=w)
array([ 0.3, 0.7, 1.1])
"""
    if not isinstance(x, NDArray):
        raise TypeError("Input data should be NDArray")
    if minlength < 0:
        raise ValueError("minlength must not be negative")
if weights is None:
return _npi.bincount(x, minlength=minlength, has_weights=False)
return _npi.bincount(x, weights=weights, minlength=minlength, has_weights=True)
@set_module('mxnet.ndarray.numpy')
def pad(x, pad_width, mode='constant', **kwargs): # pylint: disable=too-many-arguments
"""
Pad an array.
Parameters
----------
array : array_like of rank N
The array to pad.
pad_width : {sequence, array_like, int}
Number of values padded to the edges of each axis.
((before_1, after_1), ... (before_N, after_N)) unique pad widths
for each axis.
((before, after),) yields same before and after pad for each axis.
(pad,) or int is a shortcut for before = after = pad width for all
axes.
mode : str or function, optional
One of the following string values or a user supplied function.
'constant' (default)
Pads with a constant value.
'edge'
Pads with the edge values of array.
'linear_ramp'
not supported yet
'maximum'
Pads with the maximum value of all of the
vector along each axis.
'mean'
not supported yet
'median'
not supported yet
'minimum'
Pads with the minimum value of all of the
vector along each axis.
'reflect'
Pads with the reflection of the vector mirrored on
the first and last values of the vector along each
axis.
'symmetric'
Pads with the reflection of the vector mirrored
along the edge of the array.
'wrap'
not supported yet.
'empty'
not supported yet.
<function>
not supported yet.
stat_length : not supported yet
constant_values : scalar, optional
Used in 'constant'. The values to set the padded values for each
axis.
Default is 0.
end_values : not supported yet
reflect_type : {'even', 'odd'}, optional
only support even now
Returns
-------
pad : ndarray
Padded array of rank equal to `array` with shape increased
according to `pad_width`.
"""
# pylint: disable = too-many-return-statements, inconsistent-return-statements
if not _np.asarray(pad_width).dtype.kind == 'i':
raise TypeError('`pad_width` must be of integral type.')
if not isinstance(pad_width, tuple):
raise TypeError("`pad_width` must be tuple.")
if mode == "linear_ramp":
raise ValueError("mode {'linear_ramp'} is not supported.")
if mode == "wrap":
raise ValueError("mode {'wrap'} is not supported.")
if mode == "median":
raise ValueError("mode {'median'} is not supported.")
if mode == "mean":
raise ValueError("mode {'mean'} is not supported.")
if mode == "empty":
raise ValueError("mode {'empty'} is not supported.")
if callable(mode):
raise ValueError("mode {'<function>'} is not supported.")
allowedkwargs = {
'constant': ['constant_values'],
'edge': [],
'linear_ramp': ['end_values'],
'maximum': ['stat_length'],
'mean': ['stat_length'],
'median': ['stat_length'],
'minimum': ['stat_length'],
'reflect': ['reflect_type'],
'symmetric': ['reflect_type'],
'wrap': [],
}
if isinstance(mode, _np.compat.basestring):
        # Make sure we only received kwargs allowed for this mode
        for key in kwargs:
            if key not in allowedkwargs[mode]:
                raise ValueError('%s keyword not in allowed keywords %s' % (key, allowedkwargs[mode]))
unsupported_kwargs = set(kwargs) - set(allowedkwargs[mode])
if unsupported_kwargs:
raise ValueError("unsupported keyword arguments for mode '{}': {}"
.format(mode, unsupported_kwargs))
if mode == "constant":
values = kwargs.get("constant_values", 0)
if isinstance(values, tuple):
raise TypeError("unsupported constant_values type: {'tuple'}.")
            return _npi.pad(x, pad_width, mode='constant', constant_value=values)
elif mode == "symmetric":
values = kwargs.get("reflect_type", "even")
if values != "even" and values is not None:
raise ValueError("unsupported reflect_type '{}'".format(values))
return _npi.pad(x, pad_width, mode='symmetric', reflect_type="even")
elif mode == "edge":
return _npi.pad(x, pad_width, mode='edge')
elif mode == "reflect":
values = kwargs.get("reflect_type", "even")
if values != "even" and values is not None:
raise ValueError("unsupported reflect_type '{}'".format(values))
return _npi.pad(x, pad_width, mode='reflect', reflect_type="even")
elif mode == "maximum":
values = kwargs.get("stat_length", None)
if values is not None:
raise ValueError("unsupported stat_length '{}'".format(values))
return _npi.pad(x, pad_width, mode='maximum')
elif mode == "minimum":
values = kwargs.get("stat_length", None)
if values is not None:
raise ValueError("unsupported stat_length '{}'".format(values))
return _npi.pad(x, pad_width, mode='minimum')
return _npi.pad(x, pad_width, mode='constant', constant_value=0)
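# Usage sketch (illustrative, not part of the original docs): pad a 2x2 array
# with one constant row/column of zeros on each side.
#   a = np.array([[1, 2], [3, 4]])
#   np.pad(a, ((1, 1), (1, 1)), mode='constant', constant_values=0)
#   # -> a 4x4 array with a border of zeros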
| 33.707253
| 141
| 0.604778
|
ebd4f509239aa3bad05e2109c811ad26e9faa3c9
| 5,030
|
py
|
Python
|
scripts/visualize.py
|
nodchip/icfpc2021
|
e50f0172fd62097049dab19c01875c57468a13f1
|
[
"MIT"
] | 1
|
2021-07-12T13:52:18.000Z
|
2021-07-12T13:52:18.000Z
|
scripts/visualize.py
|
nodchip/icfpc2021
|
e50f0172fd62097049dab19c01875c57468a13f1
|
[
"MIT"
] | null | null | null |
scripts/visualize.py
|
nodchip/icfpc2021
|
e50f0172fd62097049dab19c01875c57468a13f1
|
[
"MIT"
] | 1
|
2021-08-24T08:49:18.000Z
|
2021-08-24T08:49:18.000Z
|
#!/usr/bin/python3
import argparse
import json
import matplotlib.pyplot as plt
import os
def get_x(point):
return point[0]
def get_y(point):
return point[1]
def visualize(problem_file_path, pose_file_path=None, output_file_path=None, does_draw_figure=True):
    '''Visualize a single problem.
    problem_file_path [required] Problem file path.
    pose_file_path [optional] Pose (solution) file path. If given, the pose is drawn as well.
    output_file_path [optional] Output image file path. If omitted, show a window; if given, write an image file.
    does_draw_figure [optional] If False, do not draw the figure contained in the problem.
    '''
with open(problem_file_path, 'r') as file:
problem = json.load(file)
    # Invert the y axis
    # Matplotlib: how to draw a chart with an inverted y axis | DATUM STUDIO Inc. https://datumstudio.jp/blog/matplotlib-y%E8%BB%B8%E3%81%8C%E5%8F%8D%E8%BB%A2%E3%81%97%E3%81%9F%E3%82%B0%E3%83%A9%E3%83%95%E3%81%AE%E6%8F%8F%E3%81%8D%E6%96%B9/
min_x = min(problem['hole'] + problem['figure']['vertices'], key=get_x)[0]
max_x = max(problem['hole'] + problem['figure']['vertices'], key=get_x)[0]
min_y = min(problem['hole'] + problem['figure']['vertices'], key=get_y)[1]
max_y = max(problem['hole'] + problem['figure']['vertices'], key=get_y)[1]
axes = plt.axes(facecolor='pink')
axes.set_ylim([max_y + 1, min_y - 1])
    # Set the aspect ratio to 1:1
axes.set_aspect('equal')
    # Draw the hole
plt.fill(*zip(*problem['hole']), facecolor='white')
    # Draw the bonuses
for bonus in problem['bonuses']:
color = 'darkred'
if bonus['bonus'] == 'GLOBALIST':
color = 'yellow'
if bonus['bonus'] == 'BREAK_A_LEG':
color = 'blue'
if bonus['bonus'] == 'WALLHACK':
color = 'orange'
if bonus['bonus'] == 'SUPERFLEX':
color = 'cyan'
axes.add_patch(plt.Circle(bonus['position'], radius=5, color=color, alpha=0.5))
    # Draw the figure
if does_draw_figure:
for edge in problem['figure']['edges']:
src = problem['figure']['vertices'][edge[0]]
dst = problem['figure']['vertices'][edge[1]]
color = 'gray' if pose_file_path else 'red'
plt.plot(*zip(src, dst), color=color)
    # Draw the pose
if pose_file_path:
epsilon = problem['epsilon']
with open(pose_file_path, 'r') as file:
pose = json.load(file)
for edge in problem['figure']['edges']:
src = problem['figure']['vertices'][edge[0]]
dst = problem['figure']['vertices'][edge[1]]
d0 = sum((s - d) ** 2 for s, d in zip(src, dst))
src = pose['vertices'][edge[0]]
dst = pose['vertices'][edge[1]]
d1 = sum((s - d) ** 2 for s, d in zip(src, dst))
color = 'blue'
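            # ICFPC 2021 stretch rule: an edge keeps its length iff
            # |d1/d0 - 1| <= epsilon / 10**6; the check below multiplies
            # through by 10**6 * d0 to stay in integer arithmetic.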
satisfied = 10**6 * abs(d1 - d0) <= d0 * epsilon
if not satisfied:
color = 'red' if d1 > d0 else 'cyan'
plt.plot(*zip(src, dst), color=color)
if output_file_path:
plt.savefig(output_file_path)
else:
plt.show()
plt.close()
def main():
parser = argparse.ArgumentParser(description='Brain Wall Visualizer')
parser.add_argument('--problem', action='store',
type=str, help='Problem file path. Visualize one file if specified.')
parser.add_argument('--pose', action='store',
type=str, help='Pose file path.')
parser.add_argument('--problems', action='store', type=str,
help='Problem folder path. Use to convert json files to image files.')
    parser.add_argument('--output_image_directory_path', action='store', type=str,
                        default='.',
                        help='Output image directory path.')
parser.add_argument('--no_draw_figure', action='store_false',
default=True, dest='does_draw_figure',
help='Do not draw figure in problem.')
args = parser.parse_args()
if args.problem:
assert args.pose, 'You have to specify --pose'
output_file_path = None
if args.output_image_directory_path:
pose_file_name = os.path.basename(args.pose)
output_file_name = os.path.splitext(pose_file_name)[0] + ".png"
output_file_path = os.path.join(
args.output_image_directory_path, output_file_name)
visualize(args.problem, args.pose, output_file_path, args.does_draw_figure)
if args.problems:
assert args.output_image_directory_path, 'You have to specify --output_image_directory_path'
for problem_file_name in os.listdir(args.problems):
print(problem_file_name)
problem_file_path = os.path.join(args.problems, problem_file_name)
output_file_name = os.path.splitext(problem_file_name)[0] + ".png"
output_file_path = os.path.join(
args.output_image_directory_path, output_file_name)
visualize(problem_file_path, None, output_file_path, args.does_draw_figure)
if __name__ == '__main__':
main()
| 38.992248
| 209
| 0.608748
|
0cddc470784965ec277967f7f3d1c8d759b7aba1
| 4,270
|
py
|
Python
|
nmm/wrap.py
|
EBI-Metagenomics/nmm-py
|
11422e34dacba74cfc0521779749928c67306d37
|
[
"MIT"
] | null | null | null |
nmm/wrap.py
|
EBI-Metagenomics/nmm-py
|
11422e34dacba74cfc0521779749928c67306d37
|
[
"MIT"
] | null | null | null |
nmm/wrap.py
|
EBI-Metagenomics/nmm-py
|
11422e34dacba74cfc0521779749928c67306d37
|
[
"MIT"
] | null | null | null |
from typing import Dict, Optional
import imm
from ._alphabet import Alphabet, AlphabetType, AminoAlphabet, BaseAlphabet
from ._base_lprob import BaseLprob
from ._cdata import CData
from ._codon_lprob import CodonLprob
from ._codon_marg import CodonMarg
from ._ffi import ffi, lib
from ._profile import Profile
from ._state import CodonState, FrameState, StateType
__all__ = ["imm_abc", "imm_state", "nmm_profile"]
def imm_abc(ptr: CData):
try:
alphabet_type = AlphabetType(imm.lib.imm_abc_type_id(ptr))
except ValueError:
return imm.Alphabet(ptr)
if alphabet_type == AlphabetType.BASE:
nmm_base_abc = lib.nmm_base_abc_derived(ptr)
return BaseAlphabet(nmm_base_abc)
if alphabet_type == AlphabetType.AMINO:
nmm_amino_abc = lib.nmm_amino_abc_derived(ptr)
return AminoAlphabet(nmm_amino_abc)
raise RuntimeError("It should not get here.")
def imm_state(
ptr: CData,
alphabet: Alphabet,
base_lprobs: Dict[CData, BaseLprob],
codon_lprobs: Dict[CData, CodonLprob],
codon_margs: Dict[CData, CodonMarg],
) -> imm.State:
try:
state_type = StateType(imm.lib.imm_state_type_id(ptr))
except ValueError:
return imm.wrap.imm_state(ptr, alphabet)
if state_type == StateType.CODON:
nmm_codon_state = lib.nmm_codon_state_derived(ptr)
if nmm_codon_state == ffi.NULL:
raise RuntimeError("`nmm_codon_state` is NULL.")
nmm_codon_lprob = lib.nmm_codon_state_codon_lprob(nmm_codon_state)
codonp = codon_lprobs[nmm_codon_lprob]
        return CodonState(nmm_codon_state, codonp)
if state_type == StateType.FRAME:
nmm_frame_state = lib.nmm_frame_state_derived(ptr)
if nmm_frame_state == ffi.NULL:
raise RuntimeError("`nmm_frame_state` is NULL.")
nmm_base_lprob = lib.nmm_frame_state_base_lprob(nmm_frame_state)
nmm_codon_marg = lib.nmm_frame_state_codon_marg(nmm_frame_state)
basep = base_lprobs[nmm_base_lprob]
codonm = codon_margs[nmm_codon_marg]
return FrameState(nmm_frame_state, basep, codonm)
raise ValueError(f"Unknown state type: {imm.lib.imm_state_type_id(ptr)}.")
def nmm_profile(ptr: CData, abc: Optional[BaseAlphabet] = None) -> Profile:
if abc is None:
tmp_abc = imm_abc(lib.nmm_profile_abc(ptr))
assert isinstance(tmp_abc, BaseAlphabet)
abc = tmp_abc
base_lprobs = read_base_lprobs(ptr, abc)
codon_lprobs = read_codon_lprobs(ptr, abc)
codon_margs = read_cond_margs(ptr, abc)
prof = Profile(ptr, abc)
for i in range(lib.nmm_profile_nmodels(ptr)):
imm_model = lib.nmm_profile_get_model(ptr, i)
states = {}
for j in range(imm.lib.imm_model_nstates(imm_model)):
state_ptr = imm.lib.imm_model_state(imm_model, j)
states[state_ptr] = imm_state(
state_ptr, abc, base_lprobs, codon_lprobs, codon_margs
)
hmm = imm.HMM(imm.lib.imm_model_hmm(imm_model), abc, states)
dp = imm.DP(imm.lib.imm_model_dp(imm_model), hmm)
model = imm.Model(imm_model, hmm, dp)
prof.append_model(model)
return prof
def read_base_lprobs(nmm_profile: CData, abc: BaseAlphabet) -> Dict[CData, BaseLprob]:
base_lprobs: Dict[CData, BaseLprob] = {}
for i in range(lib.nmm_profile_nbase_lprobs(nmm_profile)):
nmm_base_lprob = lib.nmm_profile_base_lprob(nmm_profile, i)
base_lprobs[nmm_base_lprob] = BaseLprob(nmm_base_lprob, abc)
return base_lprobs
def read_codon_lprobs(nmm_profile: CData, abc: BaseAlphabet) -> Dict[CData, CodonLprob]:
codon_lprobs: Dict[CData, CodonLprob] = {}
for i in range(lib.nmm_profile_ncodon_lprobs(nmm_profile)):
nmm_codon_lprob = lib.nmm_profile_codon_lprob(nmm_profile, i)
codon_lprobs[nmm_codon_lprob] = CodonLprob(nmm_codon_lprob, abc)
return codon_lprobs
def read_cond_margs(nmm_profile: CData, abc: BaseAlphabet) -> Dict[CData, CodonMarg]:
codon_margs: Dict[CData, CodonMarg] = {}
for i in range(lib.nmm_profile_ncodon_margs(nmm_profile)):
nmm_codon_marg = lib.nmm_profile_codon_marg(nmm_profile, i)
codon_margs[nmm_codon_marg] = CodonMarg(nmm_codon_marg, abc)
return codon_margs
| 35.289256
| 88
| 0.708665
|
3deb36780335341bd55d10190bf1b349e06873c8
| 789
|
py
|
Python
|
Virtualenv/Env/src/GoTravel/Home/migrations/0001_initial.py
|
Anoop01234/Go-Travel
|
aa91f1a4ce7e7ed78de8eadc55e6a25d1a73bdd8
|
[
"MIT"
] | null | null | null |
Virtualenv/Env/src/GoTravel/Home/migrations/0001_initial.py
|
Anoop01234/Go-Travel
|
aa91f1a4ce7e7ed78de8eadc55e6a25d1a73bdd8
|
[
"MIT"
] | null | null | null |
Virtualenv/Env/src/GoTravel/Home/migrations/0001_initial.py
|
Anoop01234/Go-Travel
|
aa91f1a4ce7e7ed78de8eadc55e6a25d1a73bdd8
|
[
"MIT"
] | 1
|
2021-12-21T17:27:34.000Z
|
2021-12-21T17:27:34.000Z
|
# Generated by Django 3.0.7 on 2020-06-29 04:22
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Chef',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('position', models.CharField(max_length=50)),
('description', models.TextField()),
('photo', models.ImageField(upload_to='chef/')),
],
options={
'verbose_name': 'Chef',
'verbose_name_plural': 'Chefs',
},
),
]
| 27.206897
| 114
| 0.529785
|
daac82a6606c3ccf87b9498fd45f62772bca1da0
| 9,012
|
py
|
Python
|
ASNativeActivity/phoenixas/src/main/cpp/deps/SPIRV-Cross/test_shaders.py
|
playbar/android-ndk
|
34e79bc1de9caa27faa72f5f1fb4ad3202debdc6
|
[
"Apache-2.0"
] | 1
|
2017-06-01T01:20:57.000Z
|
2017-06-01T01:20:57.000Z
|
ASNativeActivity/phoenixas/src/main/cpp/deps/SPIRV-Cross/test_shaders.py
|
playbar/android-ndk
|
34e79bc1de9caa27faa72f5f1fb4ad3202debdc6
|
[
"Apache-2.0"
] | 1
|
2018-07-17T07:09:17.000Z
|
2018-07-17T07:09:17.000Z
|
ASNativeActivity/phoenixas/src/main/cpp/deps/SPIRV-Cross/test_shaders.py
|
playbar/android-ndk
|
34e79bc1de9caa27faa72f5f1fb4ad3202debdc6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import sys
import os
import subprocess
import tempfile
import re
import itertools
import hashlib
import shutil
import argparse
def parse_stats(stats):
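    # Parse the text report from the ARM Mali offline compiler (malisc):
    # register counts come from named lines, and the (ALU, load/store,
    # texture) cycle triples give the shortest- and longest-path costs.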
m = re.search('([0-9]+) work registers', stats)
registers = int(m.group(1)) if m else 0
m = re.search('([0-9]+) uniform registers', stats)
uniform_regs = int(m.group(1)) if m else 0
    m_list = re.findall(r'(-?[0-9]+)\s+(-?[0-9]+)\s+(-?[0-9]+)', stats)
alu_short = float(m_list[1][0]) if m_list else 0
ls_short = float(m_list[1][1]) if m_list else 0
tex_short = float(m_list[1][2]) if m_list else 0
alu_long = float(m_list[2][0]) if m_list else 0
ls_long = float(m_list[2][1]) if m_list else 0
tex_long = float(m_list[2][2]) if m_list else 0
return (registers, uniform_regs, alu_short, ls_short, tex_short, alu_long, ls_long, tex_long)
def get_shader_type(shader):
_, ext = os.path.splitext(shader)
if ext == '.vert':
return '--vertex'
elif ext == '.frag':
return '--fragment'
elif ext == '.comp':
return '--compute'
elif ext == '.tesc':
return '--tessellation_control'
elif ext == '.tese':
return '--tessellation_evaluation'
elif ext == '.geom':
return '--geometry'
else:
return ''
def get_shader_stats(shader):
f, path = tempfile.mkstemp()
os.close(f)
p = subprocess.Popen(['malisc', get_shader_type(shader), '--core', 'Mali-T760', '-V', shader], stdout = subprocess.PIPE, stderr = subprocess.PIPE)
stdout, stderr = p.communicate()
os.remove(path)
if p.returncode != 0:
print(stderr.decode('utf-8'))
raise OSError('malisc failed')
p.wait()
returned = stdout.decode('utf-8')
return parse_stats(returned)
def validate_shader(shader, vulkan):
if vulkan:
subprocess.check_call(['glslangValidator', '-V', shader])
else:
subprocess.check_call(['glslangValidator', shader])
def cross_compile(shader, vulkan, spirv, eliminate, invalid_spirv):
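    # Round trip: assemble (spirv-as) or compile (glslangValidator) the input
    # to SPIR-V, optionally run spirv-val, cross-compile back to GLSL (and
    # Vulkan GLSL), then validate the generated sources with glslangValidator.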
spirv_f, spirv_path = tempfile.mkstemp()
glsl_f, glsl_path = tempfile.mkstemp(suffix = os.path.basename(shader))
os.close(spirv_f)
os.close(glsl_f)
if vulkan or spirv:
vulkan_glsl_f, vulkan_glsl_path = tempfile.mkstemp(suffix = os.path.basename(shader))
os.close(vulkan_glsl_f)
if spirv:
subprocess.check_call(['spirv-as', '-o', spirv_path, shader])
else:
subprocess.check_call(['glslangValidator', '-V', '-o', spirv_path, shader])
if not invalid_spirv:
subprocess.check_call(['spirv-val', spirv_path])
spirv_cross_path = './spirv-cross'
if eliminate:
subprocess.check_call([spirv_cross_path, '--remove-unused-variables', '--entry', 'main', '--output', glsl_path, spirv_path])
else:
subprocess.check_call([spirv_cross_path, '--entry', 'main', '--output', glsl_path, spirv_path])
# A shader might not be possible to make valid GLSL from, skip validation for this case.
if (not ('nocompat' in glsl_path)) and (not spirv):
validate_shader(glsl_path, False)
if vulkan or spirv:
if eliminate:
subprocess.check_call([spirv_cross_path, '--remove-unused-variables', '--entry', 'main', '--vulkan-semantics', '--output', vulkan_glsl_path, spirv_path])
else:
subprocess.check_call([spirv_cross_path, '--entry', 'main', '--vulkan-semantics', '--output', vulkan_glsl_path, spirv_path])
validate_shader(vulkan_glsl_path, vulkan)
return (spirv_path, glsl_path, vulkan_glsl_path if vulkan else None)
def md5_for_file(path):
md5 = hashlib.md5()
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(8192), b''):
md5.update(chunk)
return md5.digest()
def make_reference_dir(path):
base = os.path.dirname(path)
if not os.path.exists(base):
os.makedirs(base)
def reference_path(directory, relpath):
split_paths = os.path.split(directory)
reference_dir = os.path.join(split_paths[0], 'reference/')
reference_dir = os.path.join(reference_dir, split_paths[1])
return os.path.join(reference_dir, relpath)
def regression_check(shader, glsl, update, keep):
reference = reference_path(shader[0], shader[1])
joined_path = os.path.join(shader[0], shader[1])
print('Reference shader path:', reference)
if os.path.exists(reference):
if md5_for_file(glsl) != md5_for_file(reference):
if update:
print('Generated GLSL has changed for {}!'.format(reference))
# If we expect changes, update the reference file.
if os.path.exists(reference):
os.remove(reference)
make_reference_dir(reference)
shutil.move(glsl, reference)
else:
print('Generated GLSL in {} does not match reference {}!'.format(glsl, reference))
with open(glsl, 'r') as f:
print('')
print('Generated:')
print('======================')
print(f.read())
print('======================')
print('')
# Otherwise, fail the test. Keep the shader file around so we can inspect.
if not keep:
os.remove(glsl)
sys.exit(1)
else:
os.remove(glsl)
else:
print('Found new shader {}. Placing GLSL in {}'.format(joined_path, reference))
make_reference_dir(reference)
shutil.move(glsl, reference)
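# Test behaviour is encoded in the shader filename: '.vk.' selects Vulkan
# semantics, '.asm.' marks SPIR-V assembly input, '.invalid.' skips spirv-val,
# '.desktop.' skips malisc stats, and '.noeliminate.' keeps unused variables.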
def shader_is_vulkan(shader):
return '.vk.' in shader
def shader_is_desktop(shader):
return '.desktop.' in shader
def shader_is_eliminate_dead_variables(shader):
return '.noeliminate.' not in shader
def shader_is_spirv(shader):
return '.asm.' in shader
def shader_is_invalid_spirv(shader):
return '.invalid.' in shader
def test_shader(stats, shader, update, keep):
joined_path = os.path.join(shader[0], shader[1])
vulkan = shader_is_vulkan(shader[1])
desktop = shader_is_desktop(shader[1])
eliminate = shader_is_eliminate_dead_variables(shader[1])
is_spirv = shader_is_spirv(shader[1])
invalid_spirv = shader_is_invalid_spirv(shader[1])
print('Testing shader:', joined_path)
spirv, glsl, vulkan_glsl = cross_compile(joined_path, vulkan, is_spirv, eliminate, invalid_spirv)
# Only test GLSL stats if we have a shader following GL semantics.
if stats and (not vulkan) and (not is_spirv) and (not desktop):
cross_stats = get_shader_stats(glsl)
regression_check(shader, glsl, update, keep)
if vulkan_glsl:
regression_check((shader[0], shader[1] + '.vk'), vulkan_glsl, update, keep)
os.remove(spirv)
if stats and (not vulkan) and (not is_spirv) and (not desktop):
pristine_stats = get_shader_stats(joined_path)
a = []
a.append(shader[1])
for i in pristine_stats:
a.append(str(i))
for i in cross_stats:
a.append(str(i))
print(','.join(a), file = stats)
def test_shaders_helper(stats, shader_dir, update, malisc, keep):
for root, dirs, files in os.walk(os.path.join(shader_dir)):
for i in files:
path = os.path.join(root, i)
relpath = os.path.relpath(path, shader_dir)
test_shader(stats, (shader_dir, relpath), update, keep)
def test_shaders(shader_dir, update, malisc, keep):
if malisc:
with open('stats.csv', 'w') as stats:
print('Shader,OrigRegs,OrigUniRegs,OrigALUShort,OrigLSShort,OrigTEXShort,OrigALULong,OrigLSLong,OrigTEXLong,CrossRegs,CrossUniRegs,CrossALUShort,CrossLSShort,CrossTEXShort,CrossALULong,CrossLSLong,CrossTEXLong', file = stats)
test_shaders_helper(stats, shader_dir, update, malisc, keep)
else:
test_shaders_helper(None, shader_dir, update, malisc, keep)
def main():
parser = argparse.ArgumentParser(description = 'Script for regression testing.')
parser.add_argument('folder',
help = 'Folder containing shader files to test.')
parser.add_argument('--update',
action = 'store_true',
help = 'Updates reference files if there is a mismatch. Use when legitimate changes in output is found.')
parser.add_argument('--keep',
action = 'store_true',
help = 'Leave failed GLSL shaders on disk if they fail regression. Useful for debugging.')
parser.add_argument('--malisc',
action = 'store_true',
help = 'Use malisc offline compiler to determine static cycle counts before and after spirv-cross.')
args = parser.parse_args()
if not args.folder:
sys.stderr.write('Need shader folder.\n')
sys.exit(1)
test_shaders(args.folder, args.update, args.malisc, args.keep)
if args.malisc:
print('Stats in stats.csv!')
print('Tests completed!')
if __name__ == '__main__':
main()
| 36.634146
| 237
| 0.637261
|
6249fdf0cbbe03e3e6d54084347e186da397dca5
| 3,324
|
py
|
Python
|
osm_validator/views.py
|
dimahaptar/osm-validator
|
fefae423ef61c0d6f60b92d60427fc021f3e23e7
|
[
"MIT"
] | 1
|
2019-09-24T19:35:29.000Z
|
2019-09-24T19:35:29.000Z
|
osm_validator/views.py
|
tbicr/osm-validator
|
0b03fe82edec55cf0d6f29d4f01907b32eab338d
|
[
"MIT"
] | 16
|
2017-09-20T19:08:28.000Z
|
2021-12-13T19:42:57.000Z
|
osm_validator/views.py
|
dimahaptar/osm-validator
|
fefae423ef61c0d6f60b92d60427fc021f3e23e7
|
[
"MIT"
] | 5
|
2017-09-25T17:18:19.000Z
|
2018-04-24T08:12:07.000Z
|
from functools import wraps
from aiohttp import web
from aiohttp_session import get_session
from psycopg2._psycopg import IntegrityError
from . import models
from .oauth import OSMOauthClient
def login_required(func):
"""
auth decorator
call function
"""
@wraps(func)
async def wrapped(request, **kwargs):
session = await get_session(request)
request.user = None
if 'user_id' in session:
user_id = session['user_id']
async with request.app.db.acquire() as conn:
request.user = models.User(**await (await conn.execute(
models.User.__table__.select().where(
models.User.__table__.c.osm_uid == user_id))
).fetchone())
else:
raise web.HTTPUnauthorized
return await func(request, **kwargs)
return wrapped
async def index(request):
return web.FileResponse('./static/index.html')
async def oauth_login(request):
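    # OAuth 1.0a step one: fetch a request token, cache its secret in Redis
    # so oauth_complete can finish the handshake, then redirect the user to
    # the OSM authorize page.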
oauth_client = OSMOauthClient(
consumer_key=request.app.config.OAUTH_OPENSTREETMAP_KEY,
consumer_secret=request.app.config.OAUTH_OPENSTREETMAP_SECRET)
request_token, request_token_secret, _ = await oauth_client.get_request_token()
await request.app.redis.oauth.set(
request_token,
request_token_secret,
expire=request.app.config.OAUTH_CACHE_EXPIRE)
return web.HTTPFound(oauth_client.get_authorize_url())
async def oauth_complete(request):
session = await get_session(request=request)
request_token = request.query['oauth_token']
request_token_secret = await request.app.redis.oauth.get(request_token)
if request_token_secret is None:
return web.HTTPFound(request.app.router['oauth:login'].url_for())
oauth_client = OSMOauthClient(
consumer_key=request.app.config.OAUTH_OPENSTREETMAP_KEY,
consumer_secret=request.app.config.OAUTH_OPENSTREETMAP_SECRET,
oauth_token=request_token,
oauth_token_secret=request_token_secret)
oauth_token, oauth_token_secret, _ = await oauth_client.get_access_token(
request_token)
oauth_client = OSMOauthClient(
consumer_key=request.app.config.OAUTH_OPENSTREETMAP_KEY,
consumer_secret=request.app.config.OAUTH_OPENSTREETMAP_SECRET,
oauth_token=oauth_token,
oauth_token_secret=oauth_token_secret)
user, _ = await oauth_client.user_info()
async with request.app.db.acquire() as conn:
try:
await conn.execute(models.User.__table__.insert().values(**user))
except IntegrityError:
await conn.execute(models.User.__table__.update().
where(models.User.__table__.c.osm_uid == user['osm_uid']).
values(**user))
session['user_id'] = user['osm_uid']
return web.HTTPFound(request.app.router['index'].url_for())
async def sign_out(request):
session = await get_session(request=request)
user_id = session['user_id'] if 'user_id' in session else None
if user_id:
del session['user_id']
url = request.app.router['index'].url_for()
return web.HTTPFound(url)
@login_required
async def user_info(request):
user = {'osm_user': request.user.osm_user}
return web.json_response(user, status=200)
| 34.989474
| 89
| 0.685921
|
5012c0219f489d6e9e5d4f329a90fffcc0b22a0d
| 2,557
|
py
|
Python
|
pyqtgraph/exporters/HDF5Exporter.py
|
leo603222/fix-displace-between-selection-area-and-mouse-pos
|
1f9031884a980432795b69487bd659f5e4ef91aa
|
[
"MIT"
] | 2,762
|
2015-01-02T14:34:10.000Z
|
2022-03-30T14:06:07.000Z
|
pyqtgraph/exporters/HDF5Exporter.py
|
leo603222/fix-displace-between-selection-area-and-mouse-pos
|
1f9031884a980432795b69487bd659f5e4ef91aa
|
[
"MIT"
] | 1,901
|
2015-01-12T03:20:30.000Z
|
2022-03-31T16:33:36.000Z
|
pyqtgraph/exporters/HDF5Exporter.py
|
leo603222/fix-displace-between-selection-area-and-mouse-pos
|
1f9031884a980432795b69487bd659f5e4ef91aa
|
[
"MIT"
] | 1,038
|
2015-01-01T04:05:49.000Z
|
2022-03-31T11:57:51.000Z
|
# -*- coding: utf-8 -*-
from ..Qt import QtCore
from .Exporter import Exporter
from ..parametertree import Parameter
from .. import PlotItem
import numpy
try:
import h5py
HAVE_HDF5 = True
except ImportError:
HAVE_HDF5 = False
translate = QtCore.QCoreApplication.translate
__all__ = ['HDF5Exporter']
class HDF5Exporter(Exporter):
Name = "HDF5 Export: plot (x,y)"
windows = []
allowCopy = False
def __init__(self, item):
Exporter.__init__(self, item)
self.params = Parameter(name='params', type='group', children=[
{'name': 'Name', 'title': translate("Exporter", 'Name'), 'type': 'str', 'value': 'Export', },
{'name': 'columnMode', 'title': translate("Exporter", 'columnMode'), 'type': 'list',
'limits': ['(x,y) per plot', '(x,y,y,y) for all plots']},
])
def parameters(self):
return self.params
def export(self, fileName=None):
if not HAVE_HDF5:
raise RuntimeError("This exporter requires the h5py package, "
"but it was not importable.")
if not isinstance(self.item, PlotItem):
raise Exception("Must have a PlotItem selected for HDF5 export.")
if fileName is None:
self.fileSaveDialog(filter=["*.h5", "*.hdf", "*.hd5"])
return
dsname = self.params['Name']
fd = h5py.File(fileName, 'a') # forces append to file... 'w' doesn't seem to "delete/overwrite"
data = []
appendAllX = self.params['columnMode'] == '(x,y) per plot'
# Check if the arrays are ragged
len_first = len(self.item.curves[0].getData()[0]) if self.item.curves[0] else None
ragged = any(len(i.getData()[0]) != len_first for i in self.item.curves)
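        # Ragged curves cannot be stacked into one rectangular dataset, so
        # fall back to one (x, y) dataset per curve inside an HDF5 group.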
if ragged:
dgroup = fd.create_group(dsname)
for i, c in enumerate(self.item.curves):
d = c.getData()
fdata = numpy.array([d[0], d[1]]).astype('double')
cname = c.name() if c.name() is not None else str(i)
dset = dgroup.create_dataset(cname, data=fdata)
else:
for i, c in enumerate(self.item.curves):
d = c.getData()
if appendAllX or i == 0:
data.append(d[0])
data.append(d[1])
fdata = numpy.array(data).astype('double')
dset = fd.create_dataset(dsname, data=fdata)
fd.close()
if HAVE_HDF5:
HDF5Exporter.register()
| 33.644737
| 105
| 0.558858
|
ee6502c2d86e025bf9f4610ee65fdc511ffa93cc
| 8,932
|
py
|
Python
|
plugins/houdini/publish/submit_deadline_render.py
|
davidlatwe/reveries-config
|
4a282dd64a32a9b87bd1a070759b6425ff785d68
|
[
"MIT"
] | 3
|
2020-04-01T10:51:17.000Z
|
2021-08-05T18:35:23.000Z
|
plugins/houdini/publish/submit_deadline_render.py
|
davidlatwe/reveries-config
|
4a282dd64a32a9b87bd1a070759b6425ff785d68
|
[
"MIT"
] | null | null | null |
plugins/houdini/publish/submit_deadline_render.py
|
davidlatwe/reveries-config
|
4a282dd64a32a9b87bd1a070759b6425ff785d68
|
[
"MIT"
] | 1
|
2020-07-05T12:06:30.000Z
|
2020-07-05T12:06:30.000Z
|
import os
import json
import platform
import avalon
import pyblish.api
class SubmitDeadlineRender(pyblish.api.InstancePlugin):
"""Publish via rendering rop node on Deadline
Submitting jobs per instance to deadline.
"""
order = pyblish.api.ExtractorOrder + 0.492
hosts = ["houdini"]
label = "Deadline Render"
families = [
"reveries.pointcache",
"reveries.camera",
"reveries.standin",
"reveries.rsproxy",
"reveries.vdbcache",
"reveries.fx.layer_prim",
"reveries.fx.usd",
"reveries.final.usd"
]
targets = ["deadline"]
def process(self, instance):
import reveries
context = instance.context
if not all(result["success"] for result in context.data["results"]):
self.log.warning("Atomicity not held, aborting.")
return
# Context data
username = context.data["user"]
comment = context.data.get("comment", "")
project = context.data["projectDoc"]
asset = instance.data["assetDoc"]["name"]
fpath = context.data["currentMaking"]
houdini_version = context.data["houdiniVersion"]
project_id = str(project["_id"])[-4:].upper()
project_code = project["data"].get("codename") or project_id
fname = os.path.basename(fpath)
batch_name = "({projcode}): [{asset}] {filename}".format(
projcode=project_code,
asset=asset,
filename=fname
)
# Instance data
subset = instance.data["subset"]
version = instance.data["versionNext"]
deadline_pool = instance.data["deadlinePool"]
deadline_prio = instance.data["deadlinePriority"]
deadline_group = instance.data.get("deadlineGroup")
frame_per_task = instance.data.get("deadlineFramesPerTask", 1)
try:
frame_start = int(instance.data["startFrame"])
frame_end = int(instance.data["endFrame"])
frame_step = int(instance.data["step"])
except KeyError:
frames = None
else:
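            # Deadline frame-list syntax: "start-endxstep", e.g. "1-100x2".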
frames = "{start}-{end}x{step}".format(
start=frame_start,
end=frame_end,
step=frame_step,
)
if instance.data["singleOutput"]:
frame_per_task = len(range(frame_start, frame_end + 1))
job_name = "{subset} v{version:0>3}".format(
subset=subset,
version=version,
)
if instance.data.get("deadlineSuspendJob", False):
init_state = "Suspended"
else:
init_state = "Active"
try:
ropnode = instance[0]
except Exception as e:
            # In USD publishing, some instances do not need a rop node.
print("Get ropnode failed: {}".format(e))
ropnode = None
# Get output path
output = self._get_output(ropnode, context)
_plugin_name = instance.data.get("deadline_plugin", "Houdini")
# Get HoudiniBatch arguments
_arguments = ""
if _plugin_name in ["HoudiniBatch"]:
reveries_path = reveries.__file__
script_file = os.path.join(os.path.dirname(reveries_path),
"scripts",
"deadline_extract_houdini.py")
_arguments = "{} {}".format(
script_file, instance.data.get("deadline_arguments", "")
)
# Assemble payload
payload = {
"JobInfo": {
"Plugin": _plugin_name, # HoudiniBatch/Houdini
"BatchName": batch_name, # Top-level group name
"Name": job_name,
"UserName": username,
"MachineName": platform.node(),
"Comment": comment,
"Pool": deadline_pool,
"Priority": deadline_prio,
"Group": deadline_group,
"Frames": frames,
"ChunkSize": frame_per_task,
"InitialStatus": init_state,
"ExtraInfo0": project["name"],
# "Whitelist": platform.node().lower()
},
"PluginInfo": {
"SceneFile": fpath,
"Build": "64bit",
"Version": houdini_version,
# Renderer Node
"OutputDriver": ropnode.path() if ropnode else None,
# Output Filename
"Output": output,
"Arguments": _arguments, # E:\..\deadline_extract_houdini.py
"ScriptOnly": instance.data.get("deadline_script_only", False),
"IgnoreInputs": False,
"GPUsPerTask": 0,
"SelectGPUDevices": "",
},
# Mandatory for Deadline, may be empty
"AuxFiles": [],
"IdOnly": True
}
# Add dependency for pointcache usd
if instance.data.get("deadline_dependency", False):
payload = self._add_dependency(
instance, payload)
# Environment
environment = self.assemble_environment(instance)
parsed_environment = {
"EnvironmentKeyValue%d" % index: u"{key}={value}".format(
key=key,
value=environment[key]
) for index, key in enumerate(environment)
}
payload["JobInfo"].update(parsed_environment)
self.log.info("Submitting.. %s" % instance)
self.log.info(json.dumps(
payload, indent=4, sort_keys=True)
)
# Submit
submitter = context.data["deadlineSubmitter"]
index = submitter.add_job(payload)
instance.data["deadline_index"] = index
def _get_output(self, ropnode, context):
from reveries.houdini import lib
output = ""
if ropnode:
# Override output to use original $HIP
output = lib.get_output_parameter(ropnode).rawValue()
on_HIP = output.startswith("$HIP")
origin_HIP = os.path.dirname(context.data["originMaking"])
output = output.replace("$HIP", origin_HIP, 1) if on_HIP else output
# (NOTE) ^^^ For a fixed staging dir
# We need this because the scene file we submit to Deadline is a
# backup under `$HIP/_published` dir which copied via extractor
# plugin `AvalonSaveScene`.
#
# Note that the Deadline (10.0.27.2) Houdini plugin does not support
# output filename override if the ROP node type is `alembic`. So to
# make this work, I have modified the Deadline Houdini plugin script
# `{DeadlineRepo}/plugins/Houdini/hrender_dl.py` at line 375:
# ```diff
# - elif ropType == "rop_alembic":
# + elif ropType in ("rop_alembic", "alembic"):
# ```
return output
def _add_dependency(self, instance, payload):
_child_indexs = []
for _instance in instance.data.get("deadline_dependency", []):
if "deadline_index" in list(_instance.data.keys()):
_child_indexs.append(_instance.data.get("deadline_index", ""))
if _child_indexs:
dependency_list = {
"JobDependencies": ",".join(_child_indexs)
}
payload["JobInfo"].update(dependency_list)
return payload
def assemble_environment(self, instance):
"""Compose submission required environment variables for instance
Return:
environment (dict): A set of remote variables, return `None` if
instance is not assigning to remote site or publish is
disabled.
"""
submitter = instance.context.data["deadlineSubmitter"]
environment = submitter.environment()
optional_vars = [
"AVALON_CACHE_ROOT",
"JOB",
]
optional_vars += self._check_redshift_vars()
for var in optional_vars:
value = os.getenv(var)
if value:
environment[var] = value
dumped = ";".join(instance.data["dumpedExtractors"])
environment["PYBLISH_EXTRACTOR_DUMPS"] = dumped
environment["PYBLISH_DUMP_FILE"] = instance.data["dumpPath"]
return environment
def _check_redshift_vars(self):
project = avalon.io.find_one({
"name": avalon.api.Session["AVALON_PROJECT"],
"type": "project"
})
renderer = project.get('renderer', None)
if renderer == "redshift":
return [
"PATH",
"HOUDINI_PATH",
"solidangle_LICENSE",
"redshift_LICENSE",
"PXR_PLUGINPATH_NAME"
]
return []
| 32.129496
| 82
| 0.548589
|
872a1cddcaea784e1cd0245601194c12791830b5
| 1,053
|
py
|
Python
|
polls/migrations/0002_profile.py
|
neethu567/learm_profile
|
c129961865fca88cdc9c1992683d59966ce88bf0
|
[
"MIT"
] | null | null | null |
polls/migrations/0002_profile.py
|
neethu567/learm_profile
|
c129961865fca88cdc9c1992683d59966ce88bf0
|
[
"MIT"
] | null | null | null |
polls/migrations/0002_profile.py
|
neethu567/learm_profile
|
c129961865fca88cdc9c1992683d59966ce88bf0
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.1 on 2020-09-30 09:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('polls', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('profile_pic', models.FileField(upload_to='')),
('birth_date', models.DateField(blank=True, null=True)),
('mobile_number', models.IntegerField(default=0)),
('address', models.TextField(blank=True, max_length=500)),
('country', models.TextField(blank=True, max_length=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 36.310345
| 121
| 0.621083
|
dedb74d74ba9dbe5bc61f67a4f8cc6850acf689d
| 3,827
|
py
|
Python
|
include/MPEv2.py
|
jhgalino/MPv2
|
2f5e29d67bccc4538c5aaad2e69e817041414199
|
[
"MIT"
] | null | null | null |
include/MPEv2.py
|
jhgalino/MPv2
|
2f5e29d67bccc4538c5aaad2e69e817041414199
|
[
"MIT"
] | null | null | null |
include/MPEv2.py
|
jhgalino/MPv2
|
2f5e29d67bccc4538c5aaad2e69e817041414199
|
[
"MIT"
] | null | null | null |
import copy
def separateParts(function: str):
assert function.count("~") == 2, "function.count('~') == 2"
functionList = function.split("~")
return functionList
def getParentheses(function: str):
assert type(function) == str, "type(function) == str"
assert function.count("(") >= 1, "function.count('(') >= 1"
assert function.count(")") >= 1, "function.count(')') >= 1"
startCounter = 0
endCounter = 0
functionList = list(function)
for n in range(len(functionList)):
if startCounter < 1 and functionList[n] == "(":
functionList[n] = "~"
startCounter += 1
for n in range(len(functionList) - 1, -1, -1):
if endCounter < 1 and functionList[n] == ")":
functionList[n] = "~"
endCounter += 1
functionList = "".join(functionList)
return functionList
def trig(func: list):
assert len(func) == 3, "len(func) == 3"
assert len(func[2]) == 0, "len(func[2]) == 0"
assert len(func[0]) == 3, "len(func[0]) == 3"
assert type(func[1]) == str, "type(func[1]) == str"
assert type(func[0]) == str, "type(func[0]) == str"
ans = ""
if func[0] == "sin":
ans = "(cos({}))".format(func[1])
elif func[0] == "cos":
ans = "(-sin({}))".format(func[1])
elif func[0] == "sec":
ans = "(sec({})tan({}))".format(func[1], func[1])
elif func[0] == "csc":
ans = "(-csc({})cot({}))".format(func[1], func[1])
elif func[0] == "tan":
ans = "(sec({})^2)".format(func[1])
elif func[0] == "cot":
ans = "(-csc({})^2)".format(func[1])
return ans, func[1]
def coeff(func: list):
assert len(func) == 3, "len(func) == 3"
assert len(func[0]) >= 1, "len(func[0]) >= 1"
exponent = 1
if len(func[2]) == 2:
func[2] = list(func[2])
func[2].pop(0)
func[2] = int("".join(func[2]))
exponent = copy.deepcopy(func[2])
func[2] -= 1
if func[2] in [1, 0]:
func[2] = ""
else:
func[2] = "^" + str(func[2])
func[0] = int(func[0]) * exponent
ans = "{}({}){}".format(func[0], func[1], func[2])
return ans, func[1]
def exp(func: list):
assert len(func[0]) == 0, "len(func[0]) == 0"
assert len(func[2]) in [2, 0], "len(func[0]) == 0"
if len(func[2]) == 2:
func[2] = list(func[2])
func[2].pop(0)
func[2] = int("".join(func[2]))
coefficient = copy.deepcopy(func[2])
func[2] -= 1
if func[2] in [1, 0]:
func[2] = ""
else:
func[2] = "^" + str(func[2])
ans = "{}({}){}".format(coefficient, func[1], func[2])
return ans, func[1]
def chooseMethod(fxnList: list):
assert len(fxnList) == 3, "len(fxnList) == 3"
assert type(fxnList[0]) == str, "type(fxnList[0]) == str"
assert type(fxnList[2]) == str, "type(fxnList[2]) == str"
assert len(fxnList[0]) in [3, 1, 0], "len(fxnList[0]) in [3,1,0]"
assert len(fxnList[2]) in [2, 0], "len(fxnList[2]) in [2, 0]"
if len(fxnList[0]) == 3:
return trig(fxnList)
elif len(fxnList[0]) == 1:
return coeff(fxnList)
elif len(fxnList[0]) == 0:
return exp(fxnList)
def differentiate(fxn: str):
assert type(fxn) == str, "type(fxn) == str"
if fxn == "(x)":
return "1"
else:
answer, nextTerm = chooseMethod(separateParts(getParentheses(fxn)))
assert type(answer) == str, "type(answer) == str"
assert type(nextTerm) == str, "type(nextTerm) == str"
if nextTerm == "x":
return "{}".format(answer)
else:
return "'{}'".format(answer + "*" + differentiate(nextTerm))
OTHER_RECURSIVE_FUNCTIONS = [
"chooseMethod",
"exp",
"coeff",
"trig",
"getParentheses",
"separateParts",
]
| 29.666667
| 75
| 0.512934
|
461e1d12e91a2eae193c5b26bd1ffce9c488e906
| 1,199
|
py
|
Python
|
google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/enums/types/ad_strength.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 7
|
2021-02-21T10:39:41.000Z
|
2021-12-07T07:31:28.000Z
|
google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/enums/types/ad_strength.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 6
|
2021-02-02T23:46:11.000Z
|
2021-11-15T01:46:02.000Z
|
google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/enums/types/ad_strength.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 4
|
2021-01-28T23:25:45.000Z
|
2021-08-30T01:55:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v8.enums',
marshal='google.ads.googleads.v8',
manifest={
'AdStrengthEnum',
},
)
class AdStrengthEnum(proto.Message):
r"""Container for enum describing possible ad strengths.
"""
class AdStrength(proto.Enum):
r"""Enum listing the possible ad strengths."""
UNSPECIFIED = 0
UNKNOWN = 1
PENDING = 2
NO_ADS = 3
POOR = 4
AVERAGE = 5
GOOD = 6
EXCELLENT = 7
__all__ = tuple(sorted(__protobuf__.manifest))
| 27.25
| 74
| 0.674729
|
9ca6fbedffd2d255d30e95399033c72840511c32
| 15,231
|
py
|
Python
|
venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/chardet/euckrfreq.py
|
nguidjoi/bigData
|
aef722e77c10b8b0261578277892ebb15764d680
|
[
"Apache-2.0"
] | 1
|
2019-05-29T15:22:28.000Z
|
2019-05-29T15:22:28.000Z
|
venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/chardet/euckrfreq.py
|
nguidjoi/bigData
|
aef722e77c10b8b0261578277892ebb15764d680
|
[
"Apache-2.0"
] | 2
|
2020-10-23T06:51:04.000Z
|
2020-11-12T07:03:37.000Z
|
venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/chardet/euckrfreq.py
|
nguidjoi/bigData
|
aef722e77c10b8b0261578277892ebb15764d680
|
[
"Apache-2.0"
] | 1
|
2019-05-29T15:24:24.000Z
|
2019-05-29T15:24:24.000Z
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampled from about 20M of text material, including literature and computer technology
# 128 --> 0.79
# 256 --> 0.92
# 512 --> 0.986
# 1024 --> 0.99944
# 2048 --> 0.99999
#
# Ideal Distribution Ratio = 0.98653 / (1-0.98653) = 73.24
# Random Distribution Ratio = 512 / (2350-512) = 0.279.
#
# Typical Distribution Ratio
EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0
EUCKR_TABLE_SIZE = 2352
# Char to FreqOrder table
EUCKR_CHAR_TO_FREQ_ORDER = (
13, 130, 120, 1396, 481, 1719, 1720, 328, 609, 212, 1721, 707, 400, 299, 1722, 87,
1397, 1723, 104, 536, 1117, 1203, 1724, 1267, 685, 1268, 508, 1725, 1726, 1727, 1728, 1398,
1399, 1729, 1730, 1731, 141, 621, 326, 1057, 368, 1732, 267, 488, 20, 1733, 1269, 1734,
945, 1400, 1735, 47, 904, 1270, 1736, 1737, 773, 248, 1738, 409, 313, 786, 429, 1739,
116, 987, 813, 1401, 683, 75, 1204, 145, 1740, 1741, 1742, 1743, 16, 847, 667, 622,
708, 1744, 1745, 1746, 966, 787, 304, 129, 1747, 60, 820, 123, 676, 1748, 1749, 1750,
1751, 617, 1752, 626, 1753, 1754, 1755, 1756, 653, 1757, 1758, 1759, 1760, 1761, 1762, 856,
344, 1763, 1764, 1765, 1766, 89, 401, 418, 806, 905, 848, 1767, 1768, 1769, 946, 1205,
709, 1770, 1118, 1771, 241, 1772, 1773, 1774, 1271, 1775, 569, 1776, 999, 1777, 1778, 1779,
1780, 337, 751, 1058, 28, 628, 254, 1781, 177, 906, 270, 349, 891, 1079, 1782, 19,
1783, 379, 1784, 315, 1785, 629, 754, 1402, 559, 1786, 636, 203, 1206, 1787, 710, 567,
1788, 935, 814, 1789, 1790, 1207, 766, 528, 1791, 1792, 1208, 1793, 1794, 1795, 1796, 1797,
1403, 1798, 1799, 533, 1059, 1404, 1405, 1156, 1406, 936, 884, 1080, 1800, 351, 1801, 1802,
1803, 1804, 1805, 801, 1806, 1807, 1808, 1119, 1809, 1157, 714, 474, 1407, 1810, 298, 899,
885, 1811, 1120, 802, 1158, 1812, 892, 1813, 1814, 1408, 659, 1815, 1816, 1121, 1817, 1818,
1819, 1820, 1821, 1822, 319, 1823, 594, 545, 1824, 815, 937, 1209, 1825, 1826, 573, 1409,
1022, 1827, 1210, 1828, 1829, 1830, 1831, 1832, 1833, 556, 722, 807, 1122, 1060, 1834, 697,
1835, 900, 557, 715, 1836, 1410, 540, 1411, 752, 1159, 294, 597, 1211, 976, 803, 770,
1412, 1837, 1838, 39, 794, 1413, 358, 1839, 371, 925, 1840, 453, 661, 788, 531, 723,
544, 1023, 1081, 869, 91, 1841, 392, 430, 790, 602, 1414, 677, 1082, 457, 1415, 1416,
1842, 1843, 475, 327, 1024, 1417, 795, 121, 1844, 733, 403, 1418, 1845, 1846, 1847, 300,
119, 711, 1212, 627, 1848, 1272, 207, 1849, 1850, 796, 1213, 382, 1851, 519, 1852, 1083,
893, 1853, 1854, 1855, 367, 809, 487, 671, 1856, 663, 1857, 1858, 956, 471, 306, 857,
1859, 1860, 1160, 1084, 1861, 1862, 1863, 1864, 1865, 1061, 1866, 1867, 1868, 1869, 1870, 1871,
282, 96, 574, 1872, 502, 1085, 1873, 1214, 1874, 907, 1875, 1876, 827, 977, 1419, 1420,
1421, 268, 1877, 1422, 1878, 1879, 1880, 308, 1881, 2, 537, 1882, 1883, 1215, 1884, 1885,
127, 791, 1886, 1273, 1423, 1887, 34, 336, 404, 643, 1888, 571, 654, 894, 840, 1889,
0, 886, 1274, 122, 575, 260, 908, 938, 1890, 1275, 410, 316, 1891, 1892, 100, 1893,
1894, 1123, 48, 1161, 1124, 1025, 1895, 633, 901, 1276, 1896, 1897, 115, 816, 1898, 317,
1899, 694, 1900, 909, 734, 1424, 572, 866, 1425, 691, 85, 524, 1010, 543, 394, 841,
1901, 1902, 1903, 1026, 1904, 1905, 1906, 1907, 1908, 1909, 30, 451, 651, 988, 310, 1910,
1911, 1426, 810, 1216, 93, 1912, 1913, 1277, 1217, 1914, 858, 759, 45, 58, 181, 610,
269, 1915, 1916, 131, 1062, 551, 443, 1000, 821, 1427, 957, 895, 1086, 1917, 1918, 375,
1919, 359, 1920, 687, 1921, 822, 1922, 293, 1923, 1924, 40, 662, 118, 692, 29, 939,
887, 640, 482, 174, 1925, 69, 1162, 728, 1428, 910, 1926, 1278, 1218, 1279, 386, 870,
217, 854, 1163, 823, 1927, 1928, 1929, 1930, 834, 1931, 78, 1932, 859, 1933, 1063, 1934,
1935, 1936, 1937, 438, 1164, 208, 595, 1938, 1939, 1940, 1941, 1219, 1125, 1942, 280, 888,
1429, 1430, 1220, 1431, 1943, 1944, 1945, 1946, 1947, 1280, 150, 510, 1432, 1948, 1949, 1950,
1951, 1952, 1953, 1954, 1011, 1087, 1955, 1433, 1043, 1956, 881, 1957, 614, 958, 1064, 1065,
1221, 1958, 638, 1001, 860, 967, 896, 1434, 989, 492, 553, 1281, 1165, 1959, 1282, 1002,
1283, 1222, 1960, 1961, 1962, 1963, 36, 383, 228, 753, 247, 454, 1964, 876, 678, 1965,
1966, 1284, 126, 464, 490, 835, 136, 672, 529, 940, 1088, 1435, 473, 1967, 1968, 467,
50, 390, 227, 587, 279, 378, 598, 792, 968, 240, 151, 160, 849, 882, 1126, 1285,
639, 1044, 133, 140, 288, 360, 811, 563, 1027, 561, 142, 523, 1969, 1970, 1971, 7,
103, 296, 439, 407, 506, 634, 990, 1972, 1973, 1974, 1975, 645, 1976, 1977, 1978, 1979,
1980, 1981, 236, 1982, 1436, 1983, 1984, 1089, 192, 828, 618, 518, 1166, 333, 1127, 1985,
818, 1223, 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 342, 1128, 1286, 746, 842, 1994,
1995, 560, 223, 1287, 98, 8, 189, 650, 978, 1288, 1996, 1437, 1997, 17, 345, 250,
423, 277, 234, 512, 226, 97, 289, 42, 167, 1998, 201, 1999, 2000, 843, 836, 824,
532, 338, 783, 1090, 182, 576, 436, 1438, 1439, 527, 500, 2001, 947, 889, 2002, 2003,
2004, 2005, 262, 600, 314, 447, 2006, 547, 2007, 693, 738, 1129, 2008, 71, 1440, 745,
619, 688, 2009, 829, 2010, 2011, 147, 2012, 33, 948, 2013, 2014, 74, 224, 2015, 61,
191, 918, 399, 637, 2016, 1028, 1130, 257, 902, 2017, 2018, 2019, 2020, 2021, 2022, 2023,
2024, 2025, 2026, 837, 2027, 2028, 2029, 2030, 179, 874, 591, 52, 724, 246, 2031, 2032,
2033, 2034, 1167, 969, 2035, 1289, 630, 605, 911, 1091, 1168, 2036, 2037, 2038, 1441, 912,
2039, 623, 2040, 2041, 253, 1169, 1290, 2042, 1442, 146, 620, 611, 577, 433, 2043, 1224,
719, 1170, 959, 440, 437, 534, 84, 388, 480, 1131, 159, 220, 198, 679, 2044, 1012,
819, 1066, 1443, 113, 1225, 194, 318, 1003, 1029, 2045, 2046, 2047, 2048, 1067, 2049, 2050,
2051, 2052, 2053, 59, 913, 112, 2054, 632, 2055, 455, 144, 739, 1291, 2056, 273, 681,
499, 2057, 448, 2058, 2059, 760, 2060, 2061, 970, 384, 169, 245, 1132, 2062, 2063, 414,
1444, 2064, 2065, 41, 235, 2066, 157, 252, 877, 568, 919, 789, 580, 2067, 725, 2068,
2069, 1292, 2070, 2071, 1445, 2072, 1446, 2073, 2074, 55, 588, 66, 1447, 271, 1092, 2075,
1226, 2076, 960, 1013, 372, 2077, 2078, 2079, 2080, 2081, 1293, 2082, 2083, 2084, 2085, 850,
2086, 2087, 2088, 2089, 2090, 186, 2091, 1068, 180, 2092, 2093, 2094, 109, 1227, 522, 606,
2095, 867, 1448, 1093, 991, 1171, 926, 353, 1133, 2096, 581, 2097, 2098, 2099, 1294, 1449,
1450, 2100, 596, 1172, 1014, 1228, 2101, 1451, 1295, 1173, 1229, 2102, 2103, 1296, 1134, 1452,
949, 1135, 2104, 2105, 1094, 1453, 1454, 1455, 2106, 1095, 2107, 2108, 2109, 2110, 2111, 2112,
2113, 2114, 2115, 2116, 2117, 804, 2118, 2119, 1230, 1231, 805, 1456, 405, 1136, 2120, 2121,
2122, 2123, 2124, 720, 701, 1297, 992, 1457, 927, 1004, 2125, 2126, 2127, 2128, 2129, 2130,
22, 417, 2131, 303, 2132, 385, 2133, 971, 520, 513, 2134, 1174, 73, 1096, 231, 274,
962, 1458, 673, 2135, 1459, 2136, 152, 1137, 2137, 2138, 2139, 2140, 1005, 1138, 1460, 1139,
2141, 2142, 2143, 2144, 11, 374, 844, 2145, 154, 1232, 46, 1461, 2146, 838, 830, 721,
1233, 106, 2147, 90, 428, 462, 578, 566, 1175, 352, 2148, 2149, 538, 1234, 124, 1298,
2150, 1462, 761, 565, 2151, 686, 2152, 649, 2153, 72, 173, 2154, 460, 415, 2155, 1463,
2156, 1235, 305, 2157, 2158, 2159, 2160, 2161, 2162, 579, 2163, 2164, 2165, 2166, 2167, 747,
2168, 2169, 2170, 2171, 1464, 669, 2172, 2173, 2174, 2175, 2176, 1465, 2177, 23, 530, 285,
2178, 335, 729, 2179, 397, 2180, 2181, 2182, 1030, 2183, 2184, 698, 2185, 2186, 325, 2187,
2188, 369, 2189, 799, 1097, 1015, 348, 2190, 1069, 680, 2191, 851, 1466, 2192, 2193, 10,
2194, 613, 424, 2195, 979, 108, 449, 589, 27, 172, 81, 1031, 80, 774, 281, 350,
1032, 525, 301, 582, 1176, 2196, 674, 1045, 2197, 2198, 1467, 730, 762, 2199, 2200, 2201,
2202, 1468, 2203, 993, 2204, 2205, 266, 1070, 963, 1140, 2206, 2207, 2208, 664, 1098, 972,
2209, 2210, 2211, 1177, 1469, 1470, 871, 2212, 2213, 2214, 2215, 2216, 1471, 2217, 2218, 2219,
2220, 2221, 2222, 2223, 2224, 2225, 2226, 2227, 1472, 1236, 2228, 2229, 2230, 2231, 2232, 2233,
2234, 2235, 1299, 2236, 2237, 200, 2238, 477, 373, 2239, 2240, 731, 825, 777, 2241, 2242,
2243, 521, 486, 548, 2244, 2245, 2246, 1473, 1300, 53, 549, 137, 875, 76, 158, 2247,
1301, 1474, 469, 396, 1016, 278, 712, 2248, 321, 442, 503, 767, 744, 941, 1237, 1178,
1475, 2249, 82, 178, 1141, 1179, 973, 2250, 1302, 2251, 297, 2252, 2253, 570, 2254, 2255,
2256, 18, 450, 206, 2257, 290, 292, 1142, 2258, 511, 162, 99, 346, 164, 735, 2259,
1476, 1477, 4, 554, 343, 798, 1099, 2260, 1100, 2261, 43, 171, 1303, 139, 215, 2262,
2263, 717, 775, 2264, 1033, 322, 216, 2265, 831, 2266, 149, 2267, 1304, 2268, 2269, 702,
1238, 135, 845, 347, 309, 2270, 484, 2271, 878, 655, 238, 1006, 1478, 2272, 67, 2273,
295, 2274, 2275, 461, 2276, 478, 942, 412, 2277, 1034, 2278, 2279, 2280, 265, 2281, 541,
2282, 2283, 2284, 2285, 2286, 70, 852, 1071, 2287, 2288, 2289, 2290, 21, 56, 509, 117,
432, 2291, 2292, 331, 980, 552, 1101, 148, 284, 105, 393, 1180, 1239, 755, 2293, 187,
2294, 1046, 1479, 2295, 340, 2296, 63, 1047, 230, 2297, 2298, 1305, 763, 1306, 101, 800,
808, 494, 2299, 2300, 2301, 903, 2302, 37, 1072, 14, 5, 2303, 79, 675, 2304, 312,
2305, 2306, 2307, 2308, 2309, 1480, 6, 1307, 2310, 2311, 2312, 1, 470, 35, 24, 229,
2313, 695, 210, 86, 778, 15, 784, 592, 779, 32, 77, 855, 964, 2314, 259, 2315,
501, 380, 2316, 2317, 83, 981, 153, 689, 1308, 1481, 1482, 1483, 2318, 2319, 716, 1484,
2320, 2321, 2322, 2323, 2324, 2325, 1485, 2326, 2327, 128, 57, 68, 261, 1048, 211, 170,
1240, 31, 2328, 51, 435, 742, 2329, 2330, 2331, 635, 2332, 264, 456, 2333, 2334, 2335,
425, 2336, 1486, 143, 507, 263, 943, 2337, 363, 920, 1487, 256, 1488, 1102, 243, 601,
1489, 2338, 2339, 2340, 2341, 2342, 2343, 2344, 861, 2345, 2346, 2347, 2348, 2349, 2350, 395,
2351, 1490, 1491, 62, 535, 166, 225, 2352, 2353, 668, 419, 1241, 138, 604, 928, 2354,
1181, 2355, 1492, 1493, 2356, 2357, 2358, 1143, 2359, 696, 2360, 387, 307, 1309, 682, 476,
2361, 2362, 332, 12, 222, 156, 2363, 232, 2364, 641, 276, 656, 517, 1494, 1495, 1035,
416, 736, 1496, 2365, 1017, 586, 2366, 2367, 2368, 1497, 2369, 242, 2370, 2371, 2372, 1498,
2373, 965, 713, 2374, 2375, 2376, 2377, 740, 982, 1499, 944, 1500, 1007, 2378, 2379, 1310,
1501, 2380, 2381, 2382, 785, 329, 2383, 2384, 1502, 2385, 2386, 2387, 932, 2388, 1503, 2389,
2390, 2391, 2392, 1242, 2393, 2394, 2395, 2396, 2397, 994, 950, 2398, 2399, 2400, 2401, 1504,
1311, 2402, 2403, 2404, 2405, 1049, 749, 2406, 2407, 853, 718, 1144, 1312, 2408, 1182, 1505,
2409, 2410, 255, 516, 479, 564, 550, 214, 1506, 1507, 1313, 413, 239, 444, 339, 1145,
1036, 1508, 1509, 1314, 1037, 1510, 1315, 2411, 1511, 2412, 2413, 2414, 176, 703, 497, 624,
593, 921, 302, 2415, 341, 165, 1103, 1512, 2416, 1513, 2417, 2418, 2419, 376, 2420, 700,
2421, 2422, 2423, 258, 768, 1316, 2424, 1183, 2425, 995, 608, 2426, 2427, 2428, 2429, 221,
2430, 2431, 2432, 2433, 2434, 2435, 2436, 2437, 195, 323, 726, 188, 897, 983, 1317, 377,
644, 1050, 879, 2438, 452, 2439, 2440, 2441, 2442, 2443, 2444, 914, 2445, 2446, 2447, 2448,
915, 489, 2449, 1514, 1184, 2450, 2451, 515, 64, 427, 495, 2452, 583, 2453, 483, 485,
1038, 562, 213, 1515, 748, 666, 2454, 2455, 2456, 2457, 334, 2458, 780, 996, 1008, 705,
1243, 2459, 2460, 2461, 2462, 2463, 114, 2464, 493, 1146, 366, 163, 1516, 961, 1104, 2465,
291, 2466, 1318, 1105, 2467, 1517, 365, 2468, 355, 951, 1244, 2469, 1319, 2470, 631, 2471,
2472, 218, 1320, 364, 320, 756, 1518, 1519, 1321, 1520, 1322, 2473, 2474, 2475, 2476, 997,
2477, 2478, 2479, 2480, 665, 1185, 2481, 916, 1521, 2482, 2483, 2484, 584, 684, 2485, 2486,
797, 2487, 1051, 1186, 2488, 2489, 2490, 1522, 2491, 2492, 370, 2493, 1039, 1187, 65, 2494,
434, 205, 463, 1188, 2495, 125, 812, 391, 402, 826, 699, 286, 398, 155, 781, 771,
585, 2496, 590, 505, 1073, 2497, 599, 244, 219, 917, 1018, 952, 646, 1523, 2498, 1323,
2499, 2500, 49, 984, 354, 741, 2501, 625, 2502, 1324, 2503, 1019, 190, 357, 757, 491,
95, 782, 868, 2504, 2505, 2506, 2507, 2508, 2509, 134, 1524, 1074, 422, 1525, 898, 2510,
161, 2511, 2512, 2513, 2514, 769, 2515, 1526, 2516, 2517, 411, 1325, 2518, 472, 1527, 2519,
2520, 2521, 2522, 2523, 2524, 985, 2525, 2526, 2527, 2528, 2529, 2530, 764, 2531, 1245, 2532,
2533, 25, 204, 311, 2534, 496, 2535, 1052, 2536, 2537, 2538, 2539, 2540, 2541, 2542, 199,
704, 504, 468, 758, 657, 1528, 196, 44, 839, 1246, 272, 750, 2543, 765, 862, 2544,
2545, 1326, 2546, 132, 615, 933, 2547, 732, 2548, 2549, 2550, 1189, 1529, 2551, 283, 1247,
1053, 607, 929, 2552, 2553, 2554, 930, 183, 872, 616, 1040, 1147, 2555, 1148, 1020, 441,
249, 1075, 2556, 2557, 2558, 466, 743, 2559, 2560, 2561, 92, 514, 426, 420, 526, 2562,
2563, 2564, 2565, 2566, 2567, 2568, 185, 2569, 2570, 2571, 2572, 776, 1530, 658, 2573, 362,
2574, 361, 922, 1076, 793, 2575, 2576, 2577, 2578, 2579, 2580, 1531, 251, 2581, 2582, 2583,
2584, 1532, 54, 612, 237, 1327, 2585, 2586, 275, 408, 647, 111, 2587, 1533, 1106, 465,
3, 458, 9, 38, 2588, 107, 110, 890, 209, 26, 737, 498, 2589, 1534, 2590, 431,
202, 88, 1535, 356, 287, 1107, 660, 1149, 2591, 381, 1536, 986, 1150, 445, 1248, 1151,
974, 2592, 2593, 846, 2594, 446, 953, 184, 1249, 1250, 727, 2595, 923, 193, 883, 2596,
2597, 2598, 102, 324, 539, 817, 2599, 421, 1041, 2600, 832, 2601, 94, 175, 197, 406,
2602, 459, 2603, 2604, 2605, 2606, 2607, 330, 555, 2608, 2609, 2610, 706, 1108, 389, 2611,
2612, 2613, 2614, 233, 2615, 833, 558, 931, 954, 1251, 2616, 2617, 1537, 546, 2618, 2619,
1009, 2620, 2621, 2622, 1538, 690, 1328, 2623, 955, 2624, 1539, 2625, 2626, 772, 2627, 2628,
2629, 2630, 2631, 924, 648, 863, 603, 2632, 2633, 934, 1540, 864, 865, 2634, 642, 1042,
670, 1190, 2635, 2636, 2637, 2638, 168, 2639, 652, 873, 542, 1054, 1541, 2640, 2641, 2642, # 512, 256
)
| 78.107692
| 106
| 0.620183
|
28a77947a8d419729e6917f8ee8f956d770ceb9c
| 4,985
|
py
|
Python
|
tf2_gnn/layers/message_passing/rgin.py
|
Fatead/tf2-gnn-code
|
8e27ecda0d2242f6be23854a25f43f7379db6f86
|
[
"MIT"
] | 1
|
2021-09-27T13:53:21.000Z
|
2021-09-27T13:53:21.000Z
|
tf2_gnn/layers/message_passing/rgin.py
|
Fatead/tf2-gnn-code
|
8e27ecda0d2242f6be23854a25f43f7379db6f86
|
[
"MIT"
] | null | null | null |
tf2_gnn/layers/message_passing/rgin.py
|
Fatead/tf2-gnn-code
|
8e27ecda0d2242f6be23854a25f43f7379db6f86
|
[
"MIT"
] | null | null | null |
"""Relation Graph Isomorphism Network message propogation layer."""
from typing import Dict, List, Any, Optional
import tensorflow as tf
from dpu_utils.tf2utils import MLP
from .message_passing import MessagePassing, MessagePassingInput, register_message_passing_implementation
from .gnn_edge_mlp import GNN_Edge_MLP
from tf2_gnn.utils.constants import SMALL_NUMBER
@register_message_passing_implementation
class RGIN(GNN_Edge_MLP):
"""
    Graph Isomorphism Network (GIN).
    GIN comes from the paper "How Powerful are Graph Neural Networks?", which analyses
    existing graph neural networks and proposes the GIN architecture. Its main traits:
    1. After each hop of neighbour-feature aggregation, the GIN model mixes the result
       back with the node's own original features, and a fully connected network of
       arbitrary shape can be fitted on top, making the mapping injective.
    2. During this feature mixing, a learnable parameter rescales the node's own
       features before they are added to the aggregated neighbour features.
Compute new graph states by neural message passing using MLPs for state updates
and message computation.
For this, we assume existing node states h^t_v and a list of per-edge-type adjacency
matrices A_\ell.
We compute new states as follows:
h^{t+1}_v := \sigma(MLP_{aggr}(\sum_\ell \sum_{(u, v) \in A_\ell} MLP_\ell(h^t_u)))
The learnable parameters of this are the MLPs MLP_\ell.
This is derived from Cor. 6 of arXiv:1810.00826, instantiating the functions f, \phi
with _separate_ MLPs. This is more powerful than the GIN formulation in Eq. (4.1) of
arXiv:1810.00826, as we want to be able to distinguish graphs of the form
G_1 = (V={1, 2, 3}, E_1={(1, 2)}, E_2={(3, 2)})
and
G_2 = (V={1, 2, 3}, E_1={(3, 2)}, E_2={(1, 2)})
    from each other. If we treated all edges the same,
    G_1.E_1 \cup G_1.E_2 == G_2.E_1 \cup G_2.E_2 would imply that the two graphs
    become indistinguishable.
Hence, we introduce per-edge-type MLPs, which also means that we have to drop
the optimisation of modelling f \circ \phi by a single MLP used in the original
GIN formulation.
Note that RGIN is implemented as a special-case of GNN_Edge_MLP, setting some
different default hyperparameters and adding a different message aggregation
function, but re-using the message passing functionality.
We use the following abbreviations in shape descriptions:
* V: number of nodes
* L: number of different edge types
* E: number of edges of a given edge type
* D: input node representation dimension
* H: output node representation dimension (set as hidden_dim)
>>> node_embeddings = tf.random.normal(shape=(5, 3))
>>> adjacency_lists = (
... tf.constant([[0, 1], [2, 4], [2, 4]], dtype=tf.int32),
... tf.constant([[2, 3], [2, 4]], dtype=tf.int32),
... tf.constant([[3, 1]], dtype=tf.int32),
... )
...
>>> params = RGIN.get_default_hyperparameters()
>>> params["hidden_dim"] = 12
>>> layer = RGIN(params)
>>> output = layer(MessagePassingInput(node_embeddings, adjacency_lists))
>>> print(output)
tf.Tensor(..., shape=(5, 12), dtype=float32)
"""
@classmethod
def get_default_hyperparameters(cls):
these_hypers = {
"use_target_state_as_input": False,
"num_edge_MLP_hidden_layers": 1,
"num_aggr_MLP_hidden_layers": None,
}
gnn_edge_mlp_hypers = super().get_default_hyperparameters()
gnn_edge_mlp_hypers.update(these_hypers)
return gnn_edge_mlp_hypers
def __init__(self, params: Dict[str, Any], **kwargs):
super().__init__(params, **kwargs)
self._num_aggr_MLP_hidden_layers: Optional[int] = params["num_aggr_MLP_hidden_layers"]
self._aggregation_mlp: Optional[MLP] = None
def build(self, input_shapes: MessagePassingInput):
node_embedding_shapes = input_shapes.node_embeddings
if self._num_aggr_MLP_hidden_layers is not None:
with tf.name_scope("aggregation_MLP"):
self._aggregation_mlp = MLP(
out_size=self._hidden_dim,
hidden_layers=[self._hidden_dim] * self._num_aggr_MLP_hidden_layers,
)
self._aggregation_mlp.build(tf.TensorShape((None, self._hidden_dim)))
super().build(input_shapes)
def _compute_new_node_embeddings(
self,
cur_node_embeddings: tf.Tensor,
messages_per_type: List[tf.Tensor],
edge_type_to_message_targets: List[tf.Tensor],
num_nodes: tf.Tensor,
training: bool,
):
# Let M be the number of messages (sum of all E):
message_targets = tf.concat(edge_type_to_message_targets, axis=0) # Shape [M]
messages = tf.concat(messages_per_type, axis=0) # Shape [M, H]
aggregated_messages = self._aggregation_fn(
data=messages, segment_ids=message_targets, num_segments=num_nodes
)
if self._aggregation_mlp is not None:
aggregated_messages = self._aggregation_mlp(aggregated_messages, training)
return self._activation_fn(aggregated_messages)
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.ELLIPSIS)
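    # Added illustrative sketch: the node update in _compute_new_node_embeddings
    # reduces messages per target node with a segment reduction (assuming a
    # sum-style aggregation function is configured).
    messages = tf.constant([[1.0], [2.0], [4.0]])  # M=3 messages, H=1
    targets = tf.constant([0, 0, 2])               # target node of each message
    print(tf.math.unsorted_segment_sum(messages, targets, num_segments=3))
    # -> [[3.], [0.], [4.]]: node 0 receives 1+2, node 1 nothing, node 2 receives 4.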
| 40.860656
| 105
| 0.683651
|
84213d5cd6dd7d6ea868e9c14e6d78e4f4b21402
| 12,530
|
py
|
Python
|
setup.py
|
jicius/airflow
|
69e9e9729cb26aaed3c5c4c290d26f3eb40c72fd
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2020-09-03T09:35:30.000Z
|
2020-09-03T09:35:30.000Z
|
setup.py
|
ciusji/airflow
|
69e9e9729cb26aaed3c5c4c290d26f3eb40c72fd
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
setup.py
|
ciusji/airflow
|
69e9e9729cb26aaed3c5c4c290d26f3eb40c72fd
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2019-11-08T08:49:27.000Z
|
2019-11-08T08:49:27.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from setuptools import setup, find_packages, Command
from setuptools.command.test import test as TestCommand
import imp
import io
import logging
import os
import sys
import subprocess
logger = logging.getLogger(__name__)
# Kept manually in sync with airflow.__version__
version = imp.load_source(
'airflow.version', os.path.join('airflow', 'version.py')).version
PY3 = sys.version_info[0] == 3
with io.open('README.md', encoding='utf-8') as f:
long_description = f.read()
class Tox(TestCommand):
user_options = [('tox-args=', None, "Arguments to pass to tox")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = ''
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import tox
errno = tox.cmdline(args=self.tox_args.split())
sys.exit(errno)
class CleanCommand(Command):
"""Custom clean command to tidy up the project root."""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')
class CompileAssets(Command):
"""
Custom compile assets command to compile and build the frontend
assets using npm and webpack.
"""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
subprocess.call('./airflow/www/compile_assets.sh')
def git_version(version):
"""
Return a version to identify the state of the underlying git repo. The version will
indicate whether the head of the current git-backed working directory is tied to a
release tag or not : it will indicate the former with a 'release:{version}' prefix
and the latter with a 'dev0' prefix. Following the prefix will be a sha of the current
branch head. Finally, a "dirty" suffix is appended to indicate that uncommitted
changes are present.
"""
repo = None
try:
import git
repo = git.Repo('.git')
except ImportError:
logger.warning('gitpython not found: Cannot compute the git version.')
return ''
except Exception as e:
logger.warning('Cannot compute the git version. {}'.format(e))
return ''
if repo:
sha = repo.head.commit.hexsha
if repo.is_dirty():
return '.dev0+{sha}.dirty'.format(sha=sha)
# commit is clean
return '.release:{version}+{sha}'.format(version=version, sha=sha)
else:
return 'no_git_version'
def write_version(filename=os.path.join(*['airflow',
'git_version'])):
text = "{}".format(git_version(version))
with open(filename, 'w') as a:
a.write(text)
async_packages = [
'greenlet>=0.4.9',
'eventlet>= 0.9.7',
'gevent>=0.13'
]
atlas = ['atlasclient>=0.1.2']
aws = [
'boto3>=1.7.0, <1.8.0',
]
azure = [
'azure-storage>=0.34.0',
'azure-mgmt-resource==1.2.2',
'azure-mgmt-datalake-store==0.4.0',
'azure-datalake-store==0.0.19',
'azure-cosmos>=3.0.1',
'azure-mgmt-containerinstance',
]
cassandra = ['cassandra-driver>=3.13.0']
celery = [
'celery>=4.1.1, <4.2.0',
'flower>=0.7.3, <1.0'
]
cgroups = [
'cgroupspy>=0.1.4',
]
# major update coming soon, clamp to 0.x
cloudant = ['cloudant>=0.5.9,<2.0']
crypto = ['cryptography>=0.9.3']
dask = [
'distributed>=1.17.1, <2'
]
databricks = ['requests>=2.20.0, <3']
datadog = ['datadog>=0.14.0']
doc = [
'sphinx>=1.2.3',
'sphinx-argparse>=0.1.13',
'sphinx-rtd-theme>=0.1.6',
'sphinxcontrib-httpdomain>=1.7.0',
'Sphinx-PyPI-upload>=0.2.1'
]
docker = ['docker~=3.0']
druid = ['pydruid>=0.4.1']
elasticsearch = [
'elasticsearch>=5.0.0,<6.0.0',
'elasticsearch-dsl>=5.0.0,<6.0.0'
]
gcp_api = [
'httplib2>=0.9.2',
'google-api-python-client>=1.6.0, <2.0.0dev',
'google-auth>=1.0.0, <2.0.0dev',
'google-auth-httplib2>=0.0.1',
'google-cloud-container>=0.1.1',
'google-cloud-bigtable==0.31.0',
'google-cloud-spanner>=1.7.1',
'grpcio-gcp>=0.2.2',
'PyOpenSSL',
'pandas-gbq'
]
github_enterprise = ['Flask-OAuthlib>=0.9.1']
google_auth = ['Flask-OAuthlib>=0.9.1']
hdfs = ['snakebite>=2.7.8']
hive = [
'hmsclient>=0.1.0',
'pyhive>=0.6.0',
]
jdbc = ['jaydebeapi>=1.1.1']
jenkins = ['python-jenkins>=0.4.15']
jira = ['JIRA>1.0.7']
kerberos = ['pykerberos>=1.1.13',
'requests_kerberos>=0.10.0',
'thrift_sasl>=0.2.0',
'snakebite[kerberos]>=2.7.8']
kubernetes = ['kubernetes>=3.0.0',
'cryptography>=2.0.0']
ldap = ['ldap3>=2.5.1']
mssql = ['pymssql>=2.1.1']
mysql = ['mysqlclient>=1.3.6,<1.4']
oracle = ['cx_Oracle>=5.1.2']
password = [
'bcrypt>=2.0.0',
'flask-bcrypt>=0.7.1',
]
pinot = ['pinotdb>=0.1.1']
postgres = ['psycopg2>=2.7.4']
qds = ['qds-sdk>=1.10.4']
rabbitmq = ['librabbitmq>=1.6.1']
redis = ['redis>=2.10.5,<3.0.0']
salesforce = ['simple-salesforce>=0.72']
samba = ['pysmbclient>=0.1.3']
segment = ['analytics-python>=1.2.9']
sendgrid = ['sendgrid>=5.2.0']
slack = ['slackclient>=1.0.0']
mongo = ['pymongo>=3.6.0']
snowflake = ['snowflake-connector-python>=1.5.2',
'snowflake-sqlalchemy>=1.1.0']
ssh = ['paramiko>=2.1.1', 'pysftp>=0.2.9', 'sshtunnel>=0.1.4,<0.2']
statsd = ['statsd>=3.0.1, <4.0']
vertica = ['vertica-python>=0.5.1']
webhdfs = ['hdfs[dataframe,avro,kerberos]>=2.0.4']
winrm = ['pywinrm==0.2.2']
zendesk = ['zdesk']
all_dbs = postgres + mysql + hive + mssql + hdfs + vertica + cloudant + druid + pinot \
+ cassandra + mongo
devel = [
'click==6.7',
'freezegun',
'jira',
'lxml>=4.0.0',
'mock',
'mongomock',
'moto==1.3.5',
'nose',
'nose-ignore-docstring==0.2',
'nose-timer',
'parameterized',
'paramiko',
'pysftp',
'pywinrm',
'qds-sdk>=1.9.6',
'rednose',
'requests_mock',
'flake8>=3.6.0',
'typing',
]
if not PY3:
devel += ['unittest2']
devel_minreq = devel + kubernetes + mysql + doc + password + cgroups
devel_hadoop = devel_minreq + hive + hdfs + webhdfs + kerberos
devel_all = (sendgrid + devel + all_dbs + doc + samba + slack + crypto + oracle +
docker + ssh + kubernetes + celery + redis + gcp_api +
datadog + zendesk + jdbc + ldap + kerberos + password + webhdfs + jenkins +
druid + pinot + segment + snowflake + elasticsearch +
atlas + azure + aws)
# Snakebite & Google Cloud Dataflow are not Python 3 compatible :'(
if PY3:
devel_ci = [package for package in devel_all if package not in
['snakebite>=2.7.8', 'snakebite[kerberos]>=2.7.8']]
else:
devel_ci = devel_all
def do_setup():
write_version()
setup(
name='apache-airflow',
description='Programmatically author, schedule and monitor data pipelines',
long_description=long_description,
long_description_content_type='text/markdown',
license='Apache License 2.0',
version=version,
packages=find_packages(exclude=['tests*']),
package_data={'': ['airflow/alembic.ini', "airflow/git_version"]},
include_package_data=True,
zip_safe=False,
scripts=['airflow/bin/airflow'],
install_requires=[
'alembic>=0.9, <1.0',
'bleach~=2.1.3',
'configparser>=3.5.0, <3.6.0',
'croniter>=0.3.17, <0.4',
'dill>=0.2.2, <0.3',
'enum34~=1.1.6;python_version<"3.4"',
'flask>=0.12.4, <0.13',
'flask-appbuilder==1.12.3',
'flask-admin==1.5.2',
'flask-caching>=1.3.3, <1.4.0',
'flask-login>=0.3, <0.5',
'flask-swagger==0.2.13',
'flask-wtf>=0.14.2, <0.15',
'funcsigs==1.0.0',
'future>=0.16.0, <0.17',
'gitpython>=2.0.2',
'gunicorn>=19.4.0, <20.0',
'iso8601>=0.1.12',
'json-merge-patch==0.2',
'jinja2>=2.7.3, <=2.10.0',
'markdown>=2.5.2, <3.0',
'pandas>=0.17.1, <1.0.0',
'pendulum==1.4.4',
'psutil>=4.2.0, <6.0.0',
'pygments>=2.0.1, <3.0',
'python-daemon>=2.1.1, <2.2',
'python-dateutil>=2.3, <3',
'python-nvd3==0.15.0',
'requests>=2.20.0, <3',
'setproctitle>=1.1.8, <2',
'sqlalchemy>=1.1.15, <1.3.0',
'tabulate>=0.7.5, <=0.8.2',
'tenacity==4.12.0',
'text-unidecode==1.2', # Avoid GPL dependency, pip uses reverse order(!)
'thrift>=0.9.2',
'tzlocal>=1.4',
'unicodecsv>=0.14.1',
'werkzeug>=0.14.1, <0.15.0',
'zope.deprecation>=4.0, <5.0',
],
setup_requires=[
'docutils>=0.14, <1.0',
],
extras_require={
'all': devel_all,
'devel_ci': devel_ci,
'all_dbs': all_dbs,
'atlas': atlas,
'async': async_packages,
'aws': aws,
'azure': azure,
'cassandra': cassandra,
'celery': celery,
'cgroups': cgroups,
'cloudant': cloudant,
'crypto': crypto,
'dask': dask,
'databricks': databricks,
'datadog': datadog,
'devel': devel_minreq,
'devel_hadoop': devel_hadoop,
'doc': doc,
'docker': docker,
'druid': druid,
'elasticsearch': elasticsearch,
'gcp_api': gcp_api,
'github_enterprise': github_enterprise,
'google_auth': google_auth,
'hdfs': hdfs,
'hive': hive,
'jdbc': jdbc,
'jira': jira,
'kerberos': kerberos,
'kubernetes': kubernetes,
'ldap': ldap,
'mongo': mongo,
'mssql': mssql,
'mysql': mysql,
'oracle': oracle,
'password': password,
'pinot': pinot,
'postgres': postgres,
'qds': qds,
'rabbitmq': rabbitmq,
'redis': redis,
'salesforce': salesforce,
'samba': samba,
'sendgrid': sendgrid,
'segment': segment,
'slack': slack,
'snowflake': snowflake,
'ssh': ssh,
'statsd': statsd,
'vertica': vertica,
'webhdfs': webhdfs,
'winrm': winrm
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Topic :: System :: Monitoring',
],
author='Apache Software Foundation',
author_email='dev@airflow.apache.org',
url='http://airflow.apache.org/',
download_url=(
'https://dist.apache.org/repos/dist/release/airflow/' + version),
cmdclass={
'test': Tox,
'extra_clean': CleanCommand,
'compile_assets': CompileAssets
},
python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',
)
if __name__ == "__main__":
do_setup()
| 30.338983
| 90
| 0.560335
|
d35e9e3e1815db18c2e550bb75b5c8a87070c987
| 718
|
py
|
Python
|
redun/__init__.py
|
dakoner/redun
|
3e1003cfe8e2bcee435aa6f4aa5bf42ee1d162d0
|
[
"Apache-2.0"
] | null | null | null |
redun/__init__.py
|
dakoner/redun
|
3e1003cfe8e2bcee435aa6f4aa5bf42ee1d162d0
|
[
"Apache-2.0"
] | null | null | null |
redun/__init__.py
|
dakoner/redun
|
3e1003cfe8e2bcee435aa6f4aa5bf42ee1d162d0
|
[
"Apache-2.0"
] | null | null | null |
from redun.executors.aws_batch import AWSBatchExecutor # noqa: F401
from redun.executors.aws_glue import AWSGlueExecutor # noqa: F401
from redun.executors.local import LocalExecutor # noqa: F401
from redun.file import Dir, File, ShardedS3Dataset # noqa: F401
from redun.handle import Handle # noqa: F401
from redun.namespace import get_current_namespace, namespace # noqa: F401
from redun.scheduler import ( # noqa: F401
Scheduler,
apply_tags,
catch,
cond,
get_current_scheduler,
merge_handles,
set_current_scheduler,
throw,
)
from redun.scripting import script # noqa: F401
from redun.task import PartialTask, Task, get_task_registry, task # noqa: F401
__version__ = "0.8.3"
| 34.190476
| 79
| 0.75766
|
957cb16d73605820e417c9aa745f262501a709ae
| 51
|
py
|
Python
|
pyecharts/_version.py
|
swuecho/pyecharts
|
ee659c991bcff9c7279402b0c42e48c3c91d8715
|
[
"MIT"
] | 11,032
|
2017-12-21T01:21:38.000Z
|
2022-03-31T23:02:38.000Z
|
pyecharts/_version.py
|
swuecho/pyecharts
|
ee659c991bcff9c7279402b0c42e48c3c91d8715
|
[
"MIT"
] | 1,687
|
2017-12-21T02:10:47.000Z
|
2022-03-31T14:31:45.000Z
|
pyecharts/_version.py
|
swuecho/pyecharts
|
ee659c991bcff9c7279402b0c42e48c3c91d8715
|
[
"MIT"
] | 2,528
|
2017-12-21T07:57:52.000Z
|
2022-03-30T15:34:51.000Z
|
__version__ = "1.9.0"
__author__ = "chenjiandongx"
| 17
| 28
| 0.72549
|
4b0e36991ada7435d40fa6e0e80706a9b58c40b6
| 3,605
|
py
|
Python
|
tests/test_requirements.py
|
immersionroom/vee
|
2c6f781dc96e9028f2446777b906ca37dc2f4299
|
[
"BSD-3-Clause"
] | 6
|
2017-11-05T02:44:10.000Z
|
2021-07-14T19:10:56.000Z
|
tests/test_requirements.py
|
immersionroom/vee
|
2c6f781dc96e9028f2446777b906ca37dc2f4299
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_requirements.py
|
immersionroom/vee
|
2c6f781dc96e9028f2446777b906ca37dc2f4299
|
[
"BSD-3-Clause"
] | 1
|
2017-01-31T23:10:09.000Z
|
2017-01-31T23:10:09.000Z
|
from . import *
from vee.manifest import Manifest
class TestRequirements(TestCase):
def test_global_envvars(self):
manifest = Manifest(home=self.home())
manifest.parse_file('''
first
KEY=VALUE1
second
KEY=VALUE2
third
'''.strip().splitlines())
pkgs = list(manifest.iter_packages())
self.assertEqual(pkgs[0].base_environ, {})
self.assertEqual(pkgs[1].base_environ, {'KEY': 'VALUE1'})
self.assertEqual(pkgs[2].base_environ, {'KEY': 'VALUE2'})
def test_local_envvars(self):
manifest = Manifest(home=self.home())
manifest.parse_file('''
url -e KEY=VALUE
'''.strip().splitlines())
flat = ''.join(manifest.iter_dump()).strip()
self.assertEqual(flat, 'file:url --environ=KEY=VALUE')
def test_elif(self):
manifest = Manifest(home=self.home())
manifest.parse_file('''
% if 0:
zero
% elif 1:
one
% else:
two
% endif
'''.strip().splitlines())
pkgs = list(manifest.iter_packages())
self.assertEqual(len(pkgs), 1)
self.assertEqual(pkgs[0].name, 'one')
def test_else(self):
reqs = Manifest(home=self.home())
reqs.parse_file('''
% if 0:
zero
% elif 0:
one
% else:
two
% endif
'''.strip().splitlines())
pkgs = list(reqs.iter_packages())
self.assertEqual(len(pkgs), 1)
self.assertEqual(pkgs[0].name, 'two')
def test_platforms(self):
manifest = Manifest(home=self.home())
manifest.parse_file('''
before
% if MACOS:
macos
% elif LINUX:
linux
% endif
after
'''.strip().splitlines())
print(manifest)
pkgs = list(manifest.iter_packages())
self.assertEqual(pkgs[0].name, 'before')
if sys.platform == 'darwin':
self.assertEqual(pkgs[1].name, 'macos')
elif sys.platform == 'linux2':
self.assertEqual(pkgs[1].name, 'linux')
self.assertEqual(pkgs[2].name, 'after')
def test_includes_read(self):
manifest = Manifest(
file=os.path.abspath(os.path.join(__file__, '..', 'requirements', 'includes', 'main.txt')),
home=self.home(),
)
names = [pkg.name for pkg in manifest.iter_packages()]
self.assertEqual(names, ['main', 'always', 'true'])
def test_includes_write(self):
os.makedirs(self.sandbox())
home = self.home()
main_path = self.sandbox('main.txt')
with open(main_path, 'w') as fh:
fh.write(dedent('''
main.tgz
%include include.txt
'''.lstrip()))
incl_path = self.sandbox('include.txt')
with open(incl_path, 'w') as fh:
fh.write(dedent('''
include.tgz
'''.lstrip()))
manifest = Manifest(file=main_path, home=home)
main, incl = manifest.iter_packages()
self.assertEqual(main.name, 'main')
main.version = '1'
incl.version = '2'
manifest.dump(main_path)
manifest = Manifest(file=main_path, home=home)
main, incl = manifest.iter_packages()
self.assertEqual(main.name, 'main')
self.assertEqual(main.version, '1')
self.assertEqual(incl.version, '2')
| 27.730769
| 103
| 0.522885
|
fda63a9aba11da669e7d1d83df66aaffc285b85a
| 2,744
|
py
|
Python
|
shift.py
|
szymciem8/File-Encryption
|
a0021f7850ee225b8fea68e75a245e906a61e16a
|
[
"MIT"
] | null | null | null |
shift.py
|
szymciem8/File-Encryption
|
a0021f7850ee225b8fea68e75a245e906a61e16a
|
[
"MIT"
] | null | null | null |
shift.py
|
szymciem8/File-Encryption
|
a0021f7850ee225b8fea68e75a245e906a61e16a
|
[
"MIT"
] | null | null | null |
import random
import cv2
from functions import *
import numpy
from matplotlib import pyplot as plt
example = "Ogolnie znana teza glosi, iz uzytkownika moze rozpraszac zrozumiala zawartosc strony, kiedy ten chce zobaczyc sam jej wyglad. Jedna z mocnych stron uzywania Lorem Ipsum jest to, ze ma wiele roznych kombinacji zdan, slow i akapitow, w przeciwienstwie do zwyklego: ,,tekst, tekst, tekst, sprawiajacego, ze wyglada to ,,zbyt czytelnie po polsku. Wielu webmasterow i designerow uzywa Lorem Ipsum jako domyslnego modelu tekstu i wpisanie w internetowej wyszukiwarce 'lorem ipsum' spowoduje znalezienie bardzo wielu stron, ktore wciaz sa w budowie. Wiele wersji tekstu ewoluowalo i zmienialo sie przez lata, czasem przez przypadek, czasem specjalnie (humorystyczne wstawki itd). Lorem Ipsum jest tekstem stosowanym jako przykladowy wypelniacz w przemysle poligraficznym. Zostal po raz pierwszy uzyty w XV w. przez nieznanego drukarza do wypelnienia tekstem probnej ksiazki. Piec wiekow pozniej zaczal byc uzywany przemysle elektronicznym, pozostajac praktycznie niezmienionym. Spopularyzowal sie w latach 60. XX w. wraz z publikacja arkuszy Letrasetu, zawierajacych fragmenty Lorem Ipsum, a ostatnio z zawierajacym rozne wersje Lorem Ipsum oprogramowaniem przeznaczonym do realizacji drukow na komputerach osobistych, jak Aldus PageMaker"
key = [random.randint(0,255) for i in range(len(example))] #65536
# print("klucz")
# print(key)
# print('\n\n')
# print('Nowy run')
# print('\n\n')
# shift=125
# enc_exmpl = rot_code(example, shift)
# # print("Szyfr rotacyjny")
# # print(enc_exmpl)
# # print(rot_code(enc_exmpl, -shift))
# # print('\n\n')
# print('\n\n')
print('Szyfr z kluczem')
key_encoded = key_code(example, key, 'encode')
print(key_encoded)
print('\n\n')
key_decoded = key_code(key_encoded, key, 'decode')
print(key_decoded)
num_example = []
for el in key_encoded:
num_example.append(ord(el))
plt.hist(num_example, 128, [0,255], label='test')
plt.title('Histogram for a cipher with a key as long as the text')
plt.show()
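# Added illustrative sketch: key_code is imported from functions and its source is
# not shown here. The stand-in below, _key_cipher_demo, assumes it shifts each
# character by the matching key byte modulo 256 -- a hypothetical equivalent for
# illustration, not the actual implementation.
def _key_cipher_demo(text, demo_key, mode):
    sign = 1 if mode == 'encode' else -1
    return ''.join(chr((ord(c) + sign * k) % 256) for c, k in zip(text, demo_key))

_demo_key = [7, 42, 255]
_demo_cipher = _key_cipher_demo('abc', _demo_key, 'encode')
assert _key_cipher_demo(_demo_cipher, _demo_key, 'decode') == 'abc'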
# img = cv2.imread("szyfrowanie/b.png", 0)
# cv2.imshow("Original", img)
#print(img[0:100])
# encoded_img = rot_img_code(img, 100)
# cv2.imshow("encoded img", encoded_img)
# decoded_img = rot_img_code(img, -100)
# cv2.imshow("decoded img", decoded_img)
#The maximum key value is 65536
# key_encoded_img = key_img_code(img, key, 'encode')
# cv2.imshow("key encoded img", key_encoded_img)
# key_decoded_img = key_img_code(key_encoded_img, key, 'decode')
# cv2.imshow("key decoded img", key_decoded_img)
#ADD A HISTOGRAM, PREFERABLY WITH OPENCV
# plt.hist(img.ravel(), 256, [0,256], label='test')
# plt.title('Histogram of the original b.png')
# plt.show()
cv2.waitKey(0)
cv2.destroyAllWindows()
| 41.575758
| 1,243
| 0.764213
|
b69d6efc248184c311a0f5c02a8893eb9e897880
| 15,221
|
py
|
Python
|
clang/utils/creduce-clang-crash.py
|
medismailben/llvm-project
|
e334a839032fe500c3bba22bf976ab7af13ce1c1
|
[
"Apache-2.0"
] | 1,602
|
2015-01-06T11:26:31.000Z
|
2022-03-30T06:17:21.000Z
|
clang/utils/creduce-clang-crash.py
|
medismailben/llvm-project
|
e334a839032fe500c3bba22bf976ab7af13ce1c1
|
[
"Apache-2.0"
] | 11,789
|
2015-01-05T04:50:15.000Z
|
2022-03-31T23:39:19.000Z
|
clang/utils/creduce-clang-crash.py
|
medismailben/llvm-project
|
e334a839032fe500c3bba22bf976ab7af13ce1c1
|
[
"Apache-2.0"
] | 498
|
2015-01-08T18:58:18.000Z
|
2022-03-20T15:37:45.000Z
|
#!/usr/bin/env python
"""Calls C-Reduce to create a minimal reproducer for clang crashes.
Output files:
*.reduced.sh -- crash reproducer with minimal arguments
*.reduced.cpp -- the reduced file
*.test.sh -- interestingness test for C-Reduce
"""
from __future__ import print_function
from argparse import ArgumentParser, RawTextHelpFormatter
import os
import re
import stat
import sys
import subprocess
import pipes
import shlex
import tempfile
import shutil
from distutils.spawn import find_executable
verbose = False
creduce_cmd = None
clang_cmd = None
not_cmd = None
def verbose_print(*args, **kwargs):
if verbose:
print(*args, **kwargs)
def check_file(fname):
fname = os.path.normpath(fname)
if not os.path.isfile(fname):
sys.exit("ERROR: %s does not exist" % (fname))
return fname
def check_cmd(cmd_name, cmd_dir, cmd_path=None):
"""
Returns absolute path to cmd_path if it is given,
or absolute path to cmd_dir/cmd_name.
"""
if cmd_path:
# Make the path absolute so the creduce test can be run from any directory.
cmd_path = os.path.abspath(cmd_path)
cmd = find_executable(cmd_path)
if cmd:
return cmd
sys.exit("ERROR: executable `%s` not found" % (cmd_path))
cmd = find_executable(cmd_name, path=cmd_dir)
if cmd:
return cmd
if not cmd_dir:
cmd_dir = "$PATH"
sys.exit("ERROR: `%s` not found in %s" % (cmd_name, cmd_dir))
def quote_cmd(cmd):
return ' '.join(pipes.quote(arg) for arg in cmd)
def write_to_script(text, filename):
with open(filename, 'w') as f:
f.write(text)
os.chmod(filename, os.stat(filename).st_mode | stat.S_IEXEC)
class Reduce(object):
def __init__(self, crash_script, file_to_reduce):
crash_script_name, crash_script_ext = os.path.splitext(crash_script)
file_reduce_name, file_reduce_ext = os.path.splitext(file_to_reduce)
self.testfile = file_reduce_name + '.test.sh'
self.crash_script = crash_script_name + '.reduced' + crash_script_ext
self.file_to_reduce = file_reduce_name + '.reduced' + file_reduce_ext
shutil.copy(file_to_reduce, self.file_to_reduce)
self.clang = clang_cmd
self.clang_args = []
self.expected_output = []
self.is_crash = True
self.creduce_flags = ["--tidy"]
self.read_clang_args(crash_script, file_to_reduce)
self.read_expected_output()
def get_crash_cmd(self, cmd=None, args=None, filename=None):
if not cmd:
cmd = self.clang
if not args:
args = self.clang_args
if not filename:
filename = self.file_to_reduce
return [cmd] + args + [filename]
def read_clang_args(self, crash_script, filename):
print("\nReading arguments from crash script...")
with open(crash_script) as f:
# Assume clang call is the first non comment line.
cmd = []
for line in f:
if not line.lstrip().startswith('#'):
cmd = shlex.split(line)
break
if not cmd:
sys.exit("Could not find command in the crash script.");
# Remove clang and filename from the command
# Assume the last occurrence of the filename is the clang input file
del cmd[0]
for i in range(len(cmd)-1, -1, -1):
if cmd[i] == filename:
del cmd[i]
break
self.clang_args = cmd
verbose_print("Clang arguments:", quote_cmd(self.clang_args))
def read_expected_output(self):
print("\nGetting expected crash output...")
p = subprocess.Popen(self.get_crash_cmd(),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
crash_output, _ = p.communicate()
result = []
# Remove color codes
ansi_escape = r'\x1b\[[0-?]*m'
crash_output = re.sub(ansi_escape, '', crash_output.decode('utf-8'))
# Look for specific error messages
regexes = [r"Assertion `(.+)' failed", # Linux assert()
r"Assertion failed: (.+),", # FreeBSD/Mac assert()
r"fatal error: error in backend: (.+)",
r"LLVM ERROR: (.+)",
r"UNREACHABLE executed (at .+)?!",
r"LLVM IR generation of declaration '(.+)'",
r"Generating code for declaration '(.+)'",
r"\*\*\* Bad machine code: (.+) \*\*\*"]
for msg_re in regexes:
match = re.search(msg_re, crash_output)
if match:
msg = match.group(1)
result = [msg]
print("Found message:", msg)
if "fatal error:" in msg_re:
self.is_crash = False
break
# If no message was found, use the top five stack trace functions,
# ignoring some common functions
# Five is a somewhat arbitrary number; the goal is to get a small number
# of identifying functions with some leeway for common functions
if not result:
stacktrace_re = r'[0-9]+\s+0[xX][0-9a-fA-F]+\s*([^(]+)\('
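      # Added example (hypothetical frame): a stack line such as
      #   "4  0x00000000045a2b3c clang::Parser::ParseDecl(...)"
      # matches the regex above with group(1) == "clang::Parser::ParseDecl".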
filters = ["PrintStackTraceSignalHandler",
"llvm::sys::RunSignalHandlers",
"SignalHandler", "__restore_rt", "gsignal", "abort"]
matches = re.findall(stacktrace_re, crash_output)
result = [x for x in matches if x and x.strip() not in filters][:5]
for msg in result:
print("Found stack trace function:", msg)
if not result:
print("ERROR: no crash was found")
print("The crash output was:\n========\n%s========" % crash_output)
sys.exit(1)
self.expected_output = result
def check_expected_output(self, args=None, filename=None):
if not args:
args = self.clang_args
if not filename:
filename = self.file_to_reduce
p = subprocess.Popen(self.get_crash_cmd(args=args, filename=filename),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
crash_output, _ = p.communicate()
return all(msg in crash_output.decode('utf-8') for msg in
self.expected_output)
def write_interestingness_test(self):
print("\nCreating the interestingness test...")
crash_flag = "--crash" if self.is_crash else ""
output = "#!/bin/bash\n%s %s %s >& t.log || exit 1\n" % \
(pipes.quote(not_cmd), crash_flag, quote_cmd(self.get_crash_cmd()))
for msg in self.expected_output:
output += 'grep -F %s t.log || exit 1\n' % pipes.quote(msg)
write_to_script(output, self.testfile)
self.check_interestingness()
def check_interestingness(self):
testfile = os.path.abspath(self.testfile)
# Check that the test considers the original file interesting
with open(os.devnull, 'w') as devnull:
returncode = subprocess.call(testfile, stdout=devnull)
if returncode:
sys.exit("The interestingness test does not pass for the original file.")
# Check that an empty file is not interesting
# Instead of modifying the filename in the test file, just run the command
with tempfile.NamedTemporaryFile() as empty_file:
is_interesting = self.check_expected_output(filename=empty_file.name)
if is_interesting:
sys.exit("The interestingness test passes for an empty file.")
def clang_preprocess(self):
print("\nTrying to preprocess the source file...")
with tempfile.NamedTemporaryFile() as tmpfile:
cmd_preprocess = self.get_crash_cmd() + ['-E', '-o', tmpfile.name]
cmd_preprocess_no_lines = cmd_preprocess + ['-P']
try:
subprocess.check_call(cmd_preprocess_no_lines)
if self.check_expected_output(filename=tmpfile.name):
print("Successfully preprocessed with line markers removed")
shutil.copy(tmpfile.name, self.file_to_reduce)
else:
subprocess.check_call(cmd_preprocess)
if self.check_expected_output(filename=tmpfile.name):
print("Successfully preprocessed without removing line markers")
shutil.copy(tmpfile.name, self.file_to_reduce)
else:
print("No longer crashes after preprocessing -- "
"using original source")
except subprocess.CalledProcessError:
print("Preprocessing failed")
@staticmethod
def filter_args(args, opts_equal=[], opts_startswith=[],
opts_one_arg_startswith=[]):
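    # Added example: filter_args(['-I', 'dir', '-O2', 'foo'],
    #                            opts_startswith=['-O'],
    #                            opts_one_arg_startswith=['-I'])
    # drops '-O2' and the '-I dir' pair, returning ['foo'].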
result = []
skip_next = False
for arg in args:
if skip_next:
skip_next = False
continue
if any(arg == a for a in opts_equal):
continue
if any(arg.startswith(a) for a in opts_startswith):
continue
if any(arg.startswith(a) for a in opts_one_arg_startswith):
skip_next = True
continue
result.append(arg)
return result
def try_remove_args(self, args, msg=None, extra_arg=None, **kwargs):
new_args = self.filter_args(args, **kwargs)
if extra_arg:
if extra_arg in new_args:
new_args.remove(extra_arg)
new_args.append(extra_arg)
if (new_args != args and
self.check_expected_output(args=new_args)):
if msg:
verbose_print(msg)
return new_args
return args
def try_remove_arg_by_index(self, args, index):
new_args = args[:index] + args[index+1:]
removed_arg = args[index]
# Heuristic for grouping arguments:
# remove next argument if it doesn't start with "-"
if index < len(new_args) and not new_args[index].startswith('-'):
del new_args[index]
removed_arg += ' ' + args[index+1]
if self.check_expected_output(args=new_args):
verbose_print("Removed", removed_arg)
return new_args, index
return args, index+1
def simplify_clang_args(self):
"""Simplify clang arguments before running C-Reduce to reduce the time the
interestingness test takes to run.
"""
print("\nSimplifying the clang command...")
# Remove some clang arguments to speed up the interestingness test
new_args = self.clang_args
new_args = self.try_remove_args(new_args,
msg="Removed debug info options",
opts_startswith=["-gcodeview",
"-debug-info-kind=",
"-debugger-tuning="])
new_args = self.try_remove_args(new_args,
msg="Removed --show-includes",
opts_startswith=["--show-includes"])
# Not suppressing warnings (-w) sometimes prevents the crash from occurring
# after preprocessing
new_args = self.try_remove_args(new_args,
msg="Replaced -W options with -w",
extra_arg='-w',
opts_startswith=["-W"])
new_args = self.try_remove_args(new_args,
msg="Replaced optimization level with -O0",
extra_arg="-O0",
opts_startswith=["-O"])
# Try to remove compilation steps
new_args = self.try_remove_args(new_args, msg="Added -emit-llvm",
extra_arg="-emit-llvm")
new_args = self.try_remove_args(new_args, msg="Added -fsyntax-only",
extra_arg="-fsyntax-only")
# Try to make implicit int an error for more sensible test output
new_args = self.try_remove_args(new_args, msg="Added -Werror=implicit-int",
opts_equal=["-w"],
extra_arg="-Werror=implicit-int")
self.clang_args = new_args
verbose_print("Simplified command:", quote_cmd(self.get_crash_cmd()))
def reduce_clang_args(self):
"""Minimize the clang arguments after running C-Reduce, to get the smallest
command that reproduces the crash on the reduced file.
"""
print("\nReducing the clang crash command...")
new_args = self.clang_args
# Remove some often occurring args
new_args = self.try_remove_args(new_args, msg="Removed -D options",
opts_startswith=["-D"])
new_args = self.try_remove_args(new_args, msg="Removed -D options",
opts_one_arg_startswith=["-D"])
new_args = self.try_remove_args(new_args, msg="Removed -I options",
opts_startswith=["-I"])
new_args = self.try_remove_args(new_args, msg="Removed -I options",
opts_one_arg_startswith=["-I"])
new_args = self.try_remove_args(new_args, msg="Removed -W options",
opts_startswith=["-W"])
# Remove other cases that aren't covered by the heuristic
new_args = self.try_remove_args(new_args, msg="Removed -mllvm",
opts_one_arg_startswith=["-mllvm"])
i = 0
while i < len(new_args):
new_args, i = self.try_remove_arg_by_index(new_args, i)
self.clang_args = new_args
reduced_cmd = quote_cmd(self.get_crash_cmd())
write_to_script(reduced_cmd, self.crash_script)
print("Reduced command:", reduced_cmd)
def run_creduce(self):
print("\nRunning C-Reduce...")
try:
p = subprocess.Popen([creduce_cmd] + self.creduce_flags +
[self.testfile, self.file_to_reduce])
p.communicate()
except KeyboardInterrupt:
# Hack to kill C-Reduce because it jumps into its own pgid
print('\n\nctrl-c detected, killed creduce')
p.kill()
def main():
global verbose
global creduce_cmd
global clang_cmd
global not_cmd
parser = ArgumentParser(description=__doc__,
formatter_class=RawTextHelpFormatter)
parser.add_argument('crash_script', type=str, nargs=1,
help="Name of the script that generates the crash.")
parser.add_argument('file_to_reduce', type=str, nargs=1,
help="Name of the file to be reduced.")
parser.add_argument('--llvm-bin', dest='llvm_bin', type=str,
help="Path to the LLVM bin directory.")
parser.add_argument('--llvm-not', dest='llvm_not', type=str,
help="The path to the `not` executable. "
"By default uses the llvm-bin directory.")
parser.add_argument('--clang', dest='clang', type=str,
help="The path to the `clang` executable. "
"By default uses the llvm-bin directory.")
parser.add_argument('--creduce', dest='creduce', type=str,
help="The path to the `creduce` executable. "
"Required if `creduce` is not in PATH environment.")
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
verbose = args.verbose
llvm_bin = os.path.abspath(args.llvm_bin) if args.llvm_bin else None
creduce_cmd = check_cmd('creduce', None, args.creduce)
clang_cmd = check_cmd('clang', llvm_bin, args.clang)
not_cmd = check_cmd('not', llvm_bin, args.llvm_not)
crash_script = check_file(args.crash_script[0])
file_to_reduce = check_file(args.file_to_reduce[0])
r = Reduce(crash_script, file_to_reduce)
r.simplify_clang_args()
r.write_interestingness_test()
r.clang_preprocess()
r.run_creduce()
r.reduce_clang_args()
if __name__ == '__main__':
main()
| 36.588942
| 79
| 0.628014
|
820b2551c35ffcab21175bb4a0a87aaf18d05f70
| 160
|
py
|
Python
|
app/models/schemas/tokens.py
|
drJabber/fastapi-realworld-example-app
|
808b63bb290e358679f3a3bd8ddb911bc3a3b71d
|
[
"MIT"
] | null | null | null |
app/models/schemas/tokens.py
|
drJabber/fastapi-realworld-example-app
|
808b63bb290e358679f3a3bd8ddb911bc3a3b71d
|
[
"MIT"
] | null | null | null |
app/models/schemas/tokens.py
|
drJabber/fastapi-realworld-example-app
|
808b63bb290e358679f3a3bd8ddb911bc3a3b71d
|
[
"MIT"
] | null | null | null |
from typing import Optional
from pydantic import BaseModel
class Token(BaseModel):
access_token: str
refresh_token: Optional[str]
token_type: str
| 17.777778
| 32
| 0.7625
|
b3ec66834901333f7e998360712ac1c06bdf8c2e
| 445
|
py
|
Python
|
snippets/Graphs/bellman_ford.py
|
shan61916/PyRival
|
d520bdec748e4d47b22c1d053b26a35ba8df658b
|
[
"MIT"
] | 1
|
2019-08-05T13:31:12.000Z
|
2019-08-05T13:31:12.000Z
|
snippets/Graphs/bellman_ford.py
|
shan61916/PyRival
|
d520bdec748e4d47b22c1d053b26a35ba8df658b
|
[
"MIT"
] | null | null | null |
snippets/Graphs/bellman_ford.py
|
shan61916/PyRival
|
d520bdec748e4d47b22c1d053b26a35ba8df658b
|
[
"MIT"
] | null | null | null |
def bellman_ford(vertices, edges, start):
dist = [float('inf')] * len(vertices)
pred = [None] * len(vertices)
dist[start] = 0
for _ in range(len(vertices)):
for u, v, d in edges:
if dist[u] + d < dist[v]:
dist[v] = dist[u] + d
pred[v] = u
"""Sanity Check
for u, v, d in edges:
if dist[u] + d < dist[v]:
return None
"""
return dist, pred
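# Minimal usage sketch (added illustration; the graph below is made up).
# A final relaxation pass that still improves some distance -- as in the
# commented-out sanity check above -- would indicate a negative cycle.
if __name__ == "__main__":
    vertices = [0, 1, 2, 3]
    edges = [(0, 1, 4), (0, 2, 1), (2, 1, 2), (1, 3, 1)]
    dist, pred = bellman_ford(vertices, edges, start=0)
    print(dist)  # [0, 3, 1, 4]
    print(pred)  # [None, 2, 0, 1]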
| 22.25
| 41
| 0.474157
|
a504802b7e5fe68cba0289346dde553715d8ae65
| 24,046
|
py
|
Python
|
src/insightface/deploy/mtcnn_detector.py
|
Sahil-Chavan/Face_Recognition_Attendance_System
|
fc8d82cb8d128baa86e744bf6efac293bd667e5e
|
[
"MIT"
] | null | null | null |
src/insightface/deploy/mtcnn_detector.py
|
Sahil-Chavan/Face_Recognition_Attendance_System
|
fc8d82cb8d128baa86e744bf6efac293bd667e5e
|
[
"MIT"
] | null | null | null |
src/insightface/deploy/mtcnn_detector.py
|
Sahil-Chavan/Face_Recognition_Attendance_System
|
fc8d82cb8d128baa86e744bf6efac293bd667e5e
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import os
import mxnet as mx
import numpy as np
import math
import cv2
from itertools import repeat
# from src.insightface.deploy.helper import nms, adjust_input, generate_bbox, detect_first_stage_warpper
# from src.insightface import adjust_input
# from src.insightface import nms
import sys
from src.insightface.deploy.helper import adjust_input, detect_first_stage_warpper
from src.insightface.src.align.detect_face import nms
try:
from itertools import izip
except ImportError:
izip = zip
# from helper import nms, adjust_input, generate_bbox, detect_first_stage_warpper
class MtcnnDetector(object):
"""
Joint Face Detection and Alignment using Multi-task Cascaded Convolutional Neural Networks
see https://github.com/kpzhang93/MTCNN_face_detection_alignment
    this is an MXNet version
"""
def __init__(self,
model_folder='.',
minsize = 20,
threshold = [0.6, 0.7, 0.8],
factor = 0.709,
num_worker = 1,
accurate_landmark = False,
ctx=mx.cpu()):
"""
Initialize the detector
Parameters:
----------
model_folder : string
path for the models
minsize : float number
minimal face to detect
threshold : float number
detect threshold for 3 stages
factor: float number
scale factor for image pyramid
num_worker: int number
number of processes we use for first stage
accurate_landmark: bool
use accurate landmark localization or not
"""
self.num_worker = num_worker
self.accurate_landmark = accurate_landmark
# load 4 models from folder
models = ['det1', 'det2', 'det3','det4']
models = [ os.path.join(model_folder, f) for f in models]
self.PNets = []
for i in range(num_worker):
workner_net = mx.model.FeedForward.load(models[0], 1, ctx=ctx)
self.PNets.append(workner_net)
#self.Pool = Pool(num_worker)
self.RNet = mx.model.FeedForward.load(models[1], 1, ctx=ctx)
self.ONet = mx.model.FeedForward.load(models[2], 1, ctx=ctx)
self.LNet = mx.model.FeedForward.load(models[3], 1, ctx=ctx)
self.minsize = float(minsize)
self.factor = float(factor)
self.threshold = threshold
def convert_to_square(self, bbox):
"""
convert bbox to square
Parameters:
----------
bbox: numpy array , shape n x 5
input bbox
Returns:
-------
square bbox
"""
square_bbox = bbox.copy()
h = bbox[:, 3] - bbox[:, 1] + 1
w = bbox[:, 2] - bbox[:, 0] + 1
max_side = np.maximum(h,w)
square_bbox[:, 0] = bbox[:, 0] + w*0.5 - max_side*0.5
square_bbox[:, 1] = bbox[:, 1] + h*0.5 - max_side*0.5
square_bbox[:, 2] = square_bbox[:, 0] + max_side - 1
square_bbox[:, 3] = square_bbox[:, 1] + max_side - 1
return square_bbox
def calibrate_box(self, bbox, reg):
"""
calibrate bboxes
Parameters:
----------
bbox: numpy array, shape n x 5
input bboxes
reg: numpy array, shape n x 4
            bbox adjustments
Returns:
-------
bboxes after refinement
"""
w = bbox[:, 2] - bbox[:, 0] + 1
w = np.expand_dims(w, 1)
h = bbox[:, 3] - bbox[:, 1] + 1
h = np.expand_dims(h, 1)
reg_m = np.hstack([w, h, w, h])
aug = reg_m * reg
bbox[:, 0:4] = bbox[:, 0:4] + aug
return bbox
def pad(self, bboxes, w, h):
"""
        pad the bboxes and also restrict their size
Parameters:
----------
bboxes: numpy array, n x 5
input bboxes
w: float number
width of the input image
h: float number
height of the input image
        Returns:
        -------
dy, dx : numpy array, n x 1
start point of the bbox in target image
edy, edx : numpy array, n x 1
end point of the bbox in target image
y, x : numpy array, n x 1
start point of the bbox in original image
            ex, ey : numpy array, n x 1
end point of the bbox in original image
tmph, tmpw: numpy array, n x 1
height and width of the bbox
"""
tmpw, tmph = bboxes[:, 2] - bboxes[:, 0] + 1, bboxes[:, 3] - bboxes[:, 1] + 1
num_box = bboxes.shape[0]
dx , dy= np.zeros((num_box, )), np.zeros((num_box, ))
edx, edy = tmpw.copy()-1, tmph.copy()-1
x, y, ex, ey = bboxes[:, 0], bboxes[:, 1], bboxes[:, 2], bboxes[:, 3]
tmp_index = np.where(ex > w-1)
edx[tmp_index] = tmpw[tmp_index] + w - 2 - ex[tmp_index]
ex[tmp_index] = w - 1
tmp_index = np.where(ey > h-1)
edy[tmp_index] = tmph[tmp_index] + h - 2 - ey[tmp_index]
ey[tmp_index] = h - 1
tmp_index = np.where(x < 0)
dx[tmp_index] = 0 - x[tmp_index]
x[tmp_index] = 0
tmp_index = np.where(y < 0)
dy[tmp_index] = 0 - y[tmp_index]
y[tmp_index] = 0
return_list = [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph]
return_list = [item.astype(np.int32) for item in return_list]
return return_list
def slice_index(self, number):
"""
        slice range(number) into chunks of size num_worker
        (the last chunk may be smaller)
        Parameters:
        ----------
        number: int number
            total number of indices to slice
"""
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
num_list = range(number)
return list(chunks(num_list, self.num_worker))
def detect_face_limited(self, img, det_type=2):
height, width, _ = img.shape
if det_type>=2:
total_boxes = np.array( [ [0.0, 0.0, img.shape[1], img.shape[0], 0.9] ] ,dtype=np.float32)
num_box = total_boxes.shape[0]
# pad the bbox
[dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self.pad(total_boxes, width, height)
# (3, 24, 24) is the input shape for RNet
input_buf = np.zeros((num_box, 3, 24, 24), dtype=np.float32)
for i in range(num_box):
tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.uint8)
tmp[dy[i]:edy[i]+1, dx[i]:edx[i]+1, :] = img[y[i]:ey[i]+1, x[i]:ex[i]+1, :]
input_buf[i, :, :, :] = adjust_input(cv2.resize(tmp, (24, 24)))
output = self.RNet.predict(input_buf)
# filter the total_boxes with threshold
passed = np.where(output[1][:, 1] > self.threshold[1])
total_boxes = total_boxes[passed]
if total_boxes.size == 0:
return None
total_boxes[:, 4] = output[1][passed, 1].reshape((-1,))
reg = output[0][passed]
# nms
pick = nms(total_boxes, 0.7, 'Union')
total_boxes = total_boxes[pick]
total_boxes = self.calibrate_box(total_boxes, reg[pick])
total_boxes = self.convert_to_square(total_boxes)
total_boxes[:, 0:4] = np.round(total_boxes[:, 0:4])
else:
total_boxes = np.array( [ [0.0, 0.0, img.shape[1], img.shape[0], 0.9] ] ,dtype=np.float32)
num_box = total_boxes.shape[0]
[dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self.pad(total_boxes, width, height)
# (3, 48, 48) is the input shape for ONet
input_buf = np.zeros((num_box, 3, 48, 48), dtype=np.float32)
for i in range(num_box):
tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.float32)
tmp[dy[i]:edy[i]+1, dx[i]:edx[i]+1, :] = img[y[i]:ey[i]+1, x[i]:ex[i]+1, :]
input_buf[i, :, :, :] = adjust_input(cv2.resize(tmp, (48, 48)))
output = self.ONet.predict(input_buf)
#print(output[2])
# filter the total_boxes with threshold
passed = np.where(output[2][:, 1] > self.threshold[2])
total_boxes = total_boxes[passed]
if total_boxes.size == 0:
return None
total_boxes[:, 4] = output[2][passed, 1].reshape((-1,))
reg = output[1][passed]
points = output[0][passed]
# compute landmark points
bbw = total_boxes[:, 2] - total_boxes[:, 0] + 1
bbh = total_boxes[:, 3] - total_boxes[:, 1] + 1
points[:, 0:5] = np.expand_dims(total_boxes[:, 0], 1) + np.expand_dims(bbw, 1) * points[:, 0:5]
points[:, 5:10] = np.expand_dims(total_boxes[:, 1], 1) + np.expand_dims(bbh, 1) * points[:, 5:10]
# nms
total_boxes = self.calibrate_box(total_boxes, reg)
pick = nms(total_boxes, 0.7, 'Min')
total_boxes = total_boxes[pick]
points = points[pick]
if not self.accurate_landmark:
return total_boxes, points
#############################################
# extended stage
#############################################
num_box = total_boxes.shape[0]
patchw = np.maximum(total_boxes[:, 2]-total_boxes[:, 0]+1, total_boxes[:, 3]-total_boxes[:, 1]+1)
patchw = np.round(patchw*0.25)
# make it even
patchw[np.where(np.mod(patchw,2) == 1)] += 1
input_buf = np.zeros((num_box, 15, 24, 24), dtype=np.float32)
for i in range(5):
x, y = points[:, i], points[:, i+5]
x, y = np.round(x-0.5*patchw), np.round(y-0.5*patchw)
[dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self.pad(np.vstack([x, y, x+patchw-1, y+patchw-1]).T,
width,
height)
for j in range(num_box):
tmpim = np.zeros((tmpw[j], tmpw[j], 3), dtype=np.float32)
tmpim[dy[j]:edy[j]+1, dx[j]:edx[j]+1, :] = img[y[j]:ey[j]+1, x[j]:ex[j]+1, :]
input_buf[j, i*3:i*3+3, :, :] = adjust_input(cv2.resize(tmpim, (24, 24)))
output = self.LNet.predict(input_buf)
pointx = np.zeros((num_box, 5))
pointy = np.zeros((num_box, 5))
for k in range(5):
# do not make a large movement
tmp_index = np.where(np.abs(output[k]-0.5) > 0.35)
output[k][tmp_index[0]] = 0.5
pointx[:, k] = np.round(points[:, k] - 0.5*patchw) + output[k][:, 0]*patchw
pointy[:, k] = np.round(points[:, k+5] - 0.5*patchw) + output[k][:, 1]*patchw
points = np.hstack([pointx, pointy])
points = points.astype(np.int32)
return total_boxes, points
def detect_face(self, img, det_type=0):
"""
detect face over img
Parameters:
----------
            img: numpy array in BGR channel order, of shape (n, m, 3)
input image
        Returns:
        -------
        bboxes: numpy array, n x 5 (x1, y1, x2, y2, score)
            bboxes
        points: numpy array, n x 10 (x1, x2, ..., x5, y1, y2, ..., y5)
landmarks
"""
# check input
height, width, _ = img.shape
if det_type==0:
MIN_DET_SIZE = 12
if img is None:
return None
# only works for color image
if len(img.shape) != 3:
return None
# detected boxes
total_boxes = []
minl = min( height, width)
# get all the valid scales
scales = []
m = MIN_DET_SIZE/self.minsize
minl *= m
factor_count = 0
while minl > MIN_DET_SIZE:
scales.append(m*self.factor**factor_count)
minl *= self.factor
factor_count += 1
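            # Added worked example: with minsize=20, factor=0.709 and a 240 px
            # short side, m = 12/20 = 0.6 and minl starts at 144, giving scales
            # 0.6, 0.6*0.709, 0.6*0.709**2, ... until the scaled side reaches 12.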
#############################################
# first stage
#############################################
#for scale in scales:
# return_boxes = self.detect_first_stage(img, scale, 0)
# if return_boxes is not None:
# total_boxes.append(return_boxes)
sliced_index = self.slice_index(len(scales))
total_boxes = []
for batch in sliced_index:
#local_boxes = self.Pool.map( detect_first_stage_warpper, \
# izip(repeat(img), self.PNets[:len(batch)], [scales[i] for i in batch], repeat(self.threshold[0])) )
local_boxes = map( detect_first_stage_warpper, \
izip(repeat(img), self.PNets[:len(batch)], [scales[i] for i in batch], repeat(self.threshold[0])) )
total_boxes.extend(local_boxes)
# remove the Nones
total_boxes = [ i for i in total_boxes if i is not None]
if len(total_boxes) == 0:
return None
total_boxes = np.vstack(total_boxes)
if total_boxes.size == 0:
return None
# merge the detection from first stage
pick = nms(total_boxes[:, 0:5], 0.7, 'Union')
total_boxes = total_boxes[pick]
bbw = total_boxes[:, 2] - total_boxes[:, 0] + 1
bbh = total_boxes[:, 3] - total_boxes[:, 1] + 1
# refine the bboxes
total_boxes = np.vstack([total_boxes[:, 0]+total_boxes[:, 5] * bbw,
total_boxes[:, 1]+total_boxes[:, 6] * bbh,
total_boxes[:, 2]+total_boxes[:, 7] * bbw,
total_boxes[:, 3]+total_boxes[:, 8] * bbh,
total_boxes[:, 4]
])
total_boxes = total_boxes.T
total_boxes = self.convert_to_square(total_boxes)
total_boxes[:, 0:4] = np.round(total_boxes[:, 0:4])
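            # Columns 5:9 of the first-stage output hold PNet's bounding-box
            # regression offsets; the vstack above applies them, scaled by the
            # box width/height, before squaring and rounding the candidates.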
else:
total_boxes = np.array( [ [0.0, 0.0, img.shape[1], img.shape[0], 0.9] ] ,dtype=np.float32)
#############################################
# second stage
#############################################
num_box = total_boxes.shape[0]
# pad the bbox
[dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self.pad(total_boxes, width, height)
# (3, 24, 24) is the input shape for RNet
input_buf = np.zeros((num_box, 3, 24, 24), dtype=np.float32)
for i in range(num_box):
tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.uint8)
tmp[dy[i]:edy[i]+1, dx[i]:edx[i]+1, :] = img[y[i]:ey[i]+1, x[i]:ex[i]+1, :]
input_buf[i, :, :, :] = adjust_input(cv2.resize(tmp, (24, 24)))
output = self.RNet.predict(input_buf)
# filter the total_boxes with threshold
passed = np.where(output[1][:, 1] > self.threshold[1])
total_boxes = total_boxes[passed]
if total_boxes.size == 0:
return None
total_boxes[:, 4] = output[1][passed, 1].reshape((-1,))
reg = output[0][passed]
# nms
pick = nms(total_boxes, 0.7, 'Union')
total_boxes = total_boxes[pick]
total_boxes = self.calibrate_box(total_boxes, reg[pick])
total_boxes = self.convert_to_square(total_boxes)
total_boxes[:, 0:4] = np.round(total_boxes[:, 0:4])
#############################################
# third stage
#############################################
num_box = total_boxes.shape[0]
# pad the bbox
[dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self.pad(total_boxes, width, height)
# (3, 48, 48) is the input shape for ONet
input_buf = np.zeros((num_box, 3, 48, 48), dtype=np.float32)
for i in range(num_box):
tmp = np.zeros((tmph[i], tmpw[i], 3), dtype=np.float32)
tmp[dy[i]:edy[i]+1, dx[i]:edx[i]+1, :] = img[y[i]:ey[i]+1, x[i]:ex[i]+1, :]
input_buf[i, :, :, :] = adjust_input(cv2.resize(tmp, (48, 48)))
output = self.ONet.predict(input_buf)
# filter the total_boxes with threshold
passed = np.where(output[2][:, 1] > self.threshold[2])
total_boxes = total_boxes[passed]
if total_boxes.size == 0:
return None
total_boxes[:, 4] = output[2][passed, 1].reshape((-1,))
reg = output[1][passed]
points = output[0][passed]
# compute landmark points
bbw = total_boxes[:, 2] - total_boxes[:, 0] + 1
bbh = total_boxes[:, 3] - total_boxes[:, 1] + 1
points[:, 0:5] = np.expand_dims(total_boxes[:, 0], 1) + np.expand_dims(bbw, 1) * points[:, 0:5]
points[:, 5:10] = np.expand_dims(total_boxes[:, 1], 1) + np.expand_dims(bbh, 1) * points[:, 5:10]
# nms
total_boxes = self.calibrate_box(total_boxes, reg)
pick = nms(total_boxes, 0.7, 'Min')
total_boxes = total_boxes[pick]
points = points[pick]
if not self.accurate_landmark:
return total_boxes, points
#############################################
# extended stage
#############################################
num_box = total_boxes.shape[0]
patchw = np.maximum(total_boxes[:, 2]-total_boxes[:, 0]+1, total_boxes[:, 3]-total_boxes[:, 1]+1)
patchw = np.round(patchw*0.25)
# make it even
patchw[np.where(np.mod(patchw,2) == 1)] += 1
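        # Each landmark gets a square patch whose side is a quarter of the longer
        # box side, rounded and made even so the 0.5 * patchw offsets used for
        # the crops below stay integral.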
input_buf = np.zeros((num_box, 15, 24, 24), dtype=np.float32)
for i in range(5):
x, y = points[:, i], points[:, i+5]
x, y = np.round(x-0.5*patchw), np.round(y-0.5*patchw)
[dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = self.pad(np.vstack([x, y, x+patchw-1, y+patchw-1]).T,
width,
height)
for j in range(num_box):
tmpim = np.zeros((tmpw[j], tmpw[j], 3), dtype=np.float32)
tmpim[dy[j]:edy[j]+1, dx[j]:edx[j]+1, :] = img[y[j]:ey[j]+1, x[j]:ex[j]+1, :]
input_buf[j, i*3:i*3+3, :, :] = adjust_input(cv2.resize(tmpim, (24, 24)))
output = self.LNet.predict(input_buf)
pointx = np.zeros((num_box, 5))
pointy = np.zeros((num_box, 5))
for k in range(5):
# do not make a large movement
tmp_index = np.where(np.abs(output[k]-0.5) > 0.35)
output[k][tmp_index[0]] = 0.5
pointx[:, k] = np.round(points[:, k] - 0.5*patchw) + output[k][:, 0]*patchw
pointy[:, k] = np.round(points[:, k+5] - 0.5*patchw) + output[k][:, 1]*patchw
points = np.hstack([pointx, pointy])
points = points.astype(np.int32)
return total_boxes, points
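    # Usage sketch (names are illustrative only; assumes an initialized detector
    # instance and a BGR image loaded with cv2.imread):
    #   ret = detector.detect_face(img)
    #   if ret is not None:
    #       total_boxes, points = ret   # (n, 5) boxes, (n, 10) landmarks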
def list2colmatrix(self, pts_list):
"""
convert list to column matrix
Parameters:
----------
pts_list:
input list
        Returns:
        -------
        colMat: 2n x 1 column matrix of interleaved (x, y) coordinates
"""
assert len(pts_list) > 0
colMat = []
for i in range(len(pts_list)):
colMat.append(pts_list[i][0])
colMat.append(pts_list[i][1])
colMat = np.matrix(colMat).transpose()
return colMat
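    # Example: list2colmatrix([(1, 2), (3, 4)]) interleaves the coordinates and
    # returns the 4 x 1 column matrix [[1], [2], [3], [4]].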
def find_tfrom_between_shapes(self, from_shape, to_shape):
"""
find transform between shapes
Parameters:
----------
from_shape:
to_shape:
        Returns:
        -------
        tran_m: 2 x 2 linear part (scaled rotation) of the transform
        tran_b: 2 x 1 translation part of the transform
"""
assert from_shape.shape[0] == to_shape.shape[0] and from_shape.shape[0] % 2 == 0
sigma_from = 0.0
sigma_to = 0.0
cov = np.matrix([[0.0, 0.0], [0.0, 0.0]])
# compute the mean and cov
        from_shape_points = from_shape.reshape(from_shape.shape[0] // 2, 2)
        to_shape_points = to_shape.reshape(to_shape.shape[0] // 2, 2)
mean_from = from_shape_points.mean(axis=0)
mean_to = to_shape_points.mean(axis=0)
for i in range(from_shape_points.shape[0]):
temp_dis = np.linalg.norm(from_shape_points[i] - mean_from)
sigma_from += temp_dis * temp_dis
temp_dis = np.linalg.norm(to_shape_points[i] - mean_to)
sigma_to += temp_dis * temp_dis
cov += (to_shape_points[i].transpose() - mean_to.transpose()) * (from_shape_points[i] - mean_from)
sigma_from = sigma_from / to_shape_points.shape[0]
sigma_to = sigma_to / to_shape_points.shape[0]
cov = cov / to_shape_points.shape[0]
# compute the affine matrix
s = np.matrix([[1.0, 0.0], [0.0, 1.0]])
u, d, vt = np.linalg.svd(cov)
if np.linalg.det(cov) < 0:
if d[1] < d[0]:
s[1, 1] = -1
else:
s[0, 0] = -1
r = u * s * vt
c = 1.0
if sigma_from != 0:
c = 1.0 / sigma_from * np.trace(np.diag(d) * s)
tran_b = mean_to.transpose() - c * r * mean_from.transpose()
tran_m = c * r
return tran_m, tran_b
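    # This is essentially the Umeyama least-squares similarity estimate: with the
    # 2 x 2 covariance C = U * diag(d) * Vt and sign-correction matrix S,
    #   R = U * S * Vt,   c = trace(diag(d) * S) / sigma_from,
    #   b = mean_to - c * R * mean_from,
    # so that to_shape is approximated by c * R * from_shape + b.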
def extract_image_chips(self, img, points, desired_size=256, padding=0):
"""
crop and align face
Parameters:
----------
        img: numpy array, BGR image of shape (height, width, 3)
            input image
        points: numpy array, n x 10 (x1, x2 ... x5, y1, y2 ... y5)
desired_size: default 256
padding: default 0
        Returns:
-------
crop_imgs: list, n
cropped and aligned faces
"""
crop_imgs = []
for p in points:
shape =[]
            for k in range(len(p) // 2):
shape.append(p[k])
shape.append(p[k+5])
            # clamp negative padding to zero
            padding = max(padding, 0)
# average positions of face points
mean_face_shape_x = [0.224152, 0.75610125, 0.490127, 0.254149, 0.726104]
mean_face_shape_y = [0.2119465, 0.2119465, 0.628106, 0.780233, 0.780233]
from_points = []
to_points = []
            for i in range(len(shape) // 2):
x = (padding + mean_face_shape_x[i]) / (2 * padding + 1) * desired_size
y = (padding + mean_face_shape_y[i]) / (2 * padding + 1) * desired_size
to_points.append([x, y])
from_points.append([shape[2*i], shape[2*i+1]])
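            # The mean-face landmarks live in [0, 1]; (padding + m) / (2*padding + 1)
            # shrinks them toward the image center. With padding=0 a mean x of 0.49
            # maps straight to 0.49 * desired_size; larger padding leaves more border.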
# convert the points to Mat
from_mat = self.list2colmatrix(from_points)
to_mat = self.list2colmatrix(to_points)
            # compute the similarity transform
tran_m, tran_b = self.find_tfrom_between_shapes(from_mat, to_mat)
probe_vec = np.matrix([1.0, 0.0]).transpose()
probe_vec = tran_m * probe_vec
scale = np.linalg.norm(probe_vec)
angle = 180.0 / math.pi * math.atan2(probe_vec[1, 0], probe_vec[0, 0])
from_center = [(shape[0]+shape[2])/2.0, (shape[1]+shape[3])/2.0]
to_center = [0, 0]
to_center[1] = desired_size * 0.4
to_center[0] = desired_size * 0.5
ex = to_center[0] - from_center[0]
ey = to_center[1] - from_center[1]
rot_mat = cv2.getRotationMatrix2D((from_center[0], from_center[1]), -1*angle, scale)
rot_mat[0][2] += ex
rot_mat[1][2] += ey
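            # The probe vector (1, 0) pushed through the linear part recovers the
            # similarity parameters: its norm is the scale and atan2 of its
            # components the rotation angle (negated for cv2.getRotationMatrix2D,
            # presumably to match OpenCV's angle convention).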
chips = cv2.warpAffine(img, rot_mat, (desired_size, desired_size))
crop_imgs.append(chips)
return crop_imgs
| 36.050975
| 124
| 0.501081
|
4d323b176381e873b4efc820fad41467b85b7b5e
| 5,479
|
py
|
Python
|
src/models/hg_2D_res_CLSTM.py
|
DNALuo/3Dposes
|
c5e2ed5fea612318d7715e239176571f593ccf83
|
[
"MIT"
] | null | null | null |
src/models/hg_2D_res_CLSTM.py
|
DNALuo/3Dposes
|
c5e2ed5fea612318d7715e239176571f593ccf83
|
[
"MIT"
] | null | null | null |
src/models/hg_2D_res_CLSTM.py
|
DNALuo/3Dposes
|
c5e2ed5fea612318d7715e239176571f593ccf83
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as f
from .layers.Residual import Residual
# apply the LSTM conv on each pixel
class CLSTM(nn.Module):
"""
Convolutional LSTM for Hourglass.
* Residual Block(input: numIn x w x h | output: numOut x w x h)
* CLSTM(input: )
"""
def __init__(self, inputSize, hiddenSize, numLayers, seqLength, res):
super(CLSTM, self).__init__()
self.inputSize = inputSize
self.hiddenSize = hiddenSize
self.numLayers = numLayers
self.seqLength = seqLength
self.res = res
# torch.nn.LSTM(input_size,hidden_size,num_layers)
self.lstm = nn.LSTM(self.inputSize, self.hiddenSize, self.numLayers, batch_first=True)
def forward(self, inp):
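        # Shape sketch (assuming inp is (N, C, H, W) with H == W == self.res and
        # hiddenSize == inputSize so the residual add below lines up): every
        # spatial location is folded into the batch axis and run through the
        # LSTM as its own sequence. The hard-coded ZeroPad2d of 15 below appears
        # to assume seqLength == 16.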
# Replicate encoder output
repDim = list(inp.unsqueeze(1).shape)
repDim[1] = self.seqLength
rep = inp.unsqueeze(1).expand(repDim)
# Merge into one mini-batch
x = inp.transpose(1, 2).transpose(2, 3)
x = x.contiguous()
# x = x.view(-1, self.inputSize)
x = x.view(-1, 1, self.inputSize)
x = nn.ZeroPad2d((0, 0, 0, 15))(x)
# LSTM
        ## features on each pixel, in other words, 1 x 1 conv layers
        '''
        The LSTM was built with batch_first=True, so its input has shape
        (batch, seq_len, input_size):
            batch: number of sequences (here one per spatial location)
            seq_len: time steps, number of sequence members
            input_size: per-step feature size (the per-pixel channel count)
        '''
h, _ = self.lstm(x)
h = h.contiguous()
# Split from one mini-batch
h = h.view(-1, self.res, self.res, self.seqLength, self.hiddenSize)
h = h.transpose(1, 3).transpose(2, 4)
# h = h.view(rep.shape)
# Add residual to encoder output
out = h + rep
        # Merge output into the batch dimension
out = out.view(-1, self.hiddenSize, self.res, self.res)
return out
class HourglassLSTM(nn.Module):
"""
One Hourglass with LSTM.
"""
def __init__(self, nFeats, nRes, nModules, hiddenSize, numLayers, seqLength):
super(HourglassLSTM, self).__init__()
# Parameters
## Hyperparameters for Hourglass
self.nFeats = nFeats
self.n = nRes
self.nModules = nModules
## Hyperparameters for LSTM
self.hiddenSize = hiddenSize
self.numLayers = numLayers
self.seqLength = seqLength
# Network
self.res1 = Residual(self.nFeats, self.nFeats)
self.clstm1 = CLSTM(self.nFeats, self.hiddenSize, self.numLayers, self.seqLength, 2**(nRes+2))
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
self.res2 = Residual(self.nFeats, self.nFeats)
if self.n > 1:
self.hglstm = HourglassLSTM(self.nFeats, nRes-1, self.nModules, self.hiddenSize,
self.numLayers, self.seqLength)
else:
self.res3 = Residual(self.nFeats, self.nFeats)
self.clstm2 = CLSTM(self.nFeats, self.hiddenSize, self.numLayers, self.seqLength, 2**(nRes+1))
self.res4 = Residual(self.nFeats, self.nFeats)
def forward(self, inp):
# Upper Branch
up1 = self.res1(inp)
up1 = self.clstm1(up1)
# Lower Branch
x = self.maxpool(inp)
x = self.res2(x)
if self.n > 1:
x = self.hglstm(x)
else:
x = self.res3(x)
x = self.clstm2(x)
x = self.res4(x)
up2 = f.interpolate(x, scale_factor=2, mode='nearest')
return up1 + up2
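# Note: each recursion level halves the spatial resolution, so the CLSTM
# resolutions 2**(nRes+2) and 2**(nRes+1) appear to assume a 2**(nRes+2)-sized
# feature map at the top level (e.g. 64 x 64 when nRes == 4).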
class Hourglass2DPrediction(nn.Module):
def __init__(self, opt):
super(Hourglass2DPrediction, self).__init__()
# Hyperparameters for Hourglass from opt
self.nFeats = opt.nFeats
self.nModules = opt.nModules
self.outputRes = opt.outputRes
self.nJoints = opt.nJoints
# Hyperparameters for LSTM from opt
self.seqLength = opt.preSeqLen
self.hiddenSize = opt.hiddenSize
self.numLayers = opt.numLayers
self.conv1 = nn.Conv2d(3, 64, bias=True, kernel_size=7, stride=2, padding=3)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.r1 = Residual(64, 128)
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
self.r2 = Residual(128, 128)
self.r3 = Residual(128, self.nFeats)
self.hgLSTM = HourglassLSTM(self.nFeats, 4, self.nModules, self.hiddenSize, self.numLayers, self.seqLength)
# 1×1 conv remaps of heatmaps
self.lin = nn.Sequential(nn.Conv2d(self.nFeats, self.nFeats, bias=True, kernel_size=1, stride=1),
nn.BatchNorm2d(self.nFeats),
self.relu)
# Output heatmaps
self.conv2 = nn.Conv2d(self.nFeats, self.nJoints, kernel_size=1, stride=1, bias=True)
def forward(self, inp):
# Initial processing of the image
x = self.conv1(inp)
x = self.bn1(x)
x = self.relu(x)
x = self.r1(x)
x = self.maxpool(x)
x = self.r2(x)
x = self.r3(x)
# Forecasting
x = self.hgLSTM(x)
# Linear layers to produce first set of predictions
x = self.lin(x)
# Output heatmaps
out = self.conv2(x)
# Split output in batch dimension
out = out.view(-1, self.seqLength, self.nJoints, self.outputRes, self.outputRes)
return out
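    # Usage sketch (hypothetical `opt`; these field values are illustrative,
    # not taken from the project's option parser):
    #   from types import SimpleNamespace
    #   opt = SimpleNamespace(nFeats=256, nModules=1, outputRes=64, nJoints=16,
    #                         preSeqLen=16, hiddenSize=256, numLayers=1)
    #   net = Hourglass2DPrediction(opt)
    #   out = net(torch.randn(2, 3, 256, 256))  # expected: (2, 16, 16, 64, 64)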
| 36.284768
| 115
| 0.588246
|
6b5248e5fd28abac896e775f99dfa768139a256b
| 3,458
|
py
|
Python
|
official/core/train_lib_test.py
|
kevinjesse/models
|
d11b487a2f5b7c9fc511e4219d7a5e6ef9148357
|
[
"Apache-2.0"
] | 1
|
2021-11-22T04:14:10.000Z
|
2021-11-22T04:14:10.000Z
|
official/core/train_lib_test.py
|
kevinjesse/models
|
d11b487a2f5b7c9fc511e4219d7a5e6ef9148357
|
[
"Apache-2.0"
] | null | null | null |
official/core/train_lib_test.py
|
kevinjesse/models
|
d11b487a2f5b7c9fc511e4219d7a5e6ef9148357
|
[
"Apache-2.0"
] | 1
|
2021-10-30T05:01:52.000Z
|
2021-10-30T05:01:52.000Z
|
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for train_ctl_lib."""
import json
import os
from absl import flags
from absl.testing import flagsaver
from absl.testing import parameterized
import tensorflow as tf
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from official.common import flags as tfm_flags
# pylint: disable=unused-import
from official.common import registry_imports
# pylint: enable=unused-import
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
FLAGS = flags.FLAGS
tfm_flags.define_flags()
class TrainTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(TrainTest, self).setUp()
self._test_config = {
'trainer': {
'checkpoint_interval': 10,
'steps_per_loop': 10,
'summary_interval': 10,
'train_steps': 10,
'validation_steps': 5,
'validation_interval': 10,
'optimizer_config': {
'optimizer': {
'type': 'sgd',
},
'learning_rate': {
'type': 'constant'
}
}
},
}
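        # The dict above is serialized to JSON and passed via `params_override`,
        # so each run patches the trainer settings of the (presumably registered)
        # 'mock' experiment rather than defining a full experiment config.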
@combinations.generate(
combinations.combine(
distribution_strategy=[
strategy_combinations.default_strategy,
strategy_combinations.tpu_strategy,
strategy_combinations.one_device_strategy_gpu,
],
mode='eager',
flag_mode=['train', 'eval', 'train_and_eval'],
run_post_eval=[True, False]))
def test_end_to_end(self, distribution_strategy, flag_mode, run_post_eval):
model_dir = self.get_temp_dir()
flags_dict = dict(
experiment='mock',
mode=flag_mode,
model_dir=model_dir,
params_override=json.dumps(self._test_config))
with flagsaver.flagsaver(**flags_dict):
params = train_utils.parse_configuration(flags.FLAGS)
train_utils.serialize_config(params, model_dir)
with distribution_strategy.scope():
task = task_factory.get_task(params.task, logging_dir=model_dir)
logs = train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode=flag_mode,
params=params,
model_dir=model_dir,
run_post_eval=run_post_eval)
if run_post_eval:
self.assertNotEmpty(logs)
else:
self.assertEmpty(logs)
self.assertNotEmpty(
tf.io.gfile.glob(os.path.join(model_dir, 'params.yaml')))
if flag_mode != 'eval':
self.assertNotEmpty(
tf.io.gfile.glob(os.path.join(model_dir, 'checkpoint')))
if __name__ == '__main__':
tf.test.main()
| 32.317757
| 80
| 0.652979
|
7cae71735017afad1e855cba685a3c5e269209b0
| 699
|
py
|
Python
|
setup.py
|
deresmos/xrandr-manage
|
035373fc419c656076e5bff0c7ab42d64f6ce066
|
[
"MIT"
] | null | null | null |
setup.py
|
deresmos/xrandr-manage
|
035373fc419c656076e5bff0c7ab42d64f6ce066
|
[
"MIT"
] | null | null | null |
setup.py
|
deresmos/xrandr-manage
|
035373fc419c656076e5bff0c7ab42d64f6ce066
|
[
"MIT"
] | null | null | null |
from setuptools import find_packages, setup
requires = ["prompt_toolkit"]
with open("README.md", "r", encoding="utf-8") as f:
readme = f.read()
setup(
name="xrandr_manager",
version="0.1.0",
description="Xrandr manager",
long_description=readme,
long_description_content_type="text/markdown",
author="deresmos",
author_email="deresmos@gmail.com",
url="https://github.com/deresmos/xrandr-manager",
python_requires=">=3.7",
packages=find_packages(),
include_package_data=False,
keywords=["tools"],
license="MIT License",
install_requires=requires,
entry_points={"console_scripts": ["xrandr-manager = xrandr_manager.console:run"]},
)
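# Usage sketch (assumed workflow, not stated in this file): installing with
# e.g. `pip install .` exposes the `xrandr-manager` console script, which
# dispatches to xrandr_manager.console:run as declared above.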
| 26.884615
| 86
| 0.692418
|
bbaaf05034f6672d729f9c5ffeafd102a1f8867b
| 132,925
|
py
|
Python
|
src/sage/matrix/special.py
|
hsm207/sage
|
020bd59ec28717bfab9af44d2231c53da1ff99f1
|
[
"BSL-1.0"
] | 1
|
2021-10-18T01:24:04.000Z
|
2021-10-18T01:24:04.000Z
|
src/sage/matrix/special.py
|
hsm207/sage
|
020bd59ec28717bfab9af44d2231c53da1ff99f1
|
[
"BSL-1.0"
] | null | null | null |
src/sage/matrix/special.py
|
hsm207/sage
|
020bd59ec28717bfab9af44d2231c53da1ff99f1
|
[
"BSL-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Constructors for special matrices
This module gathers several constructors for special, commonly used or
interesting matrices. These can be reached through ``matrix.<tab>``.
For example, here is a circulant matrix of order five::
sage: matrix.circulant(SR.var('a b c d e'))
[a b c d e]
[e a b c d]
[d e a b c]
[c d e a b]
[b c d e a]
The following constructions are available:
.. csv-table::
:class: contentstable
:widths: 30
:delim: |
:meth:`~sage.matrix.special.block_diagonal_matrix`
:meth:`~sage.matrix.special.block_matrix`
:meth:`~sage.matrix.special.circulant`
:meth:`~sage.matrix.special.column_matrix`
:meth:`~sage.matrix.special.companion_matrix`
:meth:`~sage.matrix.special.diagonal_matrix`
:meth:`~sage.matrix.special.elementary_matrix`
:meth:`~sage.matrix.special.hankel`
:meth:`~sage.matrix.special.hilbert`
:meth:`~sage.matrix.special.identity_matrix`
:meth:`~sage.matrix.special.ith_to_zero_rotation_matrix`
:meth:`~sage.matrix.special.jordan_block`
:meth:`~sage.matrix.special.lehmer`
:meth:`~sage.matrix.special.ones_matrix`
:meth:`~sage.matrix.special.random_matrix`
:meth:`~sage.matrix.special.random_diagonalizable_matrix`
:meth:`~sage.matrix.special.random_echelonizable_matrix`
:meth:`~sage.matrix.special.random_rref_matrix`
:meth:`~sage.matrix.special.random_subspaces_matrix`
:meth:`~sage.matrix.special.random_unimodular_matrix`
:meth:`~sage.matrix.special.toeplitz`
:meth:`~sage.matrix.special.vandermonde`
:meth:`~sage.matrix.special.vector_on_axis_rotation_matrix`
:meth:`~sage.matrix.special.zero_matrix`
The Combinatorics module provides further matrix constructors, such as Hadamard
matrices and Latin squares. See:
- :mod:`sage.combinat.matrices.hadamard_matrix`
- :mod:`sage.combinat.matrices.latin`
"""
# ****************************************************************************
# Copyright (C) 2005 William Stein <wstein@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from __future__ import print_function, absolute_import, division
from sage.rings.ring import is_Ring
import sage.matrix.matrix_space as matrix_space
from sage.modules.free_module_element import vector
from sage.structure.element import is_Matrix
from sage.rings.integer_ring import ZZ
from sage.rings.rational_field import QQ
from sage.rings.integer import Integer
from sage.misc.misc_c import running_total
from copy import copy
from .constructor import matrix
import sage.categories.pushout
def matrix_method(func=None, name=None):
"""
Allows a function to be tab-completed on the global matrix
constructor object.
INPUT:
- ``*function`` -- a single argument. The function that is being
decorated.
- ``**kwds`` -- a single optional keyword argument
``name=<string>``. The name of the corresponding method in the
global matrix constructor object. If not given, it is derived
from the function name.
EXAMPLES::
sage: from sage.matrix.constructor import matrix_method
sage: def foo_matrix(n): return matrix.diagonal(range(n))
sage: matrix_method(foo_matrix)
<function foo_matrix at ...>
sage: matrix.foo(5)
[0 0 0 0 0]
[0 1 0 0 0]
[0 0 2 0 0]
[0 0 0 3 0]
[0 0 0 0 4]
sage: matrix_method(foo_matrix, name='bar')
<function foo_matrix at ...>
sage: matrix.bar(3)
[0 0 0]
[0 1 0]
[0 0 2]
"""
if func is not None:
if name is None:
name = func.__name__.replace('matrix', '').strip('_')
prefix = " This function is available as %s(...) and matrix.%s(...)." % (
func.__name__, name)
func.__doc__ = "%s\n\n%s" % (prefix, func.__doc__)
setattr(matrix, name, func)
return func
else:
return lambda func: matrix_method(func, name=name)
@matrix_method
def column_matrix(*args, **kwds):
r"""
Construct a matrix, and then swap rows for columns and columns for rows.
.. note::
Linear algebra in Sage favors rows over columns. So,
generally, when creating a matrix, input vectors and lists are
treated as rows. This function is a convenience that turns
around this convention when creating a matrix. If you are not
familiar with the usual :func:`matrix`
constructor, you might want to consider it first.
INPUT:
Inputs are almost exactly the same as for the :func:`matrix`
constructor, which are documented there. But see
examples below for how dimensions are handled.
OUTPUT:
Output is exactly the transpose of what the :func:`matrix`
constructor would return. In other words, the
``matrix`` constructor builds a matrix and then this function
exchanges rows for columns, and columns for rows.
EXAMPLES:
The most compelling use of this function is when you have a
collection of lists or vectors that you would like to become the
columns of a matrix. In almost any other situation, the
    :func:`matrix` constructor can probably do the
job just as easily, or easier. ::
sage: col_1 = [1,2,3]
sage: col_2 = [4,5,6]
sage: column_matrix([col_1, col_2])
[1 4]
[2 5]
[3 6]
sage: v1 = vector(QQ, [10, 20])
sage: v2 = vector(QQ, [30, 40])
sage: column_matrix(QQ, [v1, v2])
[10 30]
[20 40]
If you only specify one dimension along with a flat list of entries,
then it will be the number of columns in the result (which is different
from the behavior of the ``matrix`` constructor). ::
sage: column_matrix(ZZ, 8, range(24))
[ 0 3 6 9 12 15 18 21]
[ 1 4 7 10 13 16 19 22]
[ 2 5 8 11 14 17 20 23]
And when you specify two dimensions, then they should be number of
columns first, then the number of rows, which is the reverse of how
they would be specified for the ``matrix`` constructor. ::
sage: column_matrix(QQ, 5, 3, range(15))
[ 0 3 6 9 12]
[ 1 4 7 10 13]
[ 2 5 8 11 14]
And a few unproductive, but illustrative, examples. ::
sage: A = matrix(ZZ, 3, 4, range(12))
sage: B = column_matrix(ZZ, 3, 4, range(12))
sage: A == B.transpose()
True
sage: A = matrix(QQ, 7, 12, range(84))
sage: A == column_matrix(A.columns())
True
sage: A = column_matrix(QQ, matrix(ZZ, 3, 2, range(6)) )
sage: A
[0 2 4]
[1 3 5]
sage: A.parent()
Full MatrixSpace of 2 by 3 dense matrices over Rational Field
"""
return matrix(*args, **kwds).transpose()
@matrix_method
def random_matrix(ring, nrows, ncols=None, algorithm='randomize', implementation=None, *args, **kwds):
r"""
Return a random matrix with entries in a specified ring, and possibly with additional properties.
INPUT:
- ``ring`` -- base ring for entries of the matrix
- ``nrows`` -- Integer; number of rows
- ``ncols`` -- (default: ``None``); number of columns; if ``None``
defaults to ``nrows``
- ``algorithm`` -- (default: ``randomize``); determines what properties
the matrix will have. See examples below for possible additional
arguments.
- ``randomize`` -- create a matrix of random elements from the
base ring, possibly controlling the density of non-zero entries.
- ``echelon_form`` -- creates a matrix in echelon form
- ``echelonizable`` -- creates a matrix that has a predictable
echelon form
- ``subspaces`` -- creates a matrix whose four subspaces, when
explored, have reasonably sized, integral valued, entries.
- ``unimodular`` -- creates a matrix of determinant 1.
- ``diagonalizable`` -- creates a diagonalizable matrix whose
eigenvectors, if computed by hand, will have only integer
entries.
- ``implementation`` -- (``None`` or string or a matrix class) a possible
implementation. See the documentation of the constructor of
:class:`~sage.matrix.matrix_space.MatrixSpace`.
- ``*args, **kwds`` -- arguments and keywords to describe additional
properties. See more detailed documentation below.
.. warning::
Matrices generated are not uniformly distributed. For unimodular
matrices over finite field this function does not even generate
all of them: for example ``Matrix.random(GF(3), 2, algorithm='unimodular')``
never generates ``[[2,0],[0,2]]``. This function is made for
teaching purposes.
.. warning::
An upper bound on the absolute value of the entries may be set
when the ``algorithm`` is ``echelonizable`` or ``unimodular``.
In these cases it is possible for this constructor to fail with
a ``ValueError``. If you *must* have this routine return
successfully, do not set ``upper_bound``. This behavior can
be partially controlled by a ``max_tries`` keyword.
.. note::
When constructing matrices with random entries and no
additional properties (i.e. when ``algorithm='randomize'``),
most of the randomness is controlled by the ``random_element``
method for elements of the base ring of the matrix, so the
documentation of that method may be relevant or useful.
EXAMPLES:
Random integer matrices. With no arguments, the majority of the entries
are zero, -1, and 1, and rarely "large." ::
sage: random_matrix(ZZ, 5, 5)
[ -8 2 0 0 1]
[ -1 2 1 -95 -1]
[ -2 -12 0 0 1]
[ -1 1 -1 -2 -1]
[ 4 -4 -6 5 0]
The ``distribution`` keyword set to ``uniform`` will limit values
between -2 and 2. ::
sage: random_matrix(ZZ, 5, 5, distribution='uniform')
[ 1 0 -2 1 1]
[ 1 0 0 0 2]
[-1 -2 0 2 -2]
[-1 -1 1 1 2]
[ 0 -2 -1 0 0]
The ``x`` and ``y`` keywords can be used to distribute entries uniformly.
When both are used ``x`` is the minimum and ``y`` is one greater than
the maximum. ::
sage: random_matrix(ZZ, 4, 8, x=70, y=100)
[81 82 70 81 78 71 79 94]
[80 98 89 87 91 94 94 77]
[86 89 85 92 95 94 72 89]
[78 80 89 82 94 72 90 92]
sage: random_matrix(ZZ, 3, 7, x=-5, y=5)
[-3 3 1 -5 3 1 2]
[ 3 3 0 3 -5 -2 1]
[ 0 -2 -2 2 -3 -4 -2]
If only ``x`` is given, then it is used as the upper bound of a range
starting at 0. ::
sage: random_matrix(ZZ, 5, 5, x=25)
[20 16 8 3 8]
[ 8 2 2 14 5]
[18 18 10 20 11]
[19 16 17 15 7]
[ 0 24 3 17 24]
To control the number of nonzero entries, use the ``density`` keyword
at a value strictly below the default of 1.0. The ``density`` keyword
is used to compute the number of entries that will be nonzero, but the
same entry may be selected more than once. So the value provided will
be an upper bound for the density of the created matrix. Note that for
a square matrix it is only necessary to set a single dimension. ::
sage: random_matrix(ZZ, 5, x=-10, y=10, density=0.75)
[-6 1 0 0 0]
[ 9 0 0 4 1]
[-6 0 0 -8 0]
[ 0 4 0 6 0]
[ 1 -9 0 0 -8]
sage: random_matrix(ZZ, 5, x=20, y=30, density=0.75)
[ 0 28 0 27 0]
[25 28 20 0 0]
[ 0 21 0 21 0]
[ 0 28 22 0 0]
[ 0 0 0 26 24]
For a matrix with low density it may be advisable to insist on a sparse
representation, as this representation is not selected automatically. ::
sage: A=random_matrix(ZZ, 5, 5)
sage: A.is_sparse()
False
sage: A = random_matrix(ZZ, 5, 5, sparse=True)
sage: A.is_sparse()
True
sage: random_matrix(ZZ, 5, 5, density=0.3, sparse=True)
[ 4 0 0 0 -1]
[ 0 0 0 0 -7]
[ 0 0 2 0 0]
[ 0 0 1 0 -4]
[ 0 0 0 0 0]
For algorithm testing you might want to control the number of bits,
say 10,000 entries, each limited to 16 bits. ::
sage: A = random_matrix(ZZ, 100, 100, x=2^16); A
100 x 100 dense matrix over Integer Ring (use the '.str()' method to see the entries)
One can prescribe a specific matrix implementation::
sage: K.<a> = FiniteField(2^8)
sage: type(random_matrix(K, 2, 5))
<type 'sage.matrix.matrix_gf2e_dense.Matrix_gf2e_dense'>
sage: type(random_matrix(K, 2, 5, implementation="generic"))
<type 'sage.matrix.matrix_generic_dense.Matrix_generic_dense'>
Random rational matrices. Now ``num_bound`` and ``den_bound`` control the
generation of random elements, by specifying limits on the absolute value of
numerators and denominators (respectively). Entries will be positive and
negative (map the absolute value function through the entries to get all
positive values). If either the numerator or denominator bound (or both)
is not used, then the values default to the distribution for ``ZZ``
described above. ::
sage: random_matrix(QQ, 2, 8, num_bound=20, den_bound=4)
[ -1/4 5 5 -9/2 5/3 19 15/2 19/2]
[ 20/3 -13/4 0 16 -5 -20 -11 -7/3]
sage: random_matrix(QQ, 4, density = 0.5, sparse=True)
[ 0 1 0 -1]
[ 0 0 0 0]
[ 6 0 3 0]
[ 1 1/3 0 0]
sage: A = random_matrix(QQ, 3, 10, num_bound = 99, den_bound = 99)
sage: positives = list(map(abs, A.list()))
sage: matrix(QQ, 3, 10, positives)
[ 2/45 40/21 45/46 17/22 1 70/79 97/71 7/24 12/5 13/8]
[ 8/25 1/3 61/14 92/45 4/85 3/38 95/16 82/71 1/5 41/16]
[55/76 19 28/41 52/51 14/3 43 76/13 8/77 13/38 37/21]
sage: random_matrix(QQ, 4, 10, den_bound = 10)
[ 1/9 1/5 -1 2/9 1/4 -1/7 1/8 -1/9 0 2]
[ 2/3 2 1/8 -2 0 0 -2 2 0 -1/2]
[ 0 2 1 -2/3 0 0 1/6 0 -1/3 -2/9]
[ 0 0 2/5 1/9 0 0 1/6 1/10 0 1]
Random matrices over other rings. Several classes of matrices have specialized
``randomize()`` methods. You can locate these with the Sage command::
search_def('randomize')
The default implementation of :meth:`~sage.matrix.matrix2.randomize` relies
on the ``random_element()`` method for the base ring. The ``density`` and
``sparse`` keywords behave as described above. Since we have a different
randomisation when using the optional meataxe package, we have to make sure
that we use the default implementation in this test::
sage: K.<a>=FiniteField(3^2)
sage: random_matrix(K, 2, 5, implementation='generic')
[ a + 1 a + 1 0 2*a + 2 a + 1]
[ a + 2 a + 1 2 0 0]
sage: random_matrix(RR, 3, 4, density=0.66)
[ 0.000000000000000 0.0869697644118808 -0.232952499486647 0.000000000000000]
[-0.793158962467820 0.000000000000000 0.318853016385637 0.000000000000000]
[-0.220342454156035 0.000000000000000 0.000000000000000 0.914890766754157]
sage: A = random_matrix(ComplexField(32), 3, density=0.8, sparse=True); A
[ 0.000000000 -0.443499553 - 0.406854867*I 0.000000000]
[ 0.171578609 + 0.644048756*I 0.518523841 + 0.794429291*I -0.341030168 - 0.507791873*I]
[ 0.000000000 0.000000000 0.782759943 + 0.236288982*I]
sage: A.is_sparse()
True
Random matrices in echelon form. The ``algorithm='echelon_form'`` keyword,
along with a requested number of non-zero rows (``num_pivots``) will return
a random matrix in echelon form. When the base ring is ``QQ`` the result has integer
entries. Other exact rings may be also specified. ::
sage: A = random_matrix(QQ, 4, 8, algorithm='echelon_form', num_pivots=3); A # random
[ 1 -5 0 -2 0 1 1 -2]
[ 0 0 1 -5 0 -3 -1 0]
[ 0 0 0 0 1 2 -2 1]
[ 0 0 0 0 0 0 0 0]
sage: A.base_ring()
Rational Field
sage: (A.nrows(), A.ncols())
(4, 8)
sage: A in sage.matrix.matrix_space.MatrixSpace(ZZ, 4, 8)
True
sage: A.rank()
3
sage: A == A.rref()
True
For more, see the documentation of the :func:`~sage.matrix.constructor.random_rref_matrix`
function. In the notebook or at the Sage command-line, first execute the following to make
this further documentation available::
from sage.matrix.constructor import random_rref_matrix
Random matrices with predictable echelon forms. The ``algorithm='echelonizable'``
keyword, along with a requested rank (``rank``) and optional size control
(``upper_bound``) will return a random matrix in echelon form. When the
base ring is ``ZZ`` or ``QQ`` the result has integer entries, whose magnitudes
can be limited by the value of ``upper_bound``, and the echelon form of the
matrix also has integer entries. Other exact rings may be also
specified, but there is no notion of controlling the size. Square matrices
of full rank generated by this function always have determinant one, and
can be constructed with the ``unimodular`` keyword. ::
sage: A = random_matrix(QQ, 4, 8, algorithm='echelonizable', rank=3, upper_bound=60); A # random
sage: A.base_ring()
Rational Field
sage: (A.nrows(), A.ncols())
(4, 8)
sage: A in sage.matrix.matrix_space.MatrixSpace(ZZ, 4, 8)
True
sage: A.rank()
3
sage: all(abs(x)<60 for x in A.list())
True
sage: A.rref() in sage.matrix.matrix_space.MatrixSpace(ZZ, 4, 8)
True
For more, see the documentation of the :func:`~sage.matrix.constructor.random_echelonizable_matrix`
function. In the notebook or at the Sage command-line, first execute the following to make
this further documentation available::
from sage.matrix.constructor import random_echelonizable_matrix
Random diagonalizable matrices. The ``algorithm='diagonalizable'`` keyword,
along with a requested matrix size (``size``) and optional lists of
eigenvalues (``eigenvalues``) and the corresponding eigenspace
dimensions (``dimensions``) will return a random diagonalizable matrix.
When the eigenvalues and dimensions are not specified the result will have
randomly generated values for both that fit with the designated size. ::
sage: A = random_matrix(QQ, 5, algorithm='diagonalizable', eigenvalues=[2,3,-1], dimensions=[1,2,2]); A # random
sage: all(x in ZZ for x in (A-(2*identity_matrix(5))).rref().list())
True
sage: all(x in ZZ for x in (A-(3*identity_matrix(5))).rref().list())
True
sage: all(x in ZZ for x in (A-(-1*identity_matrix(5))).rref().list())
True
sage: A.jordan_form()
[ 2| 0| 0| 0| 0]
[--+--+--+--+--]
[ 0| 3| 0| 0| 0]
[--+--+--+--+--]
[ 0| 0| 3| 0| 0]
[--+--+--+--+--]
[ 0| 0| 0|-1| 0]
[--+--+--+--+--]
[ 0| 0| 0| 0|-1]
For more, see the documentation of the :func:`~sage.matrix.constructor.random_diagonalizable_matrix`
function. In the notebook or at the Sage command-line, first execute the following to make
this further documentation available::
from sage.matrix.constructor import random_diagonalizable_matrix
Random matrices with predictable subspaces. The ``algorithm='subspaces'``
keyword, along with an optional rank (``rank``) will return
a matrix whose natural basis vectors for its four fundamental subspaces, if computed as
    described in the documentation of :func:`~sage.matrix.constructor.random_subspaces_matrix`,
    contain only integer entries. If ``rank`` is not set, the
rank of the matrix will be generated randomly. ::
sage: B = random_matrix(QQ, 5, 6, algorithm='subspaces', rank=3); B #random
sage: B_expanded=B.augment(identity_matrix(5)).rref()
sage: (B.nrows(), B.ncols())
(5, 6)
sage: all(x in ZZ for x in B_expanded.list())
True
sage: C=B_expanded.submatrix(0,0,B.nrows()-B.nullity(),B.ncols())
sage: L=B_expanded.submatrix(B.nrows()-B.nullity(),B.ncols())
sage: B.right_kernel() == C.right_kernel()
True
sage: B.row_space() == C.row_space()
True
sage: B.column_space() == L.right_kernel()
True
sage: B.left_kernel() == L.row_space()
True
For more, see the documentation of the :func:`~sage.matrix.constructor.random_subspaces_matrix`
function. In the notebook or at the Sage command-line, first execute the following to make
this further documentation available::
from sage.matrix.constructor import random_subspaces_matrix
Random unimodular matrices. The ``algorithm='unimodular'``
keyword, along with an optional entry size control (``upper_bound``)
will return a matrix of determinant 1. When the base ring is ``ZZ``
or ``QQ`` the result has integer entries, whose magnitudes
can be limited by the value of ``upper_bound``. ::
sage: C=random_matrix(QQ, 5, algorithm='unimodular', upper_bound=70); C # random
sage: det(C)
1
sage: C.base_ring()
Rational Field
sage: (C.nrows(), C.ncols())
(5, 5)
sage: all(abs(x)<70 for x in C.list())
True
For more, see the documentation of the :func:`~sage.matrix.constructor.random_unimodular_matrix`
function. In the notebook or at the Sage command-line, first execute the following to make
this further documentation available::
from sage.matrix.constructor import random_unimodular_matrix
TESTS:
We return an error for a bogus value of ``algorithm``::
sage: random_matrix(ZZ, 5, algorithm = 'bogus')
Traceback (most recent call last):
...
ValueError: random matrix algorithm "bogus" is not recognized
AUTHOR:
- William Stein (2007-02-06)
- Rob Beezer (2010-08-25) Documentation, code to allow additional types of output
"""
if ncols is None:
ncols = nrows
sparse = kwds.pop('sparse', False)
# Construct the parent of the desired matrix
parent = matrix_space.MatrixSpace(ring, nrows, ncols, sparse=sparse, implementation=implementation)
if algorithm == 'randomize':
density = kwds.pop('density', None)
# zero matrix is immutable, copy is mutable
A = copy(parent.zero_matrix())
if density is None:
A.randomize(density=float(1), nonzero=False, *args, **kwds)
else:
A.randomize(density=density, nonzero=True, *args, **kwds)
return A
elif algorithm == 'echelon_form':
return random_rref_matrix(parent, *args, **kwds)
elif algorithm == 'echelonizable':
return random_echelonizable_matrix(parent, *args, **kwds)
elif algorithm == 'diagonalizable':
return random_diagonalizable_matrix(parent, *args, **kwds)
elif algorithm == 'subspaces':
return random_subspaces_matrix(parent, *args, **kwds)
elif algorithm == 'unimodular':
return random_unimodular_matrix(parent, *args, **kwds)
else:
raise ValueError('random matrix algorithm "%s" is not recognized' % algorithm)
@matrix_method
def diagonal_matrix(arg0=None, arg1=None, arg2=None, sparse=True):
r"""
Return a square matrix with specified diagonal entries, and zeros elsewhere.
FORMATS:
1. diagonal_matrix(entries)
2. diagonal_matrix(nrows, entries)
3. diagonal_matrix(ring, entries)
4. diagonal_matrix(ring, nrows, entries)
INPUT:
- ``entries`` - the values to place along the diagonal
of the returned matrix. This may be a flat list, a
flat tuple, a vector or free module element, or
a one-dimensional NumPy array.
- ``nrows`` - the size of the returned matrix, which
will have an equal number of columns
- ``ring`` - the ring containing the entries of the
diagonal entries. This may not be specified in
combination with a NumPy array.
- ``sparse`` - default: ``True`` - whether or not
the result has a sparse implementation.
OUTPUT:
A square matrix over the given ``ring`` with a size
given by ``nrows``. If the ring is not given it
is inferred from the given entries. The values on
the diagonal of the returned matrix come from ``entries``.
If the number of entries is not enough to fill the whole
diagonal, it is padded with zeros.
EXAMPLES:
We first demonstrate each of the input formats with various
different ways to specify the entries.
Format 1: a flat list of entries. ::
sage: A = diagonal_matrix([2, 1.3, 5]); A
[ 2.00000000000000 0.000000000000000 0.000000000000000]
[0.000000000000000 1.30000000000000 0.000000000000000]
[0.000000000000000 0.000000000000000 5.00000000000000]
sage: A.parent()
Full MatrixSpace of 3 by 3 sparse matrices over Real Field with 53 bits of precision
Format 2: size specified, a tuple with initial entries. Note that a short list of entries
is effectively padded with zeros. ::
sage: A = diagonal_matrix(3, (4, 5)); A
[4 0 0]
[0 5 0]
[0 0 0]
sage: A.parent()
Full MatrixSpace of 3 by 3 sparse matrices over Integer Ring
Format 3: ring specified, a vector of entries. ::
sage: A = diagonal_matrix(QQ, vector(ZZ, [1,2,3])); A
[1 0 0]
[0 2 0]
[0 0 3]
sage: A.parent()
Full MatrixSpace of 3 by 3 sparse matrices over Rational Field
Format 4: ring, size and list of entries. ::
sage: A = diagonal_matrix(FiniteField(3), 3, [2, 16]); A
[2 0 0]
[0 1 0]
[0 0 0]
sage: A.parent()
Full MatrixSpace of 3 by 3 sparse matrices over Finite Field of size 3
NumPy arrays may be used as input. ::
sage: import numpy
sage: entries = numpy.array([1.2, 5.6]); entries
array([1.2, 5.6])
sage: A = diagonal_matrix(3, entries); A
[1.2 0.0 0.0]
[0.0 5.6 0.0]
[0.0 0.0 0.0]
sage: A.parent()
Full MatrixSpace of 3 by 3 sparse matrices over Real Double Field
sage: j = numpy.complex(0,1)
sage: entries = numpy.array([2.0+j, 8.1, 3.4+2.6*j]); entries
array([2. +1.j , 8.1+0.j , 3.4+2.6j])
sage: A = diagonal_matrix(entries); A
[2.0 + 1.0*I 0.0 0.0]
[ 0.0 8.1 0.0]
[ 0.0 0.0 3.4 + 2.6*I]
sage: A.parent()
Full MatrixSpace of 3 by 3 sparse matrices over Complex Double Field
sage: entries = numpy.array([4, 5, 6])
sage: A = diagonal_matrix(entries); A
[4 0 0]
[0 5 0]
[0 0 6]
sage: A.parent()
Full MatrixSpace of 3 by 3 sparse matrices over Integer Ring
sage: entries = numpy.array([4.1, 5.2, 6.3])
sage: A = diagonal_matrix(ZZ, entries); A
Traceback (most recent call last):
...
TypeError: unable to convert 4.1 to an element of Integer Ring
By default returned matrices have a sparse implementation. This can be changed
when using any of the formats. ::
sage: A = diagonal_matrix([1,2,3], sparse=False)
sage: A.parent()
Full MatrixSpace of 3 by 3 dense matrices over Integer Ring
An empty list and no ring specified defaults to the integers. ::
sage: A = diagonal_matrix([])
sage: A.parent()
Full MatrixSpace of 0 by 0 sparse matrices over Integer Ring
Giving the entries improperly may first complain about not being iterable::
sage: diagonal_matrix(QQ, 5, 10)
Traceback (most recent call last):
...
TypeError: 'sage.rings.integer.Integer' object is not iterable
Giving too many entries will raise an error. ::
sage: diagonal_matrix(QQ, 3, [1,2,3,4])
Traceback (most recent call last):
...
ValueError: number of diagonal matrix entries (4) exceeds the requested matrix size (3)
A negative size sometimes causes the error that there are too many elements. ::
sage: diagonal_matrix(-2, [2])
Traceback (most recent call last):
...
ValueError: number of diagonal matrix entries (1) exceeds the requested matrix size (-2)
Types for the entries need to be iterable (tuple, list, vector, NumPy array,
etc)::
sage: diagonal_matrix(x^2)
Traceback (most recent call last):
...
TypeError: 'sage.symbolic.expression.Expression' object is not iterable
TESTS::
sage: A = diagonal_matrix(reversed(range(4)))
AUTHOR:
- Rob Beezer (2011-01-11): total rewrite
"""
# Roll arguments leftward
#
# Leads with a ring?
# Formats 3, 4, else remains None
ring = None
if is_Ring(arg0):
ring = arg0
arg0 = arg1
arg1 = arg2
# Size of matrix specified?
# Formats 2, 4
nrows = None
if isinstance(arg0, (Integer, int)):
nrows = arg0
arg0 = arg1
# Object holding entries
# Formats 1, 2, 3, 4
entries = arg0
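    # After the rolling above, the four call formats have been normalized to the
    # triple (ring, nrows, entries), with ring and/or nrows possibly still None:
    #   diagonal_matrix([1, 2])        -> (None, None, [1, 2])
    #   diagonal_matrix(3, [1, 2])     -> (None, 3,    [1, 2])
    #   diagonal_matrix(QQ, [1, 2])    -> (QQ,   None, [1, 2])
    #   diagonal_matrix(QQ, 3, [1, 2]) -> (QQ,   3,    [1, 2])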
# sanity check for entries
from numpy import ndarray
if not isinstance(entries, (list, tuple, ndarray)):
entries = list(entries)
# Reconcile matrix size and number of entries
try:
nentries = len(entries)
except TypeError:
raise TypeError('unable to determine number of entries for diagonal matrix construction')
# sometimes catches a negative size
if nrows is not None and nentries > nrows:
raise ValueError('number of diagonal matrix entries (%s) exceeds the requested matrix size (%s)' % (nentries, nrows))
if nrows is None:
nrows = nentries
# provide a default ring for an empty list
if not len(entries) and ring is None:
ring = ZZ
# Convert entries to a list v over a common ring
from sage.modules.free_module_element import prepare
v, ring = prepare(entries, ring)
# Create a "diagonal" dictionary for matrix constructor
# If nentries < nrows, diagonal is effectively padded with zeros at end
w = {(i, i): v[i] for i in range(len(v))}
# Ship ring, matrix size, dictionary to matrix constructor
if ring is None:
return matrix(nrows, nrows, w, sparse=sparse)
else:
return matrix(ring, nrows, nrows, w, sparse=sparse)
@matrix_method
def identity_matrix(ring, n=0, sparse=False):
r"""
Return the `n \times n` identity matrix over the given
ring.
The default ring is the integers.
EXAMPLES::
sage: M = identity_matrix(QQ, 2); M
[1 0]
[0 1]
sage: M.parent()
Full MatrixSpace of 2 by 2 dense matrices over Rational Field
sage: M = identity_matrix(2); M
[1 0]
[0 1]
sage: M.parent()
Full MatrixSpace of 2 by 2 dense matrices over Integer Ring
sage: M.is_mutable()
True
sage: M = identity_matrix(3, sparse=True); M
[1 0 0]
[0 1 0]
[0 0 1]
sage: M.parent()
Full MatrixSpace of 3 by 3 sparse matrices over Integer Ring
sage: M.is_mutable()
True
"""
if isinstance(ring, (Integer, int)):
n = ring
ring = ZZ
return matrix_space.MatrixSpace(ring, n, n, sparse)(1)
@matrix_method
def lehmer(ring, n=0):
r"""
Return the `n \times n` Lehmer matrix.
The default ring is the rationals.
Element `(i, j)` in the Lehmer matrix is
`min(i, j)/max(i, j)`.
See :wikipedia:`Lehmer_matrix`.
EXAMPLES::
sage: matrix.lehmer(3)
[ 1 1/2 1/3]
[1/2 1 2/3]
[1/3 2/3 1]
"""
from sage.sets.integer_range import IntegerRange
if isinstance(ring, (Integer, int)):
n = ring
ring = QQ
return matrix_space.MatrixSpace(ring, n, n).matrix([[min(i, j)/max(i, j) for i in IntegerRange(1, n+1)] for j in IntegerRange(1, n+1)])
@matrix_method
def zero_matrix(ring, nrows=None, ncols=None, sparse=False):
r"""
Return the `nrows \times ncols` zero matrix over the given
ring.
The default ring is the integers.
EXAMPLES::
sage: M = zero_matrix(QQ, 2); M
[0 0]
[0 0]
sage: M.parent()
Full MatrixSpace of 2 by 2 dense matrices over Rational Field
sage: M = zero_matrix(2, 3); M
[0 0 0]
[0 0 0]
sage: M.parent()
Full MatrixSpace of 2 by 3 dense matrices over Integer Ring
sage: M.is_mutable()
True
sage: M = zero_matrix(3, 1, sparse=True); M
[0]
[0]
[0]
sage: M.parent()
Full MatrixSpace of 3 by 1 sparse matrices over Integer Ring
sage: M.is_mutable()
True
sage: matrix.zero(5)
[0 0 0 0 0]
[0 0 0 0 0]
[0 0 0 0 0]
[0 0 0 0 0]
[0 0 0 0 0]
"""
if isinstance(ring, (Integer, int)):
nrows, ncols = (ring, nrows)
ring = ZZ
return matrix_space.MatrixSpace(ring, nrows, ncols, sparse)(0)
@matrix_method
def ones_matrix(ring, nrows=None, ncols=None, sparse=False):
r"""
Return a matrix with all entries equal to 1.
CALL FORMATS:
In each case, the optional keyword ``sparse`` can be used.
1. ones_matrix(ring, nrows, ncols)
2. ones_matrix(ring, nrows)
3. ones_matrix(nrows, ncols)
4. ones_matrix(nrows)
INPUT:
- ``ring`` - default: ``ZZ`` - base ring for the matrix.
- ``nrows`` - number of rows in the matrix.
- ``ncols`` - number of columns in the matrix.
If omitted, defaults to the number of rows, producing a square matrix.
- ``sparse`` - default: ``False`` - if ``True`` creates a sparse representation.
OUTPUT:
A matrix of size ``nrows`` by ``ncols`` over the ``ring`` with every
entry equal to 1. While the result is far from sparse, you may wish
to choose a sparse representation when mixing this matrix with
other sparse matrices.
EXAMPLES:
A call specifying the ring and the size. ::
sage: M= ones_matrix(QQ, 2, 5); M
[1 1 1 1 1]
[1 1 1 1 1]
sage: M.parent()
Full MatrixSpace of 2 by 5 dense matrices over Rational Field
Without specifying the number of columns, the result is square. ::
sage: M = ones_matrix(RR, 2); M
[1.00000000000000 1.00000000000000]
[1.00000000000000 1.00000000000000]
sage: M.parent()
Full MatrixSpace of 2 by 2 dense matrices over Real Field with 53 bits of precision
The ring defaults to the integers if not given. ::
sage: M = ones_matrix(2, 3); M
[1 1 1]
[1 1 1]
sage: M.parent()
Full MatrixSpace of 2 by 3 dense matrices over Integer Ring
A lone integer input produces a square matrix over the integers. ::
sage: M = ones_matrix(3); M
[1 1 1]
[1 1 1]
[1 1 1]
sage: M.parent()
Full MatrixSpace of 3 by 3 dense matrices over Integer Ring
The result can have a sparse implementation. ::
sage: M = ones_matrix(3, 1, sparse=True); M
[1]
[1]
[1]
sage: M.parent()
Full MatrixSpace of 3 by 1 sparse matrices over Integer Ring
Giving just a ring will yield an error. ::
sage: ones_matrix(CC)
Traceback (most recent call last):
...
ValueError: constructing an all ones matrix requires at least one dimension
"""
if isinstance(ring, (Integer, int)):
nrows, ncols = (ring, nrows)
ring = ZZ
if nrows is None:
raise ValueError("constructing an all ones matrix requires at least one dimension")
if ncols is None:
nents = nrows**2
else:
nents = nrows*ncols
one = ring(1)
return matrix_space.MatrixSpace(ring, nrows, ncols, sparse).matrix([one]*nents)
@matrix_method
def elementary_matrix(arg0, arg1=None, **kwds):
r"""
Creates a square matrix that corresponds to a row operation or a column operation.
FORMATS:
In each case, ``R`` is the base ring, and is optional. ``n`` is the size
of the square matrix created. Any call may include the ``sparse`` keyword
to determine the representation used. The default is ``False`` which
leads to a dense representation. We describe the matrices by their
associated row operation, see the output description for more.
- ``elementary_matrix(R, n, row1=i, row2=j)``
The matrix which swaps rows ``i`` and ``j``.
- ``elementary_matrix(R, n, row1=i, scale=s)``
The matrix which multiplies row ``i`` by ``s``.
- ``elementary_matrix(R, n, row1=i, row2=j, scale=s)``
The matrix which multiplies row ``j`` by ``s``
and adds it to row ``i``.
Elementary matrices representing column operations are created
in an entirely analogous way, replacing ``row1`` by ``col1`` and
replacing ``row2`` by ``col2``.
Specifying the ring for entries of the matrix is optional. If it
is not given, and a scale parameter is provided, then a ring containing
the value of ``scale`` will be used. Otherwise, the ring defaults
to the integers.
OUTPUT:
An elementary matrix is a square matrix that is very close to being
an identity matrix. If ``E`` is an elementary matrix and ``A`` is any
matrix with the same number of rows, then ``E*A`` is the result of
applying a row operation to ``A``. This is how the three types
created by this function are described. Similarly, an elementary matrix
can be associated with a column operation, so if ``E`` has the same number
of columns as ``A`` then ``A*E`` is the result of performing a column
operation on ``A``.
An elementary matrix representing a row operation is created if ``row1``
is specified, while an elementary matrix representing a column operation
is created if ``col1`` is specified.
EXAMPLES:
Over the integers, creating row operations. Recall that row
and column numbering begins at zero. ::
sage: A = matrix(ZZ, 4, 10, range(40)); A
[ 0 1 2 3 4 5 6 7 8 9]
[10 11 12 13 14 15 16 17 18 19]
[20 21 22 23 24 25 26 27 28 29]
[30 31 32 33 34 35 36 37 38 39]
sage: E = elementary_matrix(4, row1=1, row2=3); E
[1 0 0 0]
[0 0 0 1]
[0 0 1 0]
[0 1 0 0]
sage: E*A
[ 0 1 2 3 4 5 6 7 8 9]
[30 31 32 33 34 35 36 37 38 39]
[20 21 22 23 24 25 26 27 28 29]
[10 11 12 13 14 15 16 17 18 19]
sage: E = elementary_matrix(4, row1=2, scale=10); E
[ 1 0 0 0]
[ 0 1 0 0]
[ 0 0 10 0]
[ 0 0 0 1]
sage: E*A
[ 0 1 2 3 4 5 6 7 8 9]
[ 10 11 12 13 14 15 16 17 18 19]
[200 210 220 230 240 250 260 270 280 290]
[ 30 31 32 33 34 35 36 37 38 39]
sage: E = elementary_matrix(4, row1=2, row2=1, scale=10); E
[ 1 0 0 0]
[ 0 1 0 0]
[ 0 10 1 0]
[ 0 0 0 1]
sage: E*A
[ 0 1 2 3 4 5 6 7 8 9]
[ 10 11 12 13 14 15 16 17 18 19]
[120 131 142 153 164 175 186 197 208 219]
[ 30 31 32 33 34 35 36 37 38 39]
Over the rationals, now as column operations. Recall that row
and column numbering begins at zero. Checks now have the
elementary matrix on the right. ::
sage: A = matrix(QQ, 5, 4, range(20)); A
[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]
[12 13 14 15]
[16 17 18 19]
sage: E = elementary_matrix(QQ, 4, col1=1, col2=3); E
[1 0 0 0]
[0 0 0 1]
[0 0 1 0]
[0 1 0 0]
sage: A*E
[ 0 3 2 1]
[ 4 7 6 5]
[ 8 11 10 9]
[12 15 14 13]
[16 19 18 17]
sage: E = elementary_matrix(QQ, 4, col1=2, scale=1/2); E
[ 1 0 0 0]
[ 0 1 0 0]
[ 0 0 1/2 0]
[ 0 0 0 1]
sage: A*E
[ 0 1 1 3]
[ 4 5 3 7]
[ 8 9 5 11]
[12 13 7 15]
[16 17 9 19]
sage: E = elementary_matrix(QQ, 4, col1=2, col2=1, scale=10); E
[ 1 0 0 0]
[ 0 1 10 0]
[ 0 0 1 0]
[ 0 0 0 1]
sage: A*E
[ 0 1 12 3]
[ 4 5 56 7]
[ 8 9 100 11]
[ 12 13 144 15]
[ 16 17 188 19]
An elementary matrix is always nonsingular. Then repeated row
operations can be represented by products of elementary matrices,
and this product is again nonsingular. If row operations are to
preserve fundamental properties of a matrix (like rank), we do not
allow scaling a row by zero. Similarly, the corresponding elementary
matrix is not constructed. Also, we do not allow adding a multiple
of a row to itself, since this could also lead to a new zero row. ::
sage: A = matrix(QQ, 4, 10, range(40)); A
[ 0 1 2 3 4 5 6 7 8 9]
[10 11 12 13 14 15 16 17 18 19]
[20 21 22 23 24 25 26 27 28 29]
[30 31 32 33 34 35 36 37 38 39]
sage: E1 = elementary_matrix(QQ, 4, row1=0, row2=1)
sage: E2 = elementary_matrix(QQ, 4, row1=3, row2=0, scale=100)
sage: E = E2*E1
sage: E.is_singular()
False
sage: E*A
[ 10 11 12 13 14 15 16 17 18 19]
[ 0 1 2 3 4 5 6 7 8 9]
[ 20 21 22 23 24 25 26 27 28 29]
[1030 1131 1232 1333 1434 1535 1636 1737 1838 1939]
sage: E3 = elementary_matrix(QQ, 4, row1=3, scale=0)
Traceback (most recent call last):
...
ValueError: scale parameter of row of elementary matrix must be non-zero
sage: E4 = elementary_matrix(QQ, 4, row1=3, row2=3, scale=12)
Traceback (most recent call last):
...
ValueError: cannot add a multiple of a row to itself
If the ring is not specified, and a scale parameter is given, the
base ring for the matrix is chosen to contain the scale parameter.
Otherwise, if no ring is given, the default is the integers. ::
sage: E = elementary_matrix(4, row1=1, row2=3)
sage: E.parent()
Full MatrixSpace of 4 by 4 dense matrices over Integer Ring
sage: E = elementary_matrix(4, row1=1, scale=4/3)
sage: E.parent()
Full MatrixSpace of 4 by 4 dense matrices over Rational Field
sage: E = elementary_matrix(4, row1=1, scale=I)
sage: E.parent()
Full MatrixSpace of 4 by 4 dense matrices over Number Field in I with defining polynomial x^2 + 1 with I = 1*I
sage: E = elementary_matrix(4, row1=1, scale=CDF(I))
sage: E.parent()
Full MatrixSpace of 4 by 4 dense matrices over Complex Double Field
sage: E = elementary_matrix(4, row1=1, scale=QQbar(I))
sage: E.parent()
Full MatrixSpace of 4 by 4 dense matrices over Algebraic Field
Returned matrices have a dense implementation by default,
but a sparse implementation may be requested. ::
sage: E = elementary_matrix(4, row1=0, row2=1)
sage: E.is_dense()
True
sage: E = elementary_matrix(4, row1=0, row2=1, sparse=True)
sage: E.is_sparse()
True
And the ridiculously small cases. The zero-row matrix cannot be built
since then there are no rows to manipulate. ::
sage: elementary_matrix(QQ, 1, row1=0, row2=0)
[1]
sage: elementary_matrix(QQ, 0, row1=0, row2=0)
Traceback (most recent call last):
...
ValueError: size of elementary matrix must be 1 or greater, not 0
TESTS::
sage: E = elementary_matrix('junk', 5, row1=3, row2=1, scale=12)
Traceback (most recent call last):
...
TypeError: optional first parameter must be a ring, not junk
sage: E = elementary_matrix(5, row1=3, scale='junk')
Traceback (most recent call last):
...
TypeError: scale must be an element of some ring, not junk
sage: E = elementary_matrix(ZZ, 5, row1=3, col2=3, scale=12)
Traceback (most recent call last):
...
ValueError: received an unexpected keyword: col2=3
sage: E = elementary_matrix(QQ, row1=3, scale=12)
Traceback (most recent call last):
...
ValueError: size of elementary matrix must be given
sage: E = elementary_matrix(ZZ, 4/3, row1=3, row2=1, scale=12)
Traceback (most recent call last):
...
TypeError: size of elementary matrix must be an integer, not 4/3
sage: E = elementary_matrix(ZZ, -3, row1=3, row2=1, scale=12)
Traceback (most recent call last):
...
ValueError: size of elementary matrix must be 1 or greater, not -3
sage: E = elementary_matrix(ZZ, 5, row2=1, scale=12)
Traceback (most recent call last):
...
ValueError: row1 or col1 must be specified
sage: E = elementary_matrix(ZZ, 5, row1=3, col1=3, scale=12)
Traceback (most recent call last):
...
ValueError: cannot specify both row1 and col1
sage: E = elementary_matrix(ZZ, 5, row1=4/3, row2=1, scale=12)
Traceback (most recent call last):
...
TypeError: row of elementary matrix must be an integer, not 4/3
sage: E = elementary_matrix(ZZ, 5, col1=5, col2=1, scale=12)
Traceback (most recent call last):
...
ValueError: column of elementary matrix must be positive and smaller than 5, not 5
sage: E = elementary_matrix(ZZ, 5, col1=3, col2=4/3, scale=12)
Traceback (most recent call last):
...
TypeError: column of elementary matrix must be an integer, not 4/3
sage: E = elementary_matrix(ZZ, 5, row1=3, row2=-1, scale=12)
Traceback (most recent call last):
...
ValueError: row of elementary matrix must be positive and smaller than 5, not -1
sage: E = elementary_matrix(ZZ, 5, row1=3, row2=1, scale=4/3)
Traceback (most recent call last):
...
        TypeError: scale parameter of elementary matrix must be an element of Integer Ring, not 4/3
sage: E = elementary_matrix(ZZ, 5, row1=3)
Traceback (most recent call last):
...
ValueError: insufficient parameters provided to construct elementary matrix
sage: E = elementary_matrix(ZZ, 5, row1=3, row2=3, scale=12)
Traceback (most recent call last):
...
ValueError: cannot add a multiple of a row to itself
sage: E = elementary_matrix(ZZ, 5, col1=3, scale=0)
Traceback (most recent call last):
...
ValueError: scale parameter of column of elementary matrix must be non-zero
AUTHOR:
- Rob Beezer (2011-03-04)
"""
import sage.structure.element
# determine ring and matrix size
if arg1 is not None and not is_Ring(arg0):
raise TypeError('optional first parameter must be a ring, not {0}'.format(arg0))
scale = kwds.pop('scale', None)
if is_Ring(arg0):
R = arg0
arg0 = arg1
elif scale is not None:
if not sage.structure.element.is_RingElement(scale):
raise TypeError('scale must be an element of some ring, not {0}'.format(scale))
R = scale.parent()
else:
R = ZZ
if arg0 is None:
raise ValueError('size of elementary matrix must be given')
try:
n = Integer(arg0)
except TypeError:
raise TypeError('size of elementary matrix must be an integer, not {0}'.format(arg0))
if n <= 0:
raise ValueError('size of elementary matrix must be 1 or greater, not {0}'.format(n))
# row operations or column operations?
# column operation matrix will be transpose of a row operation matrix
row1 = kwds.pop('row1', None)
col1 = kwds.pop('col1', None)
if row1 is None and col1 is None:
raise ValueError('row1 or col1 must be specified')
if row1 is not None and col1 is not None:
raise ValueError('cannot specify both row1 and col1')
rowop = row1 is not None
if rowop:
opstring = "row"
row2 = kwds.pop('row2', None)
else:
opstring = "column"
row1 = col1
row2 = kwds.pop('col2', None)
sparse = kwds.pop('sparse', False)
if kwds:
extra = kwds.popitem()
raise ValueError('received an unexpected keyword: {0}={1}'.format(extra[0], extra[1]))
# analyze parameters to determine matrix type
try:
row1 = Integer(row1)
except TypeError:
raise TypeError('{0} of elementary matrix must be an integer, not {1}'.format(opstring, row1))
if row1 < 0 or row1 >= n:
raise ValueError('{0} of elementary matrix must be positive and smaller than {1}, not {2}'.format(opstring, n, row1))
if row2 is not None:
try:
row2 = Integer(row2)
except TypeError:
raise TypeError('{0} of elementary matrix must be an integer, not {1}'.format(opstring, row2))
if row2 < 0 or row2 >= n:
raise ValueError('{0} of elementary matrix must be positive and smaller than {1}, not {2}'.format(opstring, n, row2))
if scale is not None:
try:
scale = R(scale)
except Exception:
            raise TypeError('scale parameter of elementary matrix must be an element of {0}, not {1}'.format(R, scale))
# determine type of matrix and adjust an identity matrix
# return row operation matrix or the transpose as a column operation matrix
elem = identity_matrix(R, n, sparse=sparse)
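    # Three usable cases: row1 + row2 + scale adds a multiple of one row to
    # another, row1 + row2 alone swaps two rows, and row1 + scale alone
    # multiplies a single row by a non-zero scalar.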
if row2 is None and scale is None:
raise ValueError('insufficient parameters provided to construct elementary matrix')
elif row2 is not None and scale is not None:
if row1 == row2:
raise ValueError('cannot add a multiple of a {0} to itself'.format(opstring))
elem[row1, row2] = scale
elif row2 is not None and scale is None:
elem[row1, row1] = 0
elem[row2, row2] = 0
elem[row1, row2] = 1
elem[row2, row1] = 1
elif row2 is None and scale is not None:
if scale == 0:
raise ValueError('scale parameter of {0} of elementary matrix must be non-zero'.format(opstring))
elem[row1, row1] = scale
if rowop:
return elem
else:
return elem.transpose()
@matrix_method
def circulant(v, sparse=None):
r"""
    Return the circulant matrix specified by its first row `v`.

    A circulant `n \times n` matrix specified by the first row `v=(v_0,\dots,v_{n-1})` is
    the matrix `(c_{ij})_{0 \leq i,j \leq n-1}`, where `c_{ij} = v_{(j-i) \bmod n}`.
INPUT:
- ``v`` -- a list or a vector of values
    - ``sparse`` -- ``None`` by default; if ``sparse`` is set to ``True``, the output
      will be sparse, while setting it to ``False`` produces dense output.
      If ``sparse`` is not set and ``v`` is a vector, the output sparsity is determined
      by the sparsity of ``v``; otherwise, the output will be dense.
EXAMPLES::
sage: v=[1,2,3,4,8]
sage: matrix.circulant(v)
[1 2 3 4 8]
[8 1 2 3 4]
[4 8 1 2 3]
[3 4 8 1 2]
[2 3 4 8 1]
sage: m = matrix.circulant(vector(GF(3),[0,1,-1],sparse=True)); m
[0 1 2]
[2 0 1]
[1 2 0]
sage: m.is_sparse()
True
TESTS::
sage: m = matrix.circulant(vector(GF(3),[0,1,-1],sparse=False))
sage: m.is_sparse()
False
sage: matrix.circulant([0,1,-1]).is_sparse()
False
sage: matrix.circulant([0,1,-1], sparse=True).is_sparse()
True
"""
if sparse is None:
try:
sparse = v.is_sparse()
except AttributeError:
sparse = False
n = len(v)
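    # Entry (i, j) is v[(j - i) mod n], so each row is the previous row
    # shifted one place to the right, with wrap-around.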
return matrix(n, n, lambda i, j: v[(j - i) % n], sparse=sparse)
def _determine_block_matrix_grid(sub_matrices):
r"""
For internal use. This tries to determine the dimensions
of rows/columns when assembling the matrices in sub_matrices in a
rectangular grid. It returns a pair of lists containing
respectively the sizes of rows and columns.
sub_matrices must be a list of lists of matrices. All sublists
are expected to be the same size.
Non-zero scalars are considered to be square matrices of any size,
and zeroes are considered to be zero matrices of any size.
A ValueError is raised if there is insufficient or
conflicting information.
TESTS::
sage: from sage.matrix.special import _determine_block_matrix_grid
sage: A = matrix(QQ, 2, 2, [3,9,6,10])
sage: _determine_block_matrix_grid([[A, A], [A, A]])
([2, 2], [2, 2])
sage: B = matrix(QQ, 1, 1, [ 1 ] )
sage: C = matrix(QQ, 2, 2, [ 2, 3, 4, 5 ] )
sage: _determine_block_matrix_grid([[B, 0], [0, C]])
([1, 2], [1, 2])
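
    A non-zero scalar is sized to match the blocks around it; for instance,
    with ``A`` as above::

        sage: _determine_block_matrix_grid([[A, 1], [0, A]])
        ([2, 2], [2, 2])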
"""
nrows = len(sub_matrices)
if nrows == 0:
return ([], [])
ncols = len(sub_matrices[0])
if ncols == 0:
return ([0] * nrows, [])
row_heights = [None] * nrows
col_widths = [None] * ncols
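    # Iteratively propagate known dimensions through the grid: a matrix fixes
    # both its row height and column width, a non-zero scalar forces its row
    # height and column width to agree, and a zero gives no information.
    # Repeat until a full pass learns nothing new.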
changing = True
while changing:
changing = False
for i in range(nrows):
for j in range(ncols):
M = sub_matrices[i][j]
sub_width = None
sub_height = None
if is_Matrix(M):
sub_width = M.ncols()
sub_height = M.nrows()
elif M: # non-zero scalar is interpreted as a square matrix
if row_heights[i] is None:
sub_width = col_widths[j]
else:
sub_width = row_heights[i]
sub_height = sub_width
if sub_width is not None:
if col_widths[j] is None:
changing = True
col_widths[j] = sub_width
elif col_widths[j] != sub_width:
raise ValueError("incompatible submatrix widths")
if sub_height is not None:
if row_heights[i] is None:
changing = True
row_heights[i] = sub_height
elif row_heights[i] != sub_height:
raise ValueError("incompatible submatrix heights")
    if None in row_heights or None in col_widths:
        raise ValueError("insufficient information to determine dimensions.")
return (row_heights, col_widths)
def _determine_block_matrix_rows(sub_matrices):
"""
For internal use. This tests if the matrices in sub_matrices
fit in a rectangular matrix when assembled a row at a time.
sub_matrices must be a list of lists of matrices.
It returns a pair (row_heights, zero_widths, width) where
row_heights is the list of row heights, zero_widths is the
total width filled up by zero matrices per row, and width
is the total width of the resulting matrix.
Non-zero scalars are considered to be square matrices of any size,
and zeroes are considered to be zero matrices of any size.
A ``ValueError`` is raised if there is insufficient or
conflicting information.
TESTS::
sage: from sage.matrix.special import _determine_block_matrix_rows
sage: A = Matrix(ZZ, 1, 4, [1, 2, 3, 4])
sage: _determine_block_matrix_rows([ [1, 1], [ A ] ])
([2, 1], [0, 0], 4)
sage: B = Matrix(ZZ, 2, 2, [1, 2, 3, 4])
sage: _determine_block_matrix_rows([ [B, B], [B, 1] ])
([2, 2], [0, 0], 4)
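
    A zero entry expands to fill the width left over in its row; with ``B``
    as above::

        sage: _determine_block_matrix_rows([ [B, 0], [B, B] ])
        ([2, 2], [2, 0], 4)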
"""
total_width = None
row_heights = [None] * len(sub_matrices)
zero_widths = [0] * len(sub_matrices)
# We first do a pass to see if we can determine the width
unknowns = False
for i in range(len(sub_matrices)):
R = sub_matrices[i]
height = None
# We first do a pass to see if we can determine the height
# of this row
found_zeroes = False
for M in R:
if is_Matrix(M):
if height is None:
height = M.nrows()
elif height != M.nrows():
raise ValueError("incompatible submatrix heights")
elif not M:
found_zeroes = True
if not R:
height = 0
# If we have a height, then we know the dimensions of any
# non-zero scalars, and can maybe compute the width
if height is not None and not found_zeroes:
width = 0
for M in R:
if is_Matrix(M):
width += M.ncols()
else:
# non-zero scalar
width += height
if total_width is None:
total_width = width
elif total_width != width:
raise ValueError("incompatible submatrix widths")
row_heights[i] = height
else:
# We don't set height here even if we know it,
# to signal this row hasn't been fit yet.
unknowns = True
if total_width is None:
raise ValueError("insufficient information to determine submatrix widths")
if unknowns:
# Do a second pass and see if the remaining rows can be
# determined now that we know the width of the matrix.
for i in range(len(sub_matrices)):
if row_heights[i] is not None:
continue
R = sub_matrices[i]
zero_state = 0
# 0: no zeroes found
# 1: consecutive zeroes found
# 2: consecutive zeroes followed by non-zero found
# 3: non-consecutive zeroes found
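            # For example, [0, 0, M, 0] ends in state 3, [0, M, N] in state 2,
            # and [M, 0, 0] in state 1.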
scalars = 0
width = 0
height = None
for j in range(len(R)):
M = R[j]
if is_Matrix(M):
height = M.nrows()
width += M.ncols()
if zero_state == 1:
zero_state = 2
elif not M:
if zero_state == 0:
zero_state = 1
elif zero_state == 2:
zero_state = 3
else:
scalars += 1
remaining_width = total_width - width
# This remaining width has to be split over the
# zeroes and (non-zero) scalars
if height is not None:
remaining_width -= scalars * height
if remaining_width < 0:
raise ValueError("incompatible submatrix widths")
if remaining_width > 0 and zero_state == 3:
raise ValueError("insufficient information to determine submatrix widths")
if remaining_width > 0 and zero_state == 0:
raise ValueError("incompatible submatrix widths")
# otherwise, things fit
row_heights[i] = height
zero_widths[i] = remaining_width
elif zero_state != 0:
# if we don't know the height, and there are zeroes,
# we can't determine the height
raise ValueError("insufficient information to determine submatrix heights")
            elif total_width % len(R):
                raise ValueError("incompatible submatrix widths")
            else:
                height = total_width // len(R)
row_heights[i] = height
# If we got this far, then everything fits
return (row_heights, zero_widths, total_width)
@matrix_method
def block_matrix(*args, **kwds):
r"""
Return a larger matrix made by concatenating submatrices
(rows first, then columns). For example, the matrix
::
[ A B ]
[ C D ]
is made up of submatrices A, B, C, and D.
INPUT:
The block_matrix command takes a list of submatrices to add
as blocks, optionally preceded by a ring and the number of block rows
and block columns, and returns a matrix.
The submatrices can be specified as a list of matrices (using
``nrows`` and ``ncols`` to determine their layout), or a list
of lists of matrices, where each list forms a row.
- ``ring`` - the base ring
- ``nrows`` - the number of block rows
- ``ncols`` - the number of block cols
- ``sub_matrices`` - matrices (see below for syntax)
- ``subdivide`` - boolean, whether or not to add
subdivision information to the matrix
- ``sparse`` - boolean, whether to make the resulting matrix sparse
EXAMPLES::
sage: A = matrix(QQ, 2, 2, [3,9,6,10])
sage: block_matrix([ [A, -A], [~A, 100*A] ])
[ 3 9| -3 -9]
[ 6 10| -6 -10]
[-----------+-----------]
[-5/12 3/8| 300 900]
[ 1/4 -1/8| 600 1000]
If the number of submatrices in each row is the same,
you can specify the submatrices as a single list too::
sage: block_matrix(2, 2, [ A, A, A, A ])
[ 3 9| 3 9]
[ 6 10| 6 10]
[-----+-----]
[ 3 9| 3 9]
[ 6 10| 6 10]
One can use constant entries::
sage: block_matrix([ [1, A], [0, 1] ])
[ 1 0| 3 9]
[ 0 1| 6 10]
[-----+-----]
[ 0 0| 1 0]
[ 0 0| 0 1]
A zero entry may represent any square or non-square zero matrix::
sage: B = matrix(QQ, 1, 1, [ 1 ] )
sage: C = matrix(QQ, 2, 2, [ 2, 3, 4, 5 ] )
sage: block_matrix([ [B, 0], [0, C] ])
[1|0 0]
[-+---]
[0|2 3]
[0|4 5]
One can specify the number of rows or columns as keywords too::
sage: block_matrix([A, -A, ~A, 100*A], ncols=4)
[ 3 9| -3 -9|-5/12 3/8| 300 900]
[ 6 10| -6 -10| 1/4 -1/8| 600 1000]
sage: block_matrix([A, -A, ~A, 100*A], nrows=1)
[ 3 9| -3 -9|-5/12 3/8| 300 900]
[ 6 10| -6 -10| 1/4 -1/8| 600 1000]
It handles base rings nicely too::
sage: R.<x> = ZZ['x']
sage: block_matrix(2, 2, [1/2, A, 0, x-1])
[ 1/2 0| 3 9]
[ 0 1/2| 6 10]
[-----------+-----------]
[ 0 0|x - 1 0]
[ 0 0| 0 x - 1]
sage: block_matrix(2, 2, [1/2, A, 0, x-1]).parent()
Full MatrixSpace of 4 by 4 dense matrices over Univariate Polynomial Ring in x over Rational Field
Subdivisions are optional. If they are disabled, the columns need not line up::
sage: B = matrix(QQ, 2, 3, range(6))
sage: block_matrix([ [~A, B], [B, ~A] ], subdivide=False)
[-5/12 3/8 0 1 2]
[ 1/4 -1/8 3 4 5]
[ 0 1 2 -5/12 3/8]
[ 3 4 5 1/4 -1/8]
Without subdivisions it also deduces dimensions for scalars if possible::
sage: C = matrix(ZZ, 1, 2, range(2))
sage: block_matrix([ [ C, 0 ], [ 3, 4 ], [ 5, 6, C ] ], subdivide=False )
[0 1 0 0]
[3 0 4 0]
[0 3 0 4]
[5 6 0 1]
If all submatrices are sparse (unless there are none at all), the result
will be a sparse matrix. Otherwise it will be dense by default. The
``sparse`` keyword can be used to override this::
sage: A = Matrix(ZZ, 2, 2, [0, 1, 0, 0], sparse=True)
sage: block_matrix([ [ A ], [ A ] ]).parent()
Full MatrixSpace of 4 by 2 sparse matrices over Integer Ring
sage: block_matrix([ [ A ], [ A ] ], sparse=False).parent()
Full MatrixSpace of 4 by 2 dense matrices over Integer Ring
Consecutive zero submatrices are consolidated. ::
sage: B = matrix(2, range(4))
sage: C = matrix(2, 8, range(16))
sage: block_matrix(2, [[B,0,0,B],[C]], subdivide=False)
[ 0 1 0 0 0 0 0 1]
[ 2 3 0 0 0 0 2 3]
[ 0 1 2 3 4 5 6 7]
[ 8 9 10 11 12 13 14 15]
Ambiguity is not tolerated. ::
sage: B = matrix(2, range(4))
sage: C = matrix(2, 8, range(16))
sage: block_matrix(2, [[B,0,B,0],[C]], subdivide=False)
Traceback (most recent call last):
...
ValueError: insufficient information to determine submatrix widths
Giving only a flat list of submatrices does not work::
sage: A = matrix(2, 3, range(6))
sage: B = matrix(3, 3, range(9))
sage: block_matrix([A, A, B, B])
Traceback (most recent call last):
...
ValueError: must specify either nrows or ncols
TESTS::
sage: A = matrix(ZZ, 2, 2, [3,5,8,13])
sage: block_matrix(A)
[ 3 5]
[ 8 13]
"""
args = list(args)
sparse = kwds.get('sparse', None)
if not args:
if sparse is not None:
return matrix_space.MatrixSpace(ZZ, 0, 0, sparse=sparse)([])
else:
return matrix_space.MatrixSpace(ZZ, 0, 0)([])
if len(args) >= 1 and is_Ring(args[0]):
# A ring is specified
if kwds.get('ring', args[0]) != args[0]:
raise ValueError("base ring specified twice and they are different")
ring = args[0]
args.pop(0)
else:
ring = kwds.get('ring', None)
if len(args) >= 1:
try:
nrows = int(args[0])
args.pop(0)
if kwds.get('nrows', nrows) != nrows:
raise ValueError("number of rows specified twice and they are different")
except TypeError:
nrows = kwds.get('nrows', None)
else:
nrows = kwds.get('nrows', None)
if len(args) >= 1:
# check to see if additionally, the number of columns is specified
try:
ncols = int(args[0])
args.pop(0)
if kwds.get('ncols', ncols) != ncols:
raise ValueError("number of columns specified twice and they are different")
except TypeError:
ncols = kwds.get('ncols', None)
else:
ncols = kwds.get('ncols', None)
# Now we've taken care of initial ring, nrows, and ncols arguments.
# Now the rest of the arguments are a list of rows, a flat list of
# matrices, or a single value.
if not args:
args = [[]]
    if len(args) > 1:
        raise TypeError("invalid block_matrix invocation: {0}".format(args))
sub_matrices = args[0]
if is_Matrix(sub_matrices):
M = sub_matrices
# a single matrix (check nrows/ncols/ring)
if (nrows is not None and nrows != 1) or \
(ncols is not None and ncols != 1):
raise ValueError("invalid nrows/ncols passed to block_matrix")
if ring is not None:
M = M.change_ring(ring)
if sparse is not None and M.is_sparse() != sparse:
M = M.sparse_matrix() if sparse else M.dense_matrix()
return M
if not isinstance(sub_matrices, (list, tuple)):
raise TypeError("invalid block_matrix invocation")
subdivide = kwds.get('subdivide', True)
# Will we try to place the matrices in a rectangular grid?
try_grid = True
if not sub_matrices:
if (nrows is not None and nrows != 0) or \
(ncols is not None and ncols != 0):
raise ValueError("invalid nrows/ncols passed to block_matrix")
elif isinstance(sub_matrices[0], (list, tuple)):
# A list of lists: verify all elements are lists, and if
# ncols is set, the lengths match.
if nrows is not None and len(sub_matrices) != nrows:
raise ValueError("invalid nrows passed to block_matrix")
first_len = len(sub_matrices[0])
if ncols is not None and first_len != ncols:
raise ValueError("invalid ncols passed to block_matrix")
same_length = all(isinstance(v, (list, tuple)) and len(v) == first_len for v in sub_matrices)
if subdivide and not same_length:
raise ValueError("list of rows is not valid (rows are wrong types or lengths)")
try_grid = same_length
else:
# A flat list
# determine the block dimensions
n = len(sub_matrices)
if nrows is None:
if ncols is None:
raise ValueError("must specify either nrows or ncols")
else:
nrows = n // ncols
elif ncols is None:
ncols = n // nrows
if nrows * ncols != n:
raise ValueError("given number of rows (%s), columns (%s) incompatible with number of submatrices (%s)" % (nrows, ncols, n))
# Now create a list of lists from this
sub_matrices = [sub_matrices[i * ncols: (i + 1) * ncols]
for i in range(nrows)]
# At this point sub_matrices is a list of lists
# determine the base ring and sparsity
if ring is None:
ring = ZZ
for row in sub_matrices:
for M in row:
R = M.base_ring() if is_Matrix(M) else M.parent()
if R is not ZZ:
ring = sage.categories.pushout.pushout(ring, R)
if sparse is None:
sparse = True
for row in sub_matrices:
for M in row:
if sparse and is_Matrix(M) and not M.is_sparse():
sparse = False
row_heights = None
col_widths = None
zero_widths = None
total_width = None
# We first try to place the matrices in a rectangular grid
if try_grid:
try:
(row_heights, col_widths) = _determine_block_matrix_grid(sub_matrices)
except ValueError as e:
if subdivide:
raise ValueError(e)
if col_widths is None:
# Try placing the matrices in rows instead
# (Only if subdivide is False)
(row_heights, zero_widths, total_width) = _determine_block_matrix_rows(sub_matrices)
# Success, so assemble the final matrix
big = None
for i in range(len(sub_matrices)):
R = sub_matrices[i]
row = None
for j in range(len(R)):
M = R[j]
if is_Matrix(M):
if M.base_ring() is not ring:
M = M.change_ring(ring)
if M.is_sparse() != sparse:
M = M.sparse_matrix() if sparse else M.dense_matrix()
elif not M and zero_widths is not None:
if zero_widths[i] > 0:
M = matrix(ring, row_heights[i], zero_widths[i], 0, sparse=sparse)
zero_widths[i] = 0
else:
continue # zero-width matrix
else:
if zero_widths is not None:
M = matrix(ring, row_heights[i], row_heights[i], M, sparse=sparse)
else:
M = matrix(ring, row_heights[i], col_widths[j], M, sparse=sparse)
# append M to this row
if row is None:
row = M
else:
row = row.augment(M)
# append row to final matrix
if big is None:
big = row
else:
big = big.stack(row)
if big is None:
if ring is None:
ring = ZZ
big = matrix(ring, 0, 0)
if subdivide:
big.subdivide(running_total(row_heights[:-1]),
running_total(col_widths[:-1]))
return big
@matrix_method
def block_diagonal_matrix(*sub_matrices, **kwds):
"""
Create a block matrix whose diagonal block entries are given by
sub_matrices, with zero elsewhere.
See also :meth:`block_matrix`.
EXAMPLES::
sage: A = matrix(ZZ, 2, [1,2,3,4])
sage: block_diagonal_matrix(A, A)
[1 2|0 0]
[3 4|0 0]
[---+---]
[0 0|1 2]
[0 0|3 4]
The sub-matrices need not be square::
sage: B = matrix(QQ, 2, 3, range(6))
sage: block_diagonal_matrix(~A, B)
[ -2 1| 0 0 0]
[ 3/2 -1/2| 0 0 0]
[---------+--------------]
[ 0 0| 0 1 2]
[ 0 0| 3 4 5]
"""
if isinstance(sub_matrices, (list, tuple)) and len(sub_matrices) == 1:
sub_matrices = sub_matrices[0]
n = len(sub_matrices)
entries = [ZZ.zero()] * n**2
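    # Place the given matrices at the diagonal positions of an n x n block
    # layout; block_matrix() expands the scalar zeros elsewhere into zero
    # blocks of matching size.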
for i in range(n):
entries[n*i+i] = sub_matrices[i]
return block_matrix(n, n, entries, **kwds)
@matrix_method
def jordan_block(eigenvalue, size, sparse=False):
r"""
Return the Jordan block for the given eigenvalue with given size.
INPUT:
- ``eigenvalue`` -- eigenvalue for the diagonal entries of the block
- ``size`` -- size of the square matrix
    - ``sparse`` -- (default: ``False``) if ``True``, return a sparse matrix
EXAMPLES::
sage: jordan_block(5, 3)
[5 1 0]
[0 5 1]
[0 0 5]
TESTS::
sage: jordan_block(6.2, 'junk')
Traceback (most recent call last):
...
TypeError: size of Jordan block needs to be an integer, not junk
sage: jordan_block(6.2, -1)
Traceback (most recent call last):
...
ValueError: size of Jordan block must be non-negative, not -1
"""
try:
size = ZZ(size)
except TypeError:
msg = "size of Jordan block needs to be an integer, not {0}"
raise TypeError(msg.format(size))
if size < 0:
msg = "size of Jordan block must be non-negative, not {0}"
raise ValueError(msg.format(size))
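    # Start from the scalar diagonal, then add the superdiagonal of ones.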
block = diagonal_matrix([eigenvalue] * size, sparse=sparse)
for i in range(size - 1):
block[i, i + 1] = 1
return block
@matrix_method
def companion_matrix(poly, format='right'):
r"""
Create a companion matrix from a monic polynomial.
INPUT:
- ``poly`` -- a univariate polynomial, or an iterable containing
the coefficients of a polynomial, with low-degree coefficients first.
The polynomial (or the polynomial implied by the coefficients) must
be monic. In other words, the leading coefficient must be one.
      A symbolic expression that might also be a polynomial is not
      proper input; see the examples below.
    - ``format`` -- (default: ``'right'``) specifies one of four
      variations of a companion matrix. Allowable values are
      ``'right'``, ``'left'``, ``'top'`` and ``'bottom'``, indicating which
      border of the matrix contains the negatives of the coefficients.
OUTPUT:
A square matrix with a size equal to the degree of the polynomial.
    The returned matrix has ones above or below the diagonal, and the
    negatives of the coefficients along the indicated border of the
    matrix (excepting the leading one coefficient).
See the first examples below for precise illustrations.
EXAMPLES:
Each of the four possibilities. Notice that the coefficients are
specified and their negatives become the entries of the matrix. The
leading one must be given, but is not used. The permutation matrix
``P`` is the identity matrix, with the columns reversed. The last three
statements test the general relationships between the four variants. ::
sage: poly = [-2, -3, -4, -5, -6, 1]
sage: R = companion_matrix(poly, format='right'); R
[0 0 0 0 2]
[1 0 0 0 3]
[0 1 0 0 4]
[0 0 1 0 5]
[0 0 0 1 6]
sage: L = companion_matrix(poly, format='left'); L
[6 1 0 0 0]
[5 0 1 0 0]
[4 0 0 1 0]
[3 0 0 0 1]
[2 0 0 0 0]
sage: B = companion_matrix(poly, format='bottom'); B
[0 1 0 0 0]
[0 0 1 0 0]
[0 0 0 1 0]
[0 0 0 0 1]
[2 3 4 5 6]
sage: T = companion_matrix(poly, format='top'); T
[6 5 4 3 2]
[1 0 0 0 0]
[0 1 0 0 0]
[0 0 1 0 0]
[0 0 0 1 0]
sage: perm = Permutation([5, 4, 3, 2, 1])
sage: P = perm.to_matrix()
sage: L == P*R*P
True
sage: B == R.transpose()
True
sage: T == P*R.transpose()*P
True
    A polynomial may be used as input; however, a symbolic expression,
    even if it looks like a polynomial, is not regarded as such when used
    as input to this routine.  Obtaining the list of coefficients from a
    symbolic polynomial is one route to the companion matrix. ::
sage: x = polygen(QQ, 'x')
sage: p = x^3 - 4*x^2 + 8*x - 12
sage: companion_matrix(p)
[ 0 0 12]
[ 1 0 -8]
[ 0 1 4]
sage: y = var('y')
sage: q = y^3 -2*y + 1
sage: companion_matrix(q)
Traceback (most recent call last):
...
TypeError: input must be a polynomial (not a symbolic expression, see docstring), or other iterable, not y^3 - 2*y + 1
sage: coeff_list = [q(y=0)] + [q.coefficient(y^k) for k in range(1, q.degree(y)+1)]
sage: coeff_list
[1, -2, 0, 1]
sage: companion_matrix(coeff_list)
[ 0 0 -1]
[ 1 0 2]
[ 0 1 0]
The minimal polynomial of a companion matrix is equal to the
polynomial used to create it. Used in a block diagonal
construction, they can be used to create matrices with
any desired minimal polynomial, or characteristic polynomial. ::
sage: t = polygen(QQ, 't')
sage: p = t^12 - 7*t^4 + 28*t^2 - 456
sage: C = companion_matrix(p, format='top')
sage: q = C.minpoly(var='t'); q
t^12 - 7*t^4 + 28*t^2 - 456
sage: p == q
True
sage: p = t^3 + 3*t - 8
sage: q = t^5 + t - 17
sage: A = block_diagonal_matrix( companion_matrix(p),
....: companion_matrix(p^2),
....: companion_matrix(q),
....: companion_matrix(q) )
sage: A.charpoly(var='t').factor()
(t^3 + 3*t - 8)^3 * (t^5 + t - 17)^2
sage: A.minpoly(var='t').factor()
(t^3 + 3*t - 8)^2 * (t^5 + t - 17)
TESTS::
sage: companion_matrix([4, 5, 1], format='junk')
Traceback (most recent call last):
...
ValueError: format must be 'right', 'left', 'top' or 'bottom', not junk
sage: companion_matrix(sin(x))
Traceback (most recent call last):
...
TypeError: input must be a polynomial (not a symbolic expression, see docstring), or other iterable, not sin(x)
sage: companion_matrix([2, 3, 896])
Traceback (most recent call last):
...
ValueError: polynomial (or the polynomial implied by coefficients) must be monic, not a leading coefficient of 896
sage: F.<a> = GF(2^2)
sage: companion_matrix([4/3, a+1, 1])
Traceback (most recent call last):
...
TypeError: unable to find common ring for coefficients from polynomial
sage: A = companion_matrix([1])
sage: A.nrows(); A.ncols()
0
0
sage: A = companion_matrix([])
Traceback (most recent call last):
...
ValueError: polynomial cannot be specified by an empty list
AUTHOR:
- Rob Beezer (2011-05-19)
"""
import sage.matrix.constructor
if format not in ['right', 'left', 'top', 'bottom']:
raise ValueError("format must be 'right', 'left', 'top' or 'bottom', not {0}".format(format))
try:
poly = list(poly)
except TypeError:
raise TypeError('input must be a polynomial (not a symbolic expression, see docstring), or other iterable, not {0}'.format(poly))
n = len(poly) - 1
if n == -1:
raise ValueError('polynomial cannot be specified by an empty list')
    if poly[n] != 1:
raise ValueError('polynomial (or the polynomial implied by coefficients) must be monic, not a leading coefficient of {0}'.format(poly[n]))
entries = [0] * (n * n)
# 1's below diagonal, or above diagonal
if format in ['right', 'top']:
for i in range(n - 1):
entries[(i+1)*n + i] = 1
else:
for i in range(n-1):
entries[i*n + i+1] = 1
# right side, left side (reversed), bottom edge, top edge (reversed)
if format == 'right':
for i in range(n):
entries[i*n + n-1] = -poly[i]
elif format == 'left':
for i in range(n):
entries[(n-1-i)*n + 0] = -poly[i]
elif format == 'bottom':
for i in range(n):
entries[(n-1)*n + i] = -poly[i]
elif format == 'top':
for i in range(n):
entries[0*n + n-1-i] = -poly[i]
try:
M = sage.matrix.constructor.matrix(n, n, entries)
except TypeError:
raise TypeError("unable to find common ring for coefficients from polynomial")
return M
@matrix_method
def random_rref_matrix(parent, num_pivots):
r"""
Generate a matrix in reduced row-echelon form with a specified number of non-zero rows.
INPUT:
- ``parent`` -- A matrix space specifying the base ring, dimensions and
representation (dense/sparse) for the result. The base ring must be exact.
- ``num_pivots`` -- The number of non-zero rows in the result, i.e. the rank.
OUTPUT:
    A matrix in reduced row echelon form with ``num_pivots`` non-zero rows. If the
    base ring is `ZZ` or `QQ`, then the entries are all integers.
.. note::
It is easiest to use this function via a call to the
:func:`~sage.matrix.constructor.random_matrix`
function with the ``algorithm='echelon_form'`` keyword. We provide
one example accessing this function directly, while the remainder will
use this more general function.
EXAMPLES:
    Matrices generated are in reduced row-echelon form with specified rank. If the
    base ring is `QQ`, the result has only integer entries. ::
sage: from sage.matrix.constructor import random_rref_matrix
sage: matrix_space = sage.matrix.matrix_space.MatrixSpace(QQ, 5, 6)
sage: A = random_rref_matrix(matrix_space, num_pivots=4); A # random
[ 1 0 0 -6 0 -3]
[ 0 1 0 2 0 3]
[ 0 0 1 -4 0 -2]
[ 0 0 0 0 1 3]
[ 0 0 0 0 0 0]
sage: A.base_ring()
Rational Field
sage: (A.nrows(), A.ncols())
(5, 6)
sage: A in sage.matrix.matrix_space.MatrixSpace(ZZ, 5, 6)
True
sage: A.rank()
4
sage: A == A.rref()
True
Matrices can be generated over other exact rings. ::
sage: B = random_matrix(FiniteField(7), 4, 4, algorithm='echelon_form', num_pivots=3); B # random
[1 0 0 0]
[0 1 0 6]
[0 0 1 4]
[0 0 0 0]
sage: B.rank() == 3
True
sage: B.base_ring()
Finite Field of size 7
sage: B == B.rref()
True
TESTS:
Rank of a matrix must be an integer. ::
sage: random_matrix(QQ, 120, 56, algorithm='echelon_form', num_pivots=61/2)
Traceback (most recent call last):
...
TypeError: the number of pivots must be an integer.
Matrices must be generated over exact fields. ::
sage: random_matrix(RR, 40, 88, algorithm='echelon_form', num_pivots=39)
Traceback (most recent call last):
...
TypeError: the base ring must be exact.
Matrices must have the number of pivot columns be less than or equal to the number of rows. ::
sage: C=random_matrix(ZZ, 6,4, algorithm='echelon_form', num_pivots=7); C
Traceback (most recent call last):
...
ValueError: number of pivots cannot exceed the number of rows or columns.
Matrices must have the number of pivot columns be less than or equal to the number of columns. ::
sage: D=random_matrix(QQ, 1,3, algorithm='echelon_form', num_pivots=5); D
Traceback (most recent call last):
...
ValueError: number of pivots cannot exceed the number of rows or columns.
Matrices must have the number of pivot columns be greater than zero. ::
sage: random_matrix(QQ, 5, 4, algorithm='echelon_form', num_pivots=-1)
Traceback (most recent call last):
...
ValueError: the number of pivots must be zero or greater.
AUTHOR:
Billy Wonderly (2010-07)
"""
import sage.probability.probability_distribution as pd
from sage.misc.prandom import randint
try:
num_pivots = ZZ(num_pivots)
except TypeError:
raise TypeError("the number of pivots must be an integer.")
if num_pivots < 0:
raise ValueError("the number of pivots must be zero or greater.")
ring = parent.base_ring()
if not ring.is_exact():
raise TypeError("the base ring must be exact.")
num_row = parent.nrows()
num_col = parent.ncols()
if num_pivots > num_row or num_pivots > num_col:
raise ValueError("number of pivots cannot exceed the number of rows or columns.")
else:
one = ring.one()
# Create a matrix of the desired size to be modified and then returned.
return_matrix = copy(parent.zero_matrix())
        pivots = [0]  # Force the first column to be a pivot. No harm if there are no pivots at all.
# Probability distribution for the placement of leading one's.
pivot_generator = pd.RealDistribution("beta", [1.6, 4.3])
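        # beta(1.6, 4.3) is skewed toward 0, so pivot columns tend to land
        # toward the left side of the matrix.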
while len(pivots) < num_pivots:
pivot_column = int(pivot_generator.get_random_element() * num_col)
if pivot_column not in pivots:
pivots.append(pivot_column)
pivots.sort()
pivot_row = 0
# Use the list of pivot columns to set the pivot entries of the return_matrix to leading ones.
while pivot_row < num_pivots:
return_matrix[pivot_row, pivots[pivot_row]] = one
pivot_row += 1
if ring is QQ or ring is ZZ:
# Keep track of the non-pivot columns by using the pivot_index, start at the first column to
# the right of the initial pivot column, go until the first column to the left of the next
# pivot column.
for pivot_index in range(num_pivots-1):
for non_pivot_column_index in range(pivots[pivot_index]+1, pivots[pivot_index+1]):
entry_generator1 = pd.RealDistribution("beta", [6, 4])
# Experimental distribution used to generate the values.
for non_pivot_column_entry in range(pivot_index+1):
                        sign1 = (2 * randint(0, 1) - 1)
                        return_matrix[non_pivot_column_entry, non_pivot_column_index] = sign1 * int(entry_generator1.get_random_element() * ((1 - non_pivot_column_entry / return_matrix.ncols()) * 7))
# Use index to fill entries of the columns to the right of the last pivot column.
            for rest_non_pivot_column in range(pivots[num_pivots - 1] + 1, num_col):
                entry_generator2 = pd.RealDistribution("beta", [2.6, 4])
                # Experimental distribution used to generate small values.
                for rest_entries in range(num_pivots):
                    sign2 = (2 * randint(0, 1) - 1)
                    return_matrix[rest_entries, rest_non_pivot_column] = sign2 * int(entry_generator2.get_random_element() * 5)
else:
        for pivot_index in range(num_pivots - 1):
            for non_pivot_column_index in range(pivots[pivot_index] + 1, pivots[pivot_index + 1]):
                for non_pivot_column_entry in range(pivot_index + 1):
                    return_matrix[non_pivot_column_entry, non_pivot_column_index] = ring.random_element()
        for rest_non_pivot_column in range(pivots[num_pivots - 1] + 1, num_col):
            for rest_entries in range(num_pivots):
                return_matrix[rest_entries, rest_non_pivot_column] = ring.random_element()
return return_matrix
@matrix_method
def random_echelonizable_matrix(parent, rank, upper_bound=None, max_tries=100):
r"""
Generate a matrix of a desired size and rank, over a desired ring, whose reduced
row-echelon form has only integral values.
INPUT:
- ``parent`` -- A matrix space specifying the base ring, dimensions and
representation (dense/sparse) for the result. The base ring must be exact.
- ``rank`` -- Rank of result, i.e the number of non-zero rows in the
reduced row echelon form.
    - ``upper_bound`` -- If designated, size control of the matrix entries is desired.
      Set ``upper_bound`` to 1 more than the maximum value entries can achieve.
      If ``None``, no size control occurs. But see the warning below. (default: ``None``)

    - ``max_tries`` -- If designated, the number of tries used to generate each new random row;
      only matters when ``upper_bound`` is not ``None``. Used to prevent endless looping. (default: 100)
OUTPUT:
A matrix not in reduced row-echelon form with the desired dimensions and properties.
.. warning::
When ``upper_bound`` is set, it is possible for this constructor to
fail with a ``ValueError``. This may happen when the ``upper_bound``,
``rank`` and/or matrix dimensions are all so small that it becomes
infeasible or unlikely to create the requested matrix. If you *must*
have this routine return successfully, do not set ``upper_bound``.
.. note::
It is easiest to use this function via a call to the
:func:`~sage.matrix.constructor.random_matrix`
function with the ``algorithm='echelonizable'`` keyword. We provide
one example accessing this function directly, while the remainder will
use this more general function.
EXAMPLES:
Generated matrices have the desired dimensions, rank and entry size. The
matrix in reduced row-echelon form has only integer entries. ::
sage: from sage.matrix.constructor import random_echelonizable_matrix
sage: matrix_space = sage.matrix.matrix_space.MatrixSpace(QQ, 5, 6)
sage: A = random_echelonizable_matrix(matrix_space, rank=4, upper_bound=40); A
[ 3 4 12 39 18 22]
[ -1 -3 -9 -27 -16 -19]
[ 1 3 10 31 18 21]
[ -1 0 0 -2 2 2]
[ 0 1 2 8 4 5]
sage: A.rank()
4
sage: max(map(abs,A.list()))<40
True
sage: A.rref() == A.rref().change_ring(ZZ)
True
An example with default settings (i.e. no entry size control). ::
sage: C=random_matrix(QQ, 6, 7, algorithm='echelonizable', rank=5); C
[ 1 -5 -8 16 6 65 30]
[ 3 -14 -22 42 17 178 84]
[ -5 24 39 -79 -31 -320 -148]
[ 4 -15 -26 55 27 224 106]
[ -1 0 -6 29 8 65 17]
[ 3 -20 -32 72 14 250 107]
sage: C.rank()
5
sage: C.rref() == C.rref().change_ring(ZZ)
True
A matrix without size control may have very large entry sizes. ::
sage: D=random_matrix(ZZ, 7, 8, algorithm='echelonizable', rank=6); D
[ 1 2 8 -35 -178 -239 -284 778]
[ 4 9 37 -163 -827 -1111 -1324 3624]
[ 5 6 21 -88 -454 -607 -708 1951]
[ -4 -5 -22 97 491 656 779 -2140]
[ 4 4 13 -55 -283 -377 -436 1206]
[ 4 11 43 -194 -982 -1319 -1576 4310]
[ -1 -2 -13 59 294 394 481 -1312]
Matrices can be generated over any exact ring. ::
sage: F.<a>=GF(2^3)
sage: B = random_matrix(F, 4, 5, algorithm='echelonizable', rank=4, upper_bound=None); B
[ 1 a + 1 0 a^2 + a + 1 1]
[ a a^2 + a + 1 a^2 + 1 a^2 + a 0]
[ a^2 + a 1 1 a^2 + a a + 1]
[a^2 + a + 1 a^2 + a + 1 a^2 0 a^2 + a]
sage: B.rank()
4
Square matrices over ZZ or QQ with full rank are always unimodular. ::
sage: E=random_matrix(QQ, 7, 7, algorithm='echelonizable', rank=7); E
[ 1 1 7 -29 139 206 413]
[ -2 -1 -10 41 -197 -292 -584]
[ 2 5 27 -113 541 803 1618]
[ 4 0 14 -55 268 399 798]
[ 3 1 8 -32 152 218 412]
[ -3 -2 -18 70 -343 -506 -1001]
[ 1 -2 -1 1 -2 9 52]
sage: det(E)
1
TESTS:
Matrices must have a rank zero or greater, and less than
both the number of rows and the number of columns. ::
sage: random_matrix(QQ, 3, 4, algorithm='echelonizable', rank=-1)
Traceback (most recent call last):
...
ValueError: matrices must have rank zero or greater.
sage: random_matrix(QQ, 3, 8, algorithm='echelonizable', rank=4)
Traceback (most recent call last):
...
ValueError: matrices cannot have rank greater than min(ncols,nrows).
sage: random_matrix(QQ, 8, 3, algorithm='echelonizable', rank=4)
Traceback (most recent call last):
...
ValueError: matrices cannot have rank greater than min(ncols,nrows).
The base ring must be exact. ::
sage: random_matrix(RR, 3, 3, algorithm='echelonizable', rank=2)
Traceback (most recent call last):
...
TypeError: the base ring must be exact.
Works for rank==1, too. ::
sage: random_matrix( QQ, 3, 3, algorithm='echelonizable', rank=1).ncols()
3
AUTHOR:
Billy Wonderly (2010-07)
"""
from sage.misc.prandom import randint
ring = parent.base_ring()
rows = parent.nrows()
if rank < 0:
raise ValueError("matrices must have rank zero or greater.")
if rank > min(rows,parent.ncols()):
raise ValueError("matrices cannot have rank greater than min(ncols,nrows).")
matrix = random_rref_matrix(parent, rank)
    # Entries of matrices over ZZ or QQ can get large; entry size is regulated by finding the largest
    # entry of the resulting matrix after the addition of a scalar multiple of a row.
if ring is QQ or ring is ZZ:
# If upper_bound is not set, don't control entry size.
if upper_bound is None:
# If size control is not desired, the routine will run slightly faster, particularly with large matrices.
for pivots in range(rank-1, -1, -1):
row_index = 0
while row_index < rows:
if pivots == row_index:
row_index += 1
if pivots != row_index and row_index != rows:
matrix.add_multiple_of_row(row_index,
matrix.pivot_rows()[pivots],
randint(-5, 5))
row_index += 1
if rows > 1:
matrix.add_multiple_of_row(0, randint(1,rows-1), randint(-3,3))
else:
if rank == 1: # would be better just to have a special generator...
tries = 0
while max(abs(c) for c in matrix.list()) >= upper_bound:
matrix = random_rref_matrix(parent, rank)
tries += 1
if tries > max_tries: # to prevent endless attempts
raise ValueError("tried "+str(max_tries)+" times to get a rank 1 random matrix. Try bigger upper_bound?")
matrix_copy = matrix
for pivots in range(len(matrix.pivots()) - 1, -1, -1):
# keep track of the pivot column positions from the pivot column with the largest index to
# the one with the smallest.
row_index = 0
tries = 0
while row_index < rows:
# To each row in a pivot column add a scalar multiple of the pivot row.
# for full rank, square matrices, using only this row operation preserves the determinant of 1.
                if pivots != row_index:
                    # Skip the pivot row itself, so its leading one is not
                    # destroyed by adding a multiple of the row to itself.
                    matrix_copy = matrix.with_added_multiple_of_row(row_index, matrix.pivot_rows()[pivots], randint(-5, 5))
                    tries += 1
                    # Range for scalar multiples determined experimentally.
                if max(map(abs, matrix_copy.list())) < upper_bound:
                    # Continue if the largest entry after a row operation is within the bound.
                    matrix = matrix_copy
                    row_index += 1
                    tries = 0
                if tries > max_tries:  # to prevent endless unsuccessful row adding
                    raise ValueError("tried " + str(max_tries) + " times to get row number " + str(row_index) + ". Try bigger upper_bound?")
            # The leading one in row one has not been altered, so add a scalar
            # multiple of a random row to row one.
            row1 = 0
            if rows > 1:
                while row1 < 1:
                    matrix_copy = matrix.with_added_multiple_of_row(0, randint(1, rows - 1), randint(-3, 3))
                    if max(map(abs, matrix_copy.list())) < upper_bound:
                        matrix = matrix_copy
                        row1 += 1
    # If the matrix is generated over a different ring, random elements from the
    # designated ring are used, and the routine runs similarly to the size-unchecked
    # version for rationals and integers.
    else:
        for pivots in range(rank - 1, -1, -1):
            row_index = 0
            while row_index < rows:
                if pivots == row_index:
                    row_index += 1
                if pivots != row_index and row_index != rows:
                    matrix.add_multiple_of_row(row_index, matrix.pivot_rows()[pivots], ring.random_element())
                    row_index += 1
        if rows > 1:
            matrix.add_multiple_of_row(0, randint(1, rows - 1), ring.random_element())
return matrix
@matrix_method
def random_subspaces_matrix(parent, rank=None):
r"""
    Create a matrix of the designated size and rank whose right and
    left null spaces, column space, and row space have desirable
    properties that simplify working with the subspaces.
INPUT:
- ``parent`` - A matrix space specifying the base ring, dimensions, and
representation (dense/sparse) for the result. The base ring must be exact.
- ``rank`` - The desired rank of the return matrix (default: None).
OUTPUT:
A matrix whose natural basis vectors for its four subspaces, when
computed, have reasonably sized, integral valued, entries.
.. note::
It is easiest to use this function via a call to the
:func:`~sage.matrix.constructor.random_matrix`
function with the ``algorithm='subspaces'`` keyword. We provide
one example accessing this function directly, while the remainder will
use this more general function.
EXAMPLES:
A 6x8 matrix with designated rank of 3. The four subspaces are
determined using one simple routine in which we augment the
original matrix with the equal row dimension identity matrix. The
resulting matrix is then put in reduced row-echelon form and the
subspaces can then be determined by analyzing subdivisions of this
matrix. See the four subspaces routine in [Bee]_ for more. ::
sage: from sage.matrix.constructor import random_subspaces_matrix
sage: matrix_space = sage.matrix.matrix_space.MatrixSpace(QQ, 6, 8)
sage: B = random_subspaces_matrix(matrix_space, rank=3); B
[ -15 -4 83 35 -24 47 -74 50]
[ -16 -7 94 34 -25 38 -75 50]
[ 89 34 -513 -196 141 -235 426 -285]
[ 17 6 -97 -38 27 -47 82 -55]
[ 7 3 -41 -15 11 -17 33 -22]
[ -5 -2 29 11 -8 13 -24 16]
sage: B.rank()
3
sage: B.nullity()
3
sage: (B.nrows(), B.ncols())
(6, 8)
sage: all(x in ZZ for x in B.list())
True
sage: B_expanded = B.augment(identity_matrix(6)).rref()
sage: all(x in ZZ for x in B_expanded.list())
True
sage: B_expanded
[ 1 0 -5 0 -1 1 0 -1 0 0 0 3 10 24]
[ 0 1 -2 0 1 2 1 0 0 0 0 -2 -3 -11]
[ 0 0 0 1 -1 2 -2 1 0 0 0 1 4 9]
[ 0 0 0 0 0 0 0 0 1 0 0 2 -2 1]
[ 0 0 0 0 0 0 0 0 0 1 0 0 3 1]
[ 0 0 0 0 0 0 0 0 0 0 1 -3 -4 2]
Check that we fixed :trac:`10543` (echelon forms should be immutable)::
sage: B_expanded.is_immutable()
True
We want to modify B_expanded, so replace it with a copy::
sage: B_expanded = copy(B_expanded)
sage: B_expanded.subdivide(B.nrows()-B.nullity(),B.ncols());B_expanded
[ 1 0 -5 0 -1 1 0 -1| 0 0 0 3 10 24]
[ 0 1 -2 0 1 2 1 0| 0 0 0 -2 -3 -11]
[ 0 0 0 1 -1 2 -2 1| 0 0 0 1 4 9]
[-------------------------------+-----------------------]
[ 0 0 0 0 0 0 0 0| 1 0 0 2 -2 1]
[ 0 0 0 0 0 0 0 0| 0 1 0 0 3 1]
[ 0 0 0 0 0 0 0 0| 0 0 1 -3 -4 2]
sage: C=B_expanded.subdivision(0,0)
sage: C
[ 1 0 -5 0 -1 1 0 -1]
[ 0 1 -2 0 1 2 1 0]
[ 0 0 0 1 -1 2 -2 1]
sage: L=B_expanded.subdivision(1,1)
sage: L
[ 1 0 0 2 -2 1]
[ 0 1 0 0 3 1]
[ 0 0 1 -3 -4 2]
sage: B.right_kernel() == C.right_kernel()
True
sage: B.row_space() == C.row_space()
True
sage: B.column_space() == L.right_kernel()
True
sage: B.left_kernel() == L.row_space()
True
    An example showing that the null space of the ``L`` matrix is the column space of the starting matrix. ::
sage: A = random_matrix(QQ, 5, 7, algorithm='subspaces', rank=None); A
[ -63 13 -71 29 -163 150 -268]
[ 24 -5 27 -11 62 -57 102]
[ 14 -3 16 -7 37 -34 60]
[ -4 1 -4 1 -9 8 -16]
[ 9 -2 10 -4 23 -21 38]
sage: (A.nrows(), A.ncols())
(5, 7)
sage: all(x in ZZ for x in A.list())
True
sage: A.nullity()
2
sage: A_expanded=A.augment(identity_matrix(5)).rref()
sage: A_expanded
[ 1 0 0 2 -1 1 2 0 2 0 -4 -7]
[ 0 1 0 1 -1 0 0 0 4 0 -3 -12]
[ 0 0 1 -2 3 -3 2 0 -1 0 3 4]
[ 0 0 0 0 0 0 0 1 3 0 0 -1]
[ 0 0 0 0 0 0 0 0 0 1 -1 -2]
sage: all(x in ZZ for x in A_expanded.list())
True
sage: C=A_expanded.submatrix(0,0,A.nrows()-A.nullity(),A.ncols())
sage: L=A_expanded.submatrix(A.nrows()-A.nullity(),A.ncols())
sage: A.right_kernel() == C.right_kernel()
True
sage: A.row_space() == C.row_space()
True
sage: A.column_space() == L.right_kernel()
True
sage: A.left_kernel() == L.row_space()
True
TESTS:
The designated rank of the L matrix cannot be greater than the
number of desired rows, nor can the rank be negative. ::
sage: random_matrix(QQ, 19, 20, algorithm='subspaces', rank=21)
Traceback (most recent call last):
...
ValueError: rank cannot exceed the number of rows or columns.
sage: random_matrix(QQ, 19, 20, algorithm='subspaces', rank=-1)
Traceback (most recent call last):
...
ValueError: matrices must have rank zero or greater.
AUTHOR:
Billy Wonderly (2010-07)
"""
import sage.probability.probability_distribution as pd
ring = parent.base_ring()
rows = parent.nrows()
columns = parent.ncols()
# If rank is not designated, generate using probability distribution
# skewing to smaller numbers, always at least 1.
if rank is None:
left_nullity_generator = pd.RealDistribution("beta", [1.4, 5.5])
nullity = int(left_nullity_generator.get_random_element()*(rows-1) + 1)
rank = rows - nullity
    if rank < 0:
raise ValueError("matrices must have rank zero or greater.")
if rank > rows or rank > columns:
raise ValueError("rank cannot exceed the number of rows or columns.")
nullity = rows - rank
B = random_matrix(ring, rows, columns, algorithm='echelon_form',
num_pivots=rank)
# Create a nonsingular matrix whose columns will be used to stack a matrix
# over the L matrix, forming a nonsingular matrix.
K_nonzero_columns = random_matrix(ring, rank, rank,
algorithm='echelonizable', rank=rank)
K = matrix(QQ, rank, rows)
L = random_matrix(ring, nullity, rows, algorithm='echelon_form',
num_pivots=nullity)
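    # Place the columns of K_nonzero_columns into the non-pivot columns of L,
    # so that stacking K on top of L yields a nonsingular square matrix J.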
for column in range(len(L.nonpivots())):
for entry in range(rank):
K[entry, L.nonpivots()[column]] = K_nonzero_columns[entry, column]
J = K.stack(L)
# By multiplying the B matrix by J.inverse() we hide the B matrix of the
# solution using row operations required to change the solution K matrix to
# the identity matrix.
return J.inverse() * B
@matrix_method
def random_unimodular_matrix(parent, upper_bound=None, max_tries=100):
r"""
Generate a random unimodular (determinant 1) matrix of a desired size over a desired ring.
INPUT:
- ``parent`` - A matrix space specifying the base ring, dimensions
and representation (dense/sparse) for the result. The base ring
must be exact.
- ``upper_bound`` - For large matrices over QQ or ZZ,
``upper_bound`` is the largest value matrix entries can achieve. But
see the warning below.
    - ``max_tries`` - If designated, the number of tries used to generate each new random row;
      only matters when ``upper_bound`` is not ``None``. Used to prevent endless looping. (default: 100)
OUTPUT:
An invertible matrix with the desired properties and determinant 1.
.. warning::
When ``upper_bound`` is set, it is possible for this constructor to
fail with a ``ValueError``. This may happen when the ``upper_bound``,
``rank`` and/or matrix dimensions are all so small that it becomes
infeasible or unlikely to create the requested matrix. If you *must*
have this routine return successfully, do not set ``upper_bound``.
.. note::
It is easiest to use this function via a call to the
:func:`~sage.matrix.constructor.random_matrix`
function with the ``algorithm='unimodular'`` keyword. We provide
one example accessing this function directly, while the remainder will
use this more general function.
EXAMPLES:
    A matrix of size 5 over QQ. ::
sage: from sage.matrix.constructor import random_unimodular_matrix
sage: matrix_space = sage.matrix.matrix_space.MatrixSpace(QQ, 5)
sage: A = random_unimodular_matrix(matrix_space); A
[ 0 3 8 -30 -30]
[ 0 1 4 -18 -13]
[ -1 0 0 3 0]
[ 4 16 71 -334 -222]
[ -1 -1 -9 50 24]
sage: det(A)
1
    A matrix of size 7 with entries no larger than 50. ::
sage: B = random_matrix(ZZ, 7, algorithm='unimodular', upper_bound=50);B
[-14 17 14 -31 43 24 46]
[ -5 6 5 -11 15 9 18]
[ -2 5 3 -7 15 -3 -16]
[ 1 -2 -3 4 -3 -7 -21]
[ -1 4 1 -4 14 -10 -37]
[ 3 -3 -1 6 -12 4 25]
[ 4 -4 -2 7 -13 -2 1]
sage: det(B)
1
    A matrix over the number field in `y` with defining polynomial `y^2-2y-2`. ::
sage: y = var('y')
sage: K=NumberField(y^2-2*y-2,'y')
sage: C=random_matrix(K, 3, algorithm='unimodular');C
[ -5*y + 11 10*y - 30 -695*y + 2366]
[ 5 5*y - 9 -535*y + 588]
[ y - 1 3*y - 1 -35*y - 273]
sage: det(C)
1
TESTS:
Unimodular matrices are square. ::
sage: random_matrix(QQ, 5, 6, algorithm='unimodular')
Traceback (most recent call last):
...
TypeError: a unimodular matrix must be square.
Only matrices over ZZ and QQ can have size control. ::
sage: F.<a>=GF(5^7)
sage: random_matrix(F, 5, algorithm='unimodular', upper_bound=20)
Traceback (most recent call last):
...
TypeError: only matrices over ZZ or QQ can have size control.
AUTHOR:
Billy Wonderly (2010-07)
"""
    ring = parent.base_ring()
    size = parent.nrows()
    if parent.nrows() != parent.ncols():
        raise TypeError("a unimodular matrix must be square.")
    if upper_bound is not None and (ring != ZZ and ring != QQ):
        raise TypeError("only matrices over ZZ or QQ can have size control.")
    if upper_bound is None:
        # random_echelonizable_matrix() always returns a determinant-one matrix if given full rank.
        return random_matrix(ring, size, algorithm='echelonizable', rank=size)
    else:
        # At this point the ring is ZZ or QQ, so size control is available.
        return random_matrix(ring, size, algorithm='echelonizable', rank=size, upper_bound=upper_bound, max_tries=max_tries)
@matrix_method
def random_diagonalizable_matrix(parent,eigenvalues=None,dimensions=None):
"""
Create a random matrix that diagonalizes nicely.
    To be used as a teaching tool.  Returned matrices have only real
    eigenvalues.
INPUT:
If eigenvalues and dimensions are not specified in a list,
they will be assigned randomly.
    - ``parent`` - a matrix space specifying the desired size of the square matrix.
- ``eigenvalues`` - the list of desired eigenvalues (default=None).
- ``dimensions`` - the list of dimensions corresponding to each
eigenspace (default=None).
OUTPUT:
A square, diagonalizable, matrix with only integer entries. The
eigenspaces of this matrix, if computed by hand, give basis
vectors with only integer entries.
.. note::
It is easiest to use this function via a call to the
:func:`~sage.matrix.constructor.random_matrix`
function with the ``algorithm='diagonalizable'`` keyword. We provide
one example accessing this function directly, while the remainder will
use this more general function.
EXAMPLES:
A diagonalizable matrix, size 5. ::
sage: from sage.matrix.constructor import random_diagonalizable_matrix
sage: matrix_space = sage.matrix.matrix_space.MatrixSpace(QQ, 5)
sage: A = random_diagonalizable_matrix(matrix_space); A
[ 90 -80 56 -448 -588]
[ 60 0 28 -324 -204]
[ 60 -72 32 -264 -432]
[ 30 -16 16 -152 -156]
[ -10 -8 -4 60 8]
sage: sorted(A.eigenvalues())
[-10, -8, -4, 0, 0]
sage: S=A.right_eigenmatrix()[1]; S
[ 1 1 1 1 0]
[ 1/2 0 2/3 0 1]
[ 4/7 9/10 2/3 6/7 -3/7]
[ 2/7 1/5 1/3 3/14 1/7]
[-1/14 1/10 -1/9 1/14 -2/7]
sage: S_inverse=S.inverse(); S_inverse
[ 0 0 -14 42 42]
[ 0 10 0 -10 30]
[ -9 0 0 36 18]
[ 10 -10 14 -68 -90]
[ 6 1 7 -45 -33]
sage: S_inverse*A*S
[ -4 0 0 0 0]
[ 0 -8 0 0 0]
[ 0 0 -10 0 0]
[ 0 0 0 0 0]
[ 0 0 0 0 0]
A diagonalizable matrix with eigenvalues and dimensions designated,
with a check that if eigenvectors were calculated by hand
entries would all be integers. ::
sage: B = random_matrix(QQ, 6, algorithm='diagonalizable', eigenvalues=[-12,4,6],dimensions=[2,3,1]); B
[ 2 -64 16 206 56 -142]
[ 14 -28 -64 46 40 -14]
[ -4 -16 4 44 32 -28]
[ 6 0 -32 -22 8 26]
[ 0 -16 0 48 20 -32]
[ 2 0 -16 -14 8 18]
sage: all(x in ZZ for x in (B-(-12*identity_matrix(6))).rref().list())
True
sage: all(x in ZZ for x in (B-(4*identity_matrix(6))).rref().list())
True
sage: all(x in ZZ for x in (B-(6*identity_matrix(6))).rref().list())
True
sage: S=B.right_eigenmatrix()[1]; S_inverse=S.inverse(); S_inverse*B*S
[ 6 0 0 0 0 0]
[ 0 -12 0 0 0 0]
[ 0 0 -12 0 0 0]
[ 0 0 0 4 0 0]
[ 0 0 0 0 4 0]
[ 0 0 0 0 0 4]
TESTS:
Eigenvalues must all be integers. ::
sage: random_matrix(QQ,3,algorithm='diagonalizable', eigenvalues=[2+I,2-I,2],dimensions=[1,1,1])
Traceback (most recent call last):
...
TypeError: eigenvalues must be integers.
Diagonal matrices must be square. ::
sage: random_matrix(QQ, 5, 7, algorithm='diagonalizable', eigenvalues=[-5,2,-3], dimensions=[1,1,3])
Traceback (most recent call last):
...
TypeError: a diagonalizable matrix must be square.
A list of eigenvalues must be accompanied with a list of dimensions. ::
sage: random_matrix(QQ,10,algorithm='diagonalizable',eigenvalues=[4,8])
Traceback (most recent call last):
...
ValueError: the list of eigenvalues must have a list of dimensions corresponding to each eigenvalue.
A list of dimensions must be accompanied with a list of eigenvalues. ::
sage: random_matrix(QQ, 10,algorithm='diagonalizable',dimensions=[2,2,4,2])
Traceback (most recent call last):
...
ValueError: the list of dimensions must have a list of corresponding eigenvalues.
The sum of the eigenvalue dimensions must equal the size of the matrix. ::
sage: random_matrix(QQ,12,algorithm='diagonalizable',eigenvalues=[4,2,6,-1],dimensions=[2,3,5,1])
Traceback (most recent call last):
...
ValueError: the size of the matrix must equal the sum of the dimensions.
Each eigenspace dimension must be at least 1. ::
sage: random_matrix(QQ,9,algorithm='diagonalizable',eigenvalues=[-15,22,8,-4,90,12],dimensions=[4,2,2,4,-3,0])
Traceback (most recent call last):
...
ValueError: eigenspaces must have a dimension of at least 1.
Each eigenvalue must have a corresponding eigenspace dimension. ::
sage: random_matrix(QQ,12,algorithm='diagonalizable',eigenvalues=[4,2,6,-1],dimensions=[4,3,5])
Traceback (most recent call last):
...
ValueError: each eigenvalue must have a corresponding dimension and each dimension a corresponding eigenvalue.
Each dimension must have an eigenvalue paired to it. ::
sage: random_matrix(QQ,12,algorithm='diagonalizable',eigenvalues=[4,2,6],dimensions=[2,3,5,2])
Traceback (most recent call last):
...
ValueError: each eigenvalue must have a corresponding dimension and each dimension a corresponding eigenvalue.
.. TODO::
Modify the routine to allow for complex eigenvalues.
AUTHOR:
Billy Wonderly (2010-07)
"""
from sage.misc.prandom import randint
size = parent.nrows()
if parent.nrows() != parent.ncols():
raise TypeError("a diagonalizable matrix must be square.")
if eigenvalues is not None and dimensions is None:
raise ValueError("the list of eigenvalues must have a list of dimensions corresponding to each eigenvalue.")
if eigenvalues is None and dimensions is not None:
raise ValueError("the list of dimensions must have a list of corresponding eigenvalues.")
if eigenvalues is None and dimensions is None:
values = []
        # Create a list with "size" number of entries.
        for eigen_index in range(size):
            eigenvalue = randint(-10, 10)
            values.append(eigenvalue)
        values.sort()
        dimensions = []
        eigenvalues = []
        # Create a list with no duplicate values to be the eigenvalues.
        for eigenvalue in range(size):
            if values[eigenvalue] not in eigenvalues:
                eigenvalues.append(values[eigenvalue])
        for dimension in range(len(eigenvalues)):
            # The dimension equals how many times an eigenvalue was generated in the 'values' list.
            dimensions.append(values.count(eigenvalues[dimension]))
    size_check = sum(dimensions)
if not all(x in ZZ for x in eigenvalues):
raise TypeError("eigenvalues must be integers.")
if size != size_check:
raise ValueError("the size of the matrix must equal the sum of the dimensions.")
if min(dimensions) < 1:
raise ValueError("eigenspaces must have a dimension of at least 1.")
if len(eigenvalues) != len(dimensions):
raise ValueError("each eigenvalue must have a corresponding dimension and each dimension a corresponding eigenvalue.")
#sort the dimensions in order of increasing size, and sort the eigenvalues list in an identical fashion, to maintain corresponding values.
dimensions_sort = sorted(zip(dimensions, eigenvalues))
dimensions = [x[0] for x in dimensions_sort]
eigenvalues = [x[1] for x in dimensions_sort]
#Create the matrix of eigenvalues on the diagonal. Use a lower limit and upper limit determined by the eigenvalue dimensions.
diagonal_matrix = matrix(QQ, size)
up_bound = 0
low_bound = 0
for row_index in range(len(dimensions)):
up_bound = up_bound + dimensions[row_index]
        for entry in range(low_bound, up_bound):
            diagonal_matrix[entry, entry] = eigenvalues[row_index]
        low_bound = low_bound + dimensions[row_index]
# Create a matrix to hold each of the eigenvectors as its columns, begin with an identity matrix so that after row and column
# operations the resulting matrix will be unimodular.
eigenvector_matrix = matrix(QQ, size, size, 1)
upper_limit = 0
lower_limit = 0
#run the routine over the necessary number of columns corresponding eigenvalue dimension.
for dimension_index in range(len(dimensions)-1):
        upper_limit = upper_limit + dimensions[dimension_index]
        lowest_index_row_with_one = size - dimensions[dimension_index]
        #assign a one to the row that is the eigenvalue dimension rows up from the bottom row then assign ones diagonally down to the right.
        for eigen_ones in range(lower_limit, upper_limit):
            eigenvector_matrix[lowest_index_row_with_one, eigen_ones] = 1
            lowest_index_row_with_one += 1
        lower_limit = lower_limit + dimensions[dimension_index]
#Create a list to give the eigenvalue dimension corresponding to each column.
dimension_check = []
for i in range(len(dimensions)):
for k in range(dimensions[i]):
dimension_check.append(dimensions[i])
#run routine over the rows that are in the range of the protected ones. Use addition of column multiples to fill entries.
for dimension_multiplicity in range(max(dimensions),min(dimensions),-1):
        highest_one_row = size - dimension_multiplicity
        highest_one_column = 0
        #find the column with the protected one in the lowest indexed row.
        while eigenvector_matrix[highest_one_row, highest_one_column] == 0:
            highest_one_column += 1
        #dimension_check determines if column has a low enough eigenvalue dimension to take a column multiple.
        for bottom_entry_filler in range(len(dimension_check)):
            if dimension_check[bottom_entry_filler] < dimension_multiplicity and eigenvector_matrix[highest_one_row, bottom_entry_filler] == 0:
                # randint range determined experimentally to keep entries manageable.
                eigenvector_matrix.add_multiple_of_column(bottom_entry_filler, highest_one_column, randint(-4, 4))
#Fill remaining rows using scalar row addition.
    for row in range(size - max(dimensions), size):
        for upper_row in range(size - max(dimensions)):
            # range of multiplier determined experimentally so that entries stay manageable for small matrices
            eigenvector_matrix.add_multiple_of_row(upper_row, row, randint(-4, 4))
return eigenvector_matrix*diagonal_matrix*(eigenvector_matrix.inverse())
@matrix_method
def vector_on_axis_rotation_matrix(v, i, ring=None):
r"""
Return a rotation matrix `M` such that `det(M)=1` sending the vector
`v` on the i-th axis so that all other coordinates of `Mv` are zero.
.. NOTE::
Such a matrix is not uniquely determined. This function returns one
such matrix.
INPUT:
    - ``v`` -- vector
- ``i`` -- integer
- ``ring`` -- ring (optional, default: ``None``) of the resulting matrix
OUTPUT:
A matrix
EXAMPLES::
sage: from sage.matrix.constructor import vector_on_axis_rotation_matrix
sage: v = vector((1,2,3))
sage: vector_on_axis_rotation_matrix(v, 2) * v
(0, 0, sqrt(14))
sage: vector_on_axis_rotation_matrix(v, 1) * v
(0, sqrt(14), 0)
sage: vector_on_axis_rotation_matrix(v, 0) * v
(sqrt(14), 0, 0)
::
sage: x,y = var('x,y')
sage: v = vector((x,y))
sage: vector_on_axis_rotation_matrix(v, 1)
[ y/sqrt(x^2 + y^2) -x/sqrt(x^2 + y^2)]
[ x/sqrt(x^2 + y^2) y/sqrt(x^2 + y^2)]
sage: vector_on_axis_rotation_matrix(v, 0)
[ x/sqrt(x^2 + y^2) y/sqrt(x^2 + y^2)]
[-y/sqrt(x^2 + y^2) x/sqrt(x^2 + y^2)]
sage: vector_on_axis_rotation_matrix(v, 0) * v
(x^2/sqrt(x^2 + y^2) + y^2/sqrt(x^2 + y^2), 0)
sage: vector_on_axis_rotation_matrix(v, 1) * v
(0, x^2/sqrt(x^2 + y^2) + y^2/sqrt(x^2 + y^2))
::
sage: v = vector((1,2,3,4))
sage: vector_on_axis_rotation_matrix(v, 0) * v
(sqrt(30), 0, 0, 0)
sage: vector_on_axis_rotation_matrix(v, 0, ring=RealField(10))
[ 0.18 0.37 0.55 0.73]
[-0.98 0.068 0.10 0.14]
[ 0.00 -0.93 0.22 0.30]
[ 0.00 0.00 -0.80 0.60]
sage: vector_on_axis_rotation_matrix(v, 0, ring=RealField(10)) * v
(5.5, 0.00..., 0.00..., 0.00...)
AUTHORS:
Sébastien Labbé (April 2010)
"""
dim = len(v)
v = vector(v)
m = identity_matrix(dim, sparse=True)
L = list(range(i - 1, -1, -1)) + list(range(dim - 1, i, -1))
for i in L:
rot = ith_to_zero_rotation_matrix(v, i, ring=ring)
v = rot * v
m = rot * m
return m
@matrix_method
def ith_to_zero_rotation_matrix(v, i, ring=None):
r"""
Return a rotation matrix that sends the i-th coordinates of the
vector v to zero by doing a rotation with the (i-1)-th coordinate.
INPUT:
    - ``v`` -- vector
- ``i`` -- integer
- ``ring`` -- ring (optional, default: ``None``) of the resulting matrix
OUTPUT:
A matrix
EXAMPLES::
sage: from sage.matrix.constructor import ith_to_zero_rotation_matrix
sage: v = vector((1,2,3))
sage: ith_to_zero_rotation_matrix(v, 2)
[ 1 0 0]
[ 0 2/13*sqrt(13) 3/13*sqrt(13)]
[ 0 -3/13*sqrt(13) 2/13*sqrt(13)]
sage: ith_to_zero_rotation_matrix(v, 2) * v
(1, sqrt(13), 0)
::
sage: ith_to_zero_rotation_matrix(v, 0)
[ 3/10*sqrt(10) 0 -1/10*sqrt(10)]
[ 0 1 0]
[ 1/10*sqrt(10) 0 3/10*sqrt(10)]
sage: ith_to_zero_rotation_matrix(v, 1)
[ 1/5*sqrt(5) 2/5*sqrt(5) 0]
[-2/5*sqrt(5) 1/5*sqrt(5) 0]
[ 0 0 1]
sage: ith_to_zero_rotation_matrix(v, 2)
[ 1 0 0]
[ 0 2/13*sqrt(13) 3/13*sqrt(13)]
[ 0 -3/13*sqrt(13) 2/13*sqrt(13)]
::
sage: ith_to_zero_rotation_matrix(v, 0) * v
(0, 2, sqrt(10))
sage: ith_to_zero_rotation_matrix(v, 1) * v
(sqrt(5), 0, 3)
sage: ith_to_zero_rotation_matrix(v, 2) * v
(1, sqrt(13), 0)
Other ring::
sage: ith_to_zero_rotation_matrix(v, 2, ring=RR)
[ 1.00000000000000 0.000000000000000 0.000000000000000]
[ 0.000000000000000 0.554700196225229 0.832050294337844]
[ 0.000000000000000 -0.832050294337844 0.554700196225229]
sage: ith_to_zero_rotation_matrix(v, 2, ring=RDF)
[ 1.0 0.0 0.0]
[ 0.0 0.5547001962252291 0.8320502943378437]
[ 0.0 -0.8320502943378437 0.5547001962252291]
On the symbolic ring::
sage: x,y,z = var('x,y,z')
sage: v = vector((x,y,z))
sage: ith_to_zero_rotation_matrix(v, 2)
[ 1 0 0]
[ 0 y/sqrt(y^2 + z^2) z/sqrt(y^2 + z^2)]
[ 0 -z/sqrt(y^2 + z^2) y/sqrt(y^2 + z^2)]
sage: ith_to_zero_rotation_matrix(v, 2) * v
(x, y^2/sqrt(y^2 + z^2) + z^2/sqrt(y^2 + z^2), 0)
TESTS::
sage: ith_to_zero_rotation_matrix((1,0,0), 0)
[ 0 0 -1]
[ 0 1 0]
[ 1 0 0]
sage: ith_to_zero_rotation_matrix((1,0,0), 1)
[1 0 0]
[0 1 0]
[0 0 1]
sage: ith_to_zero_rotation_matrix((1,0,0), 2)
[1 0 0]
[0 1 0]
[0 0 1]
AUTHORS:
Sébastien Labbé (April 2010)
"""
if ring is not None:
# coerce the vector so that computations
# are done in that ring
v = vector(ring, v)
dim = len(v)
i = i % dim
j = (i - 1) % dim
a, b = v[j], v[i]
if b == 0:
return identity_matrix(dim, sparse=True)
from sage.functions.all import sqrt
norm = sqrt(a * a + b * b)
aa = a / norm
bb = b / norm
entries = {(k, k): 1 for k in range(dim)}
entries.update({(j, j): aa, (j, i): bb, (i, j): -bb, (i, i): aa})
return matrix(entries, nrows=dim, ring=ring)
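# Note added for clarity: the ``entries`` dictionary above embeds a 2x2 Givens
# rotation
#     [ a/n  b/n]
#     [-b/n  a/n],   n = sqrt(a^2 + b^2),
# into the identity at rows/columns (j, i); this zeroes the i-th coordinate of
# v while preserving its norm, and the block has determinant 1 as required.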
@matrix_method
def hilbert(dim, ring=QQ):
r"""
Return a Hilbert matrix of the given dimension.
The `n` dimensional Hilbert matrix is a square matrix with entries being
unit fractions,
.. MATH::
H_{ij} = \frac{1}{i+j-1},\qquad i, j = 1,\ldots, n.
For more information see the :wikipedia:`Hilbert_matrix`.
INPUT:
- ``dim`` -- integer, the dimension of the Hilbert matrix
    - ``ring`` -- base ring (optional, default: ``QQ``) of the resulting matrix
EXAMPLES::
sage: matrix.hilbert(5)
[ 1 1/2 1/3 1/4 1/5]
[1/2 1/3 1/4 1/5 1/6]
[1/3 1/4 1/5 1/6 1/7]
[1/4 1/5 1/6 1/7 1/8]
[1/5 1/6 1/7 1/8 1/9]
"""
def entries(i, j):
return ZZ.one() / (i + j + 1)
return matrix(entries, nrows=dim, ncols=dim, ring=ring)
@matrix_method
def vandermonde(v, ring=None):
r"""
Return a Vandermonde matrix of the given vector.
The `n` dimensional Vandermonde matrix is a square matrix with columns
being the powers of a given vector `v`,
.. MATH::
V_{ij} = v_i^{j-1},\qquad i, j = 1,\ldots, n.
For more information see the :wikipedia:`Vandermonde_matrix`.
INPUT:
- ``v`` -- vector, the second column of the Vandermonde matrix
- ``ring`` -- base ring (optional, default: None) of the resulting matrix
EXAMPLES:
A Vandermonde matrix of order three over the symbolic ring::
sage: matrix.vandermonde(SR.var(['x0', 'x1', 'x2']))
[ 1 x0 x0^2]
[ 1 x1 x1^2]
[ 1 x2 x2^2]
"""
def entries(i, j):
return v[i]**j
return matrix(entries, nrows=len(v), ncols=len(v), ring=ring)
@matrix_method
def toeplitz(c, r, ring=None):
r"""
Return a Toeplitz matrix of given first column and first row.
In a Toeplitz matrix, each descending diagonal from left to right is
constant, such that:
.. MATH:: T_{i,j} = T_{i+1, j+1}.
For more information see the :wikipedia:`Toeplitz_matrix`.
INPUT:
- ``c`` -- vector, first column of the Toeplitz matrix
- ``r`` -- vector, first row of the Toeplitz matrix, counting from the
second column
- ``ring`` -- base ring (optional, default: None) of the resulting matrix
EXAMPLES:
A rectangular Toeplitz matrix::
sage: matrix.toeplitz([1..4], [5..6])
[1 5 6]
[2 1 5]
[3 2 1]
[4 3 2]
The following `N\times N` Toeplitz matrix arises in the discretization of
boundary value problems::
sage: N = 4
sage: matrix.toeplitz([-2, 1] + [0]*(N-2), [1] + [0]*(N-2))
[-2 1 0 0]
[ 1 -2 1 0]
[ 0 1 -2 1]
[ 0 0 1 -2]
"""
def entries(i, j):
return c[i - j] if i >= j else r[j - i - 1]
return matrix(entries, nrows=len(c), ncols=len(r)+1, ring=ring)
@matrix_method
def hankel(c, r=None, ring=None):
r"""
Return a Hankel matrix of given first column and whose elements are zero
below the first anti-diagonal.
The Hankel matrix is symmetric and constant across the anti-diagonals,
with elements
.. MATH::
H_{ij} = v_{i+j-1},\qquad i = 1,\ldots, m,~j = 1,\ldots, n,
where the vector `v_i = c_i` for `i = 1,\ldots, m` and `v_{m+i} = r_i` for
`i = 1, \ldots, n-1` completely determines the Hankel matrix. If the last
row, `r`, is not given, the Hankel matrix is square by default and `r = 0`.
For more information see the :wikipedia:`Hankel_matrix`.
INPUT:
- ``c`` -- vector, first column of the Hankel matrix
- ``r`` -- vector (optional, default: None), last row of the Hankel matrix, from
the second to the last column
- ``ring`` -- base ring (optional, default: None) of the resulting matrix
EXAMPLES:
A Hankel matrix with symbolic entries::
sage: matrix.hankel(SR.var('a, b, c, d, e'))
[a b c d e]
[b c d e 0]
[c d e 0 0]
[d e 0 0 0]
[e 0 0 0 0]
We can also pass the elements of the last row, starting at the second column::
sage: matrix.hankel(SR.var('a, b, c, d, e'), SR.var('f, g, h, i'))
[a b c d e]
[b c d e f]
[c d e f g]
[d e f g h]
[e f g h i]
A third order Hankel matrix in the integers::
sage: matrix.hankel([1, 2, 3])
[1 2 3]
[2 3 0]
[3 0 0]
The second argument allows to customize the last row::
sage: matrix.hankel([1..3], [7..10])
[ 1 2 3 7 8]
[ 2 3 7 8 9]
[ 3 7 8 9 10]
"""
m = len(c)
r = [0] * (m - 1) if r is None else list(r)
n = len(r)
def entries(i):
return c[i] if i < m else r[i - m]
return matrix(lambda i, j: entries(i + j), nrows=m, ncols=n + 1, ring=ring)
| 36.417808
| 186
| 0.581839
|
1ba03062a604ccfd9b51f935ad76fe061c70b6e9
| 754
|
py
|
Python
|
middleware.py
|
RobBickel/nyt-fec
|
802df867c3b31fff8e922be00bab6f40a5db2d00
|
[
"Apache-2.0"
] | 17
|
2018-03-27T15:09:58.000Z
|
2020-05-13T11:32:43.000Z
|
middleware.py
|
RobBickel/nyt-fec
|
802df867c3b31fff8e922be00bab6f40a5db2d00
|
[
"Apache-2.0"
] | 59
|
2018-03-21T17:08:15.000Z
|
2021-12-13T19:47:37.000Z
|
middleware.py
|
RobBickel/nyt-fec
|
802df867c3b31fff8e922be00bab6f40a5db2d00
|
[
"Apache-2.0"
] | 11
|
2018-09-11T23:18:32.000Z
|
2021-12-15T08:43:58.000Z
|
import re
import pytz
from django.utils import timezone
from django.http import HttpResponse
# The import below should make the middleware compatible with both Django 1.x and 2.x
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError:
MiddlewareMixin = object
class HealthCheckMiddleware(MiddlewareMixin):
def process_request(self, request):
if "/healthcheck" in request.path:
return HttpResponse('healthy: returned by middleware.HealthCheckMiddleware', status=200)
class TimezoneMiddleware(MiddlewareMixin):
def process_request(self, request):
tzname = 'US/Eastern'
if tzname:
timezone.activate(pytz.timezone(tzname))
else:
timezone.deactivate()
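# Usage sketch (an assumption, not part of this module): both classes are
# enabled by listing them in the project settings, e.g.
#
#   MIDDLEWARE = [
#       'middleware.HealthCheckMiddleware',
#       'middleware.TimezoneMiddleware',
#       # ... the rest of the stack ...
#   ]
#
# HealthCheckMiddleware is best placed early so "/healthcheck" is answered
# before authentication or routing middleware can interfere.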
| 31.416667
| 100
| 0.732095
|
7c818c87a53a37e47d7b66cb7750035940b93f2b
| 1,082
|
py
|
Python
|
2020/14-b.py
|
asmundg/adventofcode
|
adc0c9c8ba1d0ef04b621f6f8a5237ee34b9a230
|
[
"MIT"
] | null | null | null |
2020/14-b.py
|
asmundg/adventofcode
|
adc0c9c8ba1d0ef04b621f6f8a5237ee34b9a230
|
[
"MIT"
] | null | null | null |
2020/14-b.py
|
asmundg/adventofcode
|
adc0c9c8ba1d0ef04b621f6f8a5237ee34b9a230
|
[
"MIT"
] | null | null | null |
import re
mask_re = re.compile(r"^mask = ([X01]+)$")
assign_re = re.compile(r"^mem\[(\d+)\] = (\d+)$")
def mask_out(number, mask, pos):
if pos >= len(mask):
return [number]
char = mask[len(mask) - 1 - pos]
if char == "1":
return mask_out(number | (1 << pos), mask, pos + 1)
if char == "0":
return mask_out(number, mask, pos + 1)
return mask_out(number | (1 << pos), mask, pos + 1) + mask_out(
number & ~(1 << pos), mask, pos + 1
)
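# Worked example (added for illustration): with mask "X10" and address 0b000,
# bit 0 is left unchanged ('0'), bit 1 is forced on ('1'), and bit 2 floats
# ('X'), so the recursion expands to both settings of bit 2:
#
#   mask_out(0, "X10", 0)  ->  [0b110, 0b010]  ==  [6, 2]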
def main():
mem = {}
with open("input/14.input") as f:
mask = "".join(["X" for _ in range(36)])
for line in f.readlines():
if mask_re.match(line):
(mask,) = mask_re.match(line).groups()
print(mask)
else:
address, number = assign_re.match(line).groups()
for masked_address in mask_out(int(address), mask, 0):
print(bin(int(address)), bin(masked_address), number)
mem[masked_address] = int(number)
print(sum(mem.values()))
main()
| 26.390244
| 73
| 0.511091
|
5d04845177c24a5c23bbdd83db20a6df01e7117b
| 1,255
|
py
|
Python
|
test/functional/rpc_deprecated.py
|
BioA3/BioA3
|
a7ad7021121aaa468b11a9925972e315cea70f50
|
[
"MIT"
] | null | null | null |
test/functional/rpc_deprecated.py
|
BioA3/BioA3
|
a7ad7021121aaa468b11a9925972e315cea70f50
|
[
"MIT"
] | null | null | null |
test/functional/rpc_deprecated.py
|
BioA3/BioA3
|
a7ad7021121aaa468b11a9925972e315cea70f50
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test deprecation of RPC calls."""
from test_framework.test_framework import BioA3TestFramework
from test_framework.util import assert_raises_rpc_error
class DeprecatedRpcTest(BioA3TestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [[], ["-deprecatedrpc=estimatefee", "-deprecatedrpc=createmultisig"]]
def run_test(self):
self.log.info("estimatefee: Shows deprecated message")
assert_raises_rpc_error(-32, 'estimatefee is deprecated', self.nodes[0].estimatefee, 1)
self.log.info("Using -deprecatedrpc=estimatefee bypasses the error")
self.nodes[1].estimatefee(1)
self.log.info("Make sure that -deprecatedrpc=createmultisig allows it to take addresses")
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, [self.nodes[0].getnewaddress()])
self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
if __name__ == '__main__':
DeprecatedRpcTest().main()
| 44.821429
| 123
| 0.72988
|
3cbab0de8aaec3fd69811912da4dd3680553cee0
| 6,215
|
py
|
Python
|
Wrappers/Python/test/test_NexusReaderWriter.py
|
paskino/CIL
|
1803cbd445c408588fecbf705fb8b4df486029fc
|
[
"Apache-2.0"
] | null | null | null |
Wrappers/Python/test/test_NexusReaderWriter.py
|
paskino/CIL
|
1803cbd445c408588fecbf705fb8b4df486029fc
|
[
"Apache-2.0"
] | null | null | null |
Wrappers/Python/test/test_NexusReaderWriter.py
|
paskino/CIL
|
1803cbd445c408588fecbf705fb8b4df486029fc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# CCP in Tomographic Imaging (CCPi) Core Imaging Library (CIL).
# Copyright 2017 UKRI-STFC
# Copyright 2017 University of Manchester
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import unittest
import os
from cil.io import NEXUSDataReader
from cil.io import NEXUSDataWriter
from cil.framework import AcquisitionData, AcquisitionGeometry, ImageData, ImageGeometry
import numpy
import shutil
class TestNexusReaderWriter(unittest.TestCase):
def setUp(self):
self.data_dir = os.path.join(os.getcwd(), 'test_nxs')
if not os.path.exists(self.data_dir):
os.mkdir(self.data_dir)
self.ag2d = AcquisitionGeometry.create_Parallel2D()\
.set_angles([0, 90, 180],-3.0, 'radian')\
.set_panel(5, 0.2, origin='top-right')\
.set_channels(6)\
.set_labels(['horizontal', 'angle'])
self.ad2d = self.ag2d.allocate('random_int')
self.ag3d = AcquisitionGeometry.create_Cone3D([0.1,-500,2], [3,600,-1], [0,1,0],[0,0,-1],[0.2,-0.1,0.5],[-0.1,0.2,0.9])\
.set_angles([0, 90, 180])\
                            .set_panel([5,10],[0.1,0.3])
self.ad3d = self.ag3d.allocate('random_int')
def tearDown(self):
shutil.rmtree(self.data_dir)
def test_writeImageData(self):
im_size = 5
ig = ImageGeometry(voxel_num_x = im_size,
voxel_num_y = im_size)
im = ig.allocate()
writer = NEXUSDataWriter()
writer.set_up(file_name = os.path.join(self.data_dir, 'test_nexus_im.nxs'),
data = im)
writer.write()
self.readImageDataAndTest()
def test_writeAcquisitionData(self):
writer = NEXUSDataWriter()
writer.set_up(file_name = os.path.join(self.data_dir, 'test_nexus_ad2d.nxs'),
data = self.ad2d)
writer.write()
writer = NEXUSDataWriter()
writer.set_up(file_name = os.path.join(self.data_dir, 'test_nexus_ad3d.nxs'),
data = self.ad3d)
writer.write()
self.readAcquisitionDataAndTest()
def readImageDataAndTest(self):
im_size = 5
ig_test = ImageGeometry(voxel_num_x = im_size,
voxel_num_y = im_size)
im_test = ig_test.allocate()
reader = NEXUSDataReader()
reader.set_up(file_name = os.path.join(self.data_dir, 'test_nexus_im.nxs'))
im = reader.read()
ig = reader.get_geometry()
assert ig == ig_test
numpy.testing.assert_array_equal(im.as_array(), im_test.as_array(), 'Loaded image is not correct')
self.assertEqual(ig.voxel_num_x, ig_test.voxel_num_x, 'ImageGeometry is not correct')
self.assertEqual(ig.voxel_num_y, ig_test.voxel_num_y, 'ImageGeometry is not correct')
def readAcquisitionDataAndTest(self):
reader2d = NEXUSDataReader()
reader2d.set_up(file_name = os.path.join(self.data_dir, 'test_nexus_ad2d.nxs'))
ad2d = reader2d.read()
ag2d = reader2d.get_geometry()
numpy.testing.assert_array_equal(ad2d.as_array(), self.ad2d.as_array(), 'Loaded image is not correct')
self.assertEqual(ag2d.geom_type, self.ag2d.geom_type, 'ImageGeometry.geom_type is not correct')
numpy.testing.assert_array_equal(ag2d.angles, self.ag2d.angles, 'ImageGeometry.angles is not correct')
self.assertEqual(ag2d.pixel_num_h, self.ag2d.pixel_num_h, 'ImageGeometry.pixel_num_h is not correct')
self.assertEqual(ag2d.pixel_size_h, self.ag2d.pixel_size_h, 'ImageGeometry.pixel_size_h is not correct')
self.assertEqual(ag2d.pixel_num_v, self.ag2d.pixel_num_v, 'ImageGeometry.pixel_num_v is not correct')
self.assertEqual(ag2d.pixel_size_v, self.ag2d.pixel_size_v, 'ImageGeometry.pixel_size_v is not correct')
assert ag2d == self.ag2d
reader3d = NEXUSDataReader()
reader3d.set_up(file_name = os.path.join(self.data_dir, 'test_nexus_ad3d.nxs'))
ad3d = reader3d.read()
ag3d = reader3d.get_geometry()
numpy.testing.assert_array_equal(ad3d.as_array(), self.ad3d.as_array(), 'Loaded image is not correct')
numpy.testing.assert_array_equal(ag3d.angles, self.ag3d.angles, 'AcquisitionGeometry.angles is not correct')
self.assertEqual(ag3d.geom_type, self.ag3d.geom_type, 'AcquisitionGeometry.geom_type is not correct')
self.assertEqual(ag3d.dimension, self.ag3d.dimension, 'AcquisitionGeometry.dimension is not correct')
self.assertEqual(ag3d.pixel_num_h, self.ag3d.pixel_num_h, 'AcquisitionGeometry.pixel_num_h is not correct')
self.assertEqual(ag3d.pixel_size_h, self.ag3d.pixel_size_h, 'AcquisitionGeometry.pixel_size_h is not correct')
self.assertEqual(ag3d.pixel_num_v, self.ag3d.pixel_num_v, 'AcquisitionGeometry.pixel_num_v is not correct')
self.assertEqual(ag3d.pixel_size_v, self.ag3d.pixel_size_v, 'AcquisitionGeometry.pixel_size_v is not correct')
self.assertEqual(ag3d.dist_source_center, self.ag3d.dist_source_center, 'AcquisitionGeometry.dist_source_center is not correct')
self.assertEqual(ag3d.dist_center_detector, self.ag3d.dist_center_detector, 'AcquisitionGeometry.dist_center_detector is not correct')
self.assertEqual(ag3d.channels, self.ag3d.channels, 'AcquisitionGeometry.channels is not correct')
assert ag3d == self.ag3d
if __name__ == '__main__':
unittest.main()
| 46.729323
| 142
| 0.665648
|
e16f3c19d19d7b9979e5033ab35e6733758774b4
| 10,618
|
py
|
Python
|
scripts/phylonet/treesy_cheesy.py
|
biothomme/--valencene
|
a1327c886d2c7e6f9709c0c502f3936abd01e452
|
[
"MIT"
] | null | null | null |
scripts/phylonet/treesy_cheesy.py
|
biothomme/--valencene
|
a1327c886d2c7e6f9709c0c502f3936abd01e452
|
[
"MIT"
] | null | null | null |
scripts/phylonet/treesy_cheesy.py
|
biothomme/--valencene
|
a1327c886d2c7e6f9709c0c502f3936abd01e452
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 14 18:09:40 2020
@author: Thomsn
"""
import argparse
import numpy as np
import os
import pandas as pd
def subtree(tr):
    st = ')'.join('('.join(tr.split('(')[1:]).split(')')[:-1])
return st
def tree_splits(tr):
comma_split = subtree(tr).split(',')
opener = np.cumsum([spl.count('(') for spl in comma_split])
closer = np.cumsum([spl.count(')') for spl in comma_split])
branching_point = [i for i, (op, cl) in enumerate(zip(opener, closer)) if op <= cl][0] + 1
return branching_point
def tree_splitter(tr):
comma_split = subtree(tr).split(',')
branching_point = tree_splits(tr)
lefter = ','.join(comma_split[:branching_point])
righter = ','.join(comma_split[branching_point:])
return [lefter, righter]
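# Worked example (added for illustration): for the rooted newick fragment
# tr = '((a,b)I1,c)I0', subtree(tr) is '(a,b)I1,c'; the cumulative '(' and ')'
# counts in tree_splits first balance after the token '(a,b)I1', so
# tree_splitter(tr) returns ['(a,b)I1', 'c'].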
def taximize(subtr):
TAXA = ['vitisasia',
'vitisusa',
'vveast',
'vvwest',
'vsylveast',
'vsylvwest']
given_tax = [taxon for taxon in TAXA if taxon in subtr]
return given_tax
def get_the_node(tr):
import re
extension = tr.split(')')[-1]
i_node = re.match(r'I[0-9]*', extension)
return i_node[0]
def extract_brl(subtr):
brl = subtr.split(':')[-1]
return brl
def tree_list(tr):
jj = tree_splitter(tr)
trl = [get_the_node(tr),
taximize(jj[0]),
extract_brl(jj[0]),
jj[0],
taximize(jj[1]),
extract_brl(jj[1]),
jj[1],
tr]
return trl
def tree_sep_branches(tr):
BRCOL = ['node',
'branch_1',
'brl_1',
'str_1',
'branch_2',
'brl_2',
'str_2',
'tree_str']
binaframe = pd.DataFrame(columns = BRCOL)
max_nodes = tr.count(',')
binaframe = pd.DataFrame(columns = BRCOL)
binaframe.loc[str(len(binaframe) + 1)] = tree_list(tr)
while len(binaframe) < max_nodes:
for i in range(len(binaframe)):
for side in ['str_1', 'str_2']:
seltree = binaframe[side].values[i]
if seltree not in list(binaframe['tree_str'].values):
if len(taximize(seltree)) > 1:
binaframe.loc[str(len(binaframe) + 1)] = tree_list(seltree)
return binaframe
def compare_topos(tree_csv, topology=None):
if topology:
ts_ref = topology
else:
treeset = tree_csv['topology'].values
ts_ref = treeset[0]
reference = tree_sep_branches(ts_ref).loc[:,['node','branch_1','branch_2']]
for co, trs in tree_csv.iterrows():
tro = tree_sep_branches(trs['topology'])
share_node = []
for i in range(len(reference)):
curref = reference.copy().iloc[i,:]
for (rb, lb) in [('branch_1','branch_2'), ('branch_2','branch_1')]:
res = tro[rb].copy().values
les = tro[lb].copy().values
lefters = [j \
for j, (lf, rg) in \
enumerate(zip(res,les)) \
if (lf == curref['branch_1'] and rg == curref['branch_2'])]
if len(lefters) > 0:
share_node.append(tro['node'].copy().values[lefters[0]])
break
elif (rb, lb) == ('branch_2','branch_1'):
share_node.append(None)
reference.loc[:,trs[0]] = share_node
return reference
def summarize_topos(topology_matrix):
sel_mat = topology_matrix.iloc[:,3:].copy()
top_summary = topology_matrix.iloc[:,0:1].copy()
topo_count = len(sel_mat.iloc[1,:])
occ_list = []
for co, toprow in sel_mat.iterrows():
occs = sum([1 for el in toprow if el != None])
occ_list.append(occs)
top_summary.loc[:,'occurences'] = occ_list
top_summary.loc[:,'percentage'] = np.divide(occ_list, topo_count)
return top_summary
def make_bootstrap_str(tree_csv, topology_matrix = pd.DataFrame()):
if topology_matrix.empty:
topology_matrix = summarize_topos(compare_topos(tree_csv))
template_str = tree_csv['topology'].values[0]
for co, toporow in topology_matrix.iterrows():
nod = toporow['node']
val = f'{toporow["percentage"]:.3f}'
template_str = val.join(template_str.split(nod))
return template_str
def check_for_topo(tree_csv, topo_candidate):
for co, trs in tree_csv.iterrows():
tro = tree_sep_branches(trs['topology'])
share_node = []
for i in range(len(topo_candidate)):
curref = topo_candidate.copy().iloc[i,:]
for (rb, lb) in [('branch_1','branch_2'), ('branch_2','branch_1')]:
res = tro[rb].copy().values
les = tro[lb].copy().values
lefters = [j \
for j, (lf, rg) in \
enumerate(zip(res,les)) \
if (lf == curref['branch_1'] and rg == curref['branch_2'])]
if len(lefters) > 0:
share_node.append(tro['node'].copy().values[lefters[0]])
break
elif (rb, lb) == ('branch_2','branch_1'):
share_node.append(None)
topo_candidate.loc[:,trs[0]] = share_node
return topo_candidate
def get_all_topos(tree_csv):
treeset = tree_csv['topology'].values
total_topoframe = pd.DataFrame()
if total_topoframe.size < 1:
nextr = 0
rank = [1]
entrance = True
while entrance:
reference = tree_sep_branches(treeset[nextr]).loc[:,['node','branch_1','branch_2']]
reference['topo_rank'] = reference.shape[0] * rank
reference['best_topo'] = reference.shape[0] * [treeset[nextr].split('\n')[0]]
if total_topoframe.size < 1:
total_topoframe = check_for_topo(tree_csv, reference)
else:
total_topoframe = pd.concat([total_topoframe,check_for_topo(tree_csv, reference)])
all_ranks = set(total_topoframe['topo_rank'])
bool_frame = pd.DataFrame(columns=all_ranks)
for rk in all_ranks:
frame_sel = total_topoframe.loc[total_topoframe['topo_rank'] == rk,:].iloc[:,5:]
bool_frame[rk] = [True if None in frame_sel[coln].values else False for i, coln in enumerate(frame_sel.columns)]
disc_topologies = [i for i in bool_frame.index if all(bool_frame.iloc[i,:].values) == True]
rank = [rank[0] + 1]
if len(disc_topologies) > 0:
nextr = disc_topologies[0]
else:
entrance = False
return total_topoframe
def all_topos_nodlab(tree_csv, total_topoframe):
sel_topo_sum = summarize_topos(total_topoframe).copy()
sel_topo_sum.columns = ['node', 'occurences', 'percentage']
sbt = make_bootstrap_str(tree_csv, sel_topo_sum)
return sbt
def inverse_bool(boolean):
return not boolean
def obt_likelihoods(tree_csv, total_topoframe):
NEWCOLS = ['rank', 'pseudo_likelihoods', 'replicates', 'best_topology']
all_ranks = set(total_topoframe['topo_rank'])
sum_frame = pd.DataFrame(columns = NEWCOLS)
for rk in all_ranks:
frame_full = total_topoframe.loc[total_topoframe['topo_rank'] == rk,:]
frame_sel = frame_full.copy().iloc[:,5:]
bf = [False if None in frame_sel[coln].values else True for i, coln in enumerate(frame_sel.columns)]
rf = [coln for coln in frame_sel.columns if not None in frame_sel[coln].values]
frame_sep = pd.concat([frame_full.copy().iloc[:,0:5],
frame_sel.loc[:, bf],
frame_sel.loc[:, list(map(inverse_bool, bf))]],
axis=1)
bto = all_topos_nodlab(tree_csv.loc[bf,:], frame_sep)
lk = list(tree_csv.loc[bf,'pseudo_likelihood'].values)
sum_frame.loc[rk, :] = [rk, lk, rf, bto]
return sum_frame
def dom_2_topo(tree_csv):
import re
TOP_TOPO = '((((vveast,vvwest)I4,vsylveast)I3,vsylvwest)I2,(vitisasia,vitisusa)I1)I0;\n'
BRANCH_LENGTH_PART = .25
reference = compare_topos(tree_csv, topology=TOP_TOPO).iloc[:,3:]
bf = [False if None in reference[coln].values else True for i, coln in enumerate(reference.columns)]
if any(bf):
ip_topo = tree_csv.loc[bf,:].iloc[0,:]['topology']
vsylv = re.search(r'vsylvwest:[0-9]*\.[0-9]*E?\-?[0-9]*',
ip_topo).group(0)
vv = re.search(r'vvwest:[0-9]*\.[0-9]*E?\-?[0-9]*',
ip_topo).group(0)
vvbl = float(re.search(r'[0-9]*\.[0-9]*E?\-?[0-9]*', vv).group(0))
newvvbls = [i * vvbl for i in [BRANCH_LENGTH_PART, 1-BRANCH_LENGTH_PART]]
vsylvbl = float(re.search(r'[0-9]*\.[0-9]*E?\-?[0-9]*', vsylv).group(0)) - newvvbls[1]
sylvec = re.sub(vsylv, f'(vsylvwest:{newvvbls[1]})#H1:{vsylvbl}', ip_topo)
input_topo = re.sub(vv, f'(vvwest:{newvvbls[1]})#H1:{newvvbls[0]}', sylvec)
else:
input_topo = tree_csv.loc[0,:]['topology']
return input_topo
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-i", action="store", dest="bigdir", required=True,
help="big dir with repdirs")
args = parser.parse_args()
BIG_DIR = args.bigdir
NEWCOLS = ['rank', 'pseudo_likelihoods', 'replicates', 'best_topology']
#BIG_DIR = 'Desktop/Studium/MEME Programme [current]/Universite de Montpellier [Sem2]/internship_scornavacca_lab/git_lab/phylogenetto/data/real_data/vitis/phylonet/2020_04_vitis_USA_2_summary_20_reps/'
os.chdir(BIG_DIR)
rep_list = [diro for diro in os.listdir() if 'rep_' in diro]
all_stars = pd.DataFrame(columns = NEWCOLS)
for repsdir in rep_list:
all_trees = pd.read_csv(f'{repsdir}/tree_sum.csv')
# topo_mat = compare_topos(all_trees)
# topo_sum = summarize_topos(topo_mat)
# bt = make_bootstrap_str(all_trees, topo_sum)
# best_trees = f'{repsdir}/best_tree_rep.new'
# with open(best_trees, 'w') as bff:
# bff.write(bt)
tree_csv = all_trees.copy()
summary_frame = obt_likelihoods(all_trees, get_all_topos(all_trees))
summary_frame.to_csv(f'{repsdir}/net_0_summary_seedrep.csv')
all_stars.loc[repsdir,:] = summary_frame.iloc[0,:]
topo_dir = f'{repsdir}/topologies_new_ret1'
if not os.path.exists(topo_dir):
os.makedirs(topo_dir)
input_topofile = f'{topo_dir}/ret_01_topo.new'
with open(input_topofile, 'w') as itf:
itf.write(dom_2_topo(tree_csv))
all_stars.to_csv('net_0_bt_samrep.csv')
| 39.619403
| 201
| 0.599736
|
fd914c27b7f51dba243c5b348fa6590cf41cd79c
| 26
|
py
|
Python
|
stac_fastapi/sqlalchemy/stac_fastapi/sqlalchemy/__init__.py
|
twig-aidash/stac-fastapi
|
1442e51b07d7db49ffeea6e2a311f5b9f1c8c9c7
|
[
"MIT"
] | null | null | null |
stac_fastapi/sqlalchemy/stac_fastapi/sqlalchemy/__init__.py
|
twig-aidash/stac-fastapi
|
1442e51b07d7db49ffeea6e2a311f5b9f1c8c9c7
|
[
"MIT"
] | null | null | null |
stac_fastapi/sqlalchemy/stac_fastapi/sqlalchemy/__init__.py
|
twig-aidash/stac-fastapi
|
1442e51b07d7db49ffeea6e2a311f5b9f1c8c9c7
|
[
"MIT"
] | null | null | null |
"""postgres submodule."""
| 13
| 25
| 0.653846
|
c08b0b807b2f4b5384e24c4a73bc82b6135971fd
| 585
|
py
|
Python
|
simple_latex/latex_document/text.py
|
hackla-engage/simple-pylatex
|
90edb839f7e34cfe117a6b30bc74baeac59eb800
|
[
"MIT"
] | null | null | null |
simple_latex/latex_document/text.py
|
hackla-engage/simple-pylatex
|
90edb839f7e34cfe117a6b30bc74baeac59eb800
|
[
"MIT"
] | null | null | null |
simple_latex/latex_document/text.py
|
hackla-engage/simple-pylatex
|
90edb839f7e34cfe117a6b30bc74baeac59eb800
|
[
"MIT"
] | null | null | null |
from .baseclass import DocumentBaseClass
from ..utils.transformations import latex_escape_regular_text
class TextClass(DocumentBaseClass):
def __init__(self, text, include_extra_newline_after=False, escape=True):
self.text = text
self.include_extra_newline_after = include_extra_newline_after
self.escape = escape
def __repr__(self):
# if self.escape:
# repr = latex_escape_regular_text(self.text)
# else:
repr = self.text
if self.include_extra_newline_after:
repr += "\n\n"
return repr
| 32.5
| 77
| 0.680342
|
90836f1ae8455ddcfc90d4cc0fd8ce8339fc3602
| 4,371
|
py
|
Python
|
rest_framework_simplejwt/serializers.py
|
olymk2/django-rest-framework-simplejwt
|
7d92d2452d478bc6f33a331b2dab08c9a8bbba66
|
[
"MIT"
] | null | null | null |
rest_framework_simplejwt/serializers.py
|
olymk2/django-rest-framework-simplejwt
|
7d92d2452d478bc6f33a331b2dab08c9a8bbba66
|
[
"MIT"
] | null | null | null |
rest_framework_simplejwt/serializers.py
|
olymk2/django-rest-framework-simplejwt
|
7d92d2452d478bc6f33a331b2dab08c9a8bbba66
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.contrib.auth import authenticate
from django.utils.translation import ugettext_lazy as _
from rest_framework import exceptions, serializers
from .settings import api_settings
from .tokens import RefreshToken, SlidingToken, UntypedToken
class PasswordField(serializers.CharField):
def __init__(self, *args, **kwargs):
kwargs.setdefault('style', {})
kwargs['style']['input_type'] = 'password'
kwargs['write_only'] = True
super().__init__(*args, **kwargs)
class TokenObtainSerializer(serializers.Serializer):
user_model = get_user_model()
username_field = user_model.USERNAME_FIELD
default_error_messages = {
'no_active_account': _('No active account found with the given credentials')
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields[self.username_field] = serializers.CharField()
self.fields['password'] = PasswordField()
def validate(self, attrs):
authenticate_kwargs = {
self.username_field: attrs[self.username_field],
'password': attrs['password'],
}
try:
authenticate_kwargs['request'] = self.context['request']
except KeyError:
pass
self.user = authenticate(**authenticate_kwargs)
# Prior to Django 1.10, inactive users could be authenticated with the
# default `ModelBackend`. As of Django 1.10, the `ModelBackend`
# prevents inactive users from authenticating. App designers can still
# allow inactive users to authenticate by opting for the new
# `AllowAllUsersModelBackend`. However, we explicitly prevent inactive
# users from authenticating to enforce a reasonable policy and provide
# sensible backwards compatibility with older Django versions.
if self.user is None or not self.user.is_active:
raise exceptions.AuthenticationFailed(
self.error_messages['no_active_account'],
'no_active_account',
)
return {}
@classmethod
def get_token(cls, user):
raise NotImplementedError('Must implement `get_token` method for `TokenObtainSerializer` subclasses')
class TokenObtainPairSerializer(TokenObtainSerializer):
@classmethod
def get_token(cls, user):
return RefreshToken.for_user(user)
def validate(self, attrs):
data = super().validate(attrs)
refresh = self.get_token(self.user)
data['refresh'] = str(refresh)
data['access'] = str(refresh.access_token)
return data
class TokenObtainSlidingSerializer(TokenObtainSerializer):
@classmethod
def get_token(cls, user):
return SlidingToken.for_user(user)
def validate(self, attrs):
data = super().validate(attrs)
token = self.get_token(self.user)
data['token'] = str(token)
return data
class TokenRefreshSerializer(serializers.Serializer):
refresh = serializers.CharField()
def validate(self, attrs):
refresh = RefreshToken(attrs['refresh'])
data = {'access': str(refresh.access_token)}
if api_settings.ROTATE_REFRESH_TOKENS:
if api_settings.BLACKLIST_AFTER_ROTATION:
try:
# Attempt to blacklist the given refresh token
refresh.blacklist()
except AttributeError:
# If blacklist app not installed, `blacklist` method will
# not be present
pass
refresh.set_jti()
refresh.set_exp()
data['refresh'] = str(refresh)
return data
class TokenRefreshSlidingSerializer(serializers.Serializer):
token = serializers.CharField()
def validate(self, attrs):
token = SlidingToken(attrs['token'])
# Check that the timestamp in the "refresh_exp" claim has not
# passed
token.check_exp(api_settings.SLIDING_TOKEN_REFRESH_EXP_CLAIM)
# Update the "exp" claim
token.set_exp()
return {'token': str(token)}
class TokenVerifySerializer(serializers.Serializer):
token = serializers.CharField()
def validate(self, attrs):
UntypedToken(attrs['token'])
return {}
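# Wiring sketch (an assumption; the package's own views module provides the
# shipped equivalents): each serializer pairs with a TokenViewBase subclass,
# e.g.
#
#   from rest_framework_simplejwt.views import TokenViewBase
#
#   class TokenObtainPairView(TokenViewBase):
#       serializer_class = TokenObtainPairSerializer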
| 29.734694
| 109
| 0.652253
|
625cd1086b728276c68cf50152fcc0b0b99e0aaf
| 6,833
|
py
|
Python
|
Ho-Kashyap.py
|
THuang001/Assignments-PatternRecognition-2017Fall
|
f07a8c4b14f5b757e93edb39704d6239167098c1
|
[
"MIT"
] | 2
|
2019-11-07T10:23:37.000Z
|
2019-12-05T10:30:29.000Z
|
Ho-Kashyap.py
|
THuang001/Assignments-PatternRecognition-2017Fall
|
f07a8c4b14f5b757e93edb39704d6239167098c1
|
[
"MIT"
] | null | null | null |
Ho-Kashyap.py
|
THuang001/Assignments-PatternRecognition-2017Fall
|
f07a8c4b14f5b757e93edb39704d6239167098c1
|
[
"MIT"
] | null | null | null |
## Batch Perception, Ho-Kashyap, Batch Relaxation with Margin, Single-sample Relaxation with Margin
import numpy as np
import matplotlib.pyplot as plt
import time
Y1 = np.array([[1, 0.1, 1.1], [1, 6.8, 7.1], [1, -3.5, -4.1], [1, 2.0, 2.7], [1, 4.1, 2.8], [1, 3.1, 5.0], [1, -0.8, -1.3], [1, 0.9, 1.2], [1, 5.0, 6.4], [1, 3.9, 4.0]])
Y2 = np.array([[1, 7.1, 4.2], [1, -1.4, -4.3], [1, 4.5, 0.0], [1, 6.3, 1.6], [1, 4.2, 1.9], [1, 1.4, -3.2], [1, 2.4, -4.0], [1, 2.5, -6.1], [1, 8.4, 3.7], [1, 4.1, -2.2]])
Y3 = np.array([[1, -3.0, -2.9], [1, 0.5, 8.7], [1, 2.9, 2.1], [1, -0.1, 5.2], [1, -4.0, 2.2], [1, -1.3, 3.7], [1, -3.4, 6.2], [1, -4.1, 3.4], [1, -5.1, 1.6], [1, 1.9, 5.1]])
Y4 = np.array([[1, -2.0, -8.4], [1, -8.9, 0.2], [1, -4.2, -7.7], [1, -8.5, -3.2], [1, -6.7, -4.0], [1, -0.5, -9.2], [1, -5.3, -6.7], [1, -8.7, -6.4], [1, -7.1, -9.7], [1, -8, -6.3]])
N = len(Y1) # 10
d = len(Y1[0]) # 3
def visualDicisionBoundry(Ym, Yn, m, n, a, title):
"""
    Plot the sample points of the two classes and the decision boundary.
"""
fig = plt.figure(figsize=(7, 7))
ax = fig.add_subplot(111)
    # sample points
ax.scatter(Ym[:, 1:2], Ym[:, 2:], s=30, label='$\omega_' + str(m) + '$')
ax.scatter(Yn[:, 1:2], Yn[:, 2:], s=30, label='$\omega_' + str(n) + '$', color='r')
    # decision boundary: a.T * (1, x1, x2) = 0
x1 = np.arange(-6, 7.5, 0.1)
ax.plot(x1, (- a[1] * x1 - a[0]) / a[2], color='black', lw=1)
plt.xlabel('$x_1$'); plt.ylabel('$x_2$')
plt.title(title)
plt.legend(loc=2)
# plt.savefig(title + time.strftime('%m%d%H%M%S') + '.pdf', dpi=400)
plt.show()
def visualCriterion(k1, k2, criterion1, criterion2, title):
"""
    Plot the criterion function value against the iteration number.
"""
fig = plt.figure(figsize=(7, 7))
ax = fig.add_subplot(111)
ax.plot(np.arange(0, k1, 1), criterion1, marker ='s', c='red', lw=1.5, label='$b=0.1$')
ax.plot(np.arange(0, k2, 1), criterion2, marker ='s', c='blue', lw=1.5, label='$b=0.5$')
plt.xlabel('$k$'); plt.ylabel(r'$J_r$')
plt.title(title)
plt.legend(loc=1)
# plt.savefig(title + time.strftime('%m%d%H%M%S') + '.pdf', dpi=400)
plt.show()
def batchPerception(a1, eta1, Ym, Yn, m, n):
"""
    Batch perceptron algorithm.
"""
    Y = np.append(Ym, -Yn, axis=0) # normalize the negative-class samples by negation
a = a1; eta = eta1
step = 0
for k in range(2, 1000):
yk = [i.reshape(d, 1) for i in Y if np.dot(np.transpose(a), i.reshape(d, 1)) <= 0] # print(k-2, len(Yk))
if not yk:
visualDicisionBoundry(Ym, Yn, m, n, a, 'Batch Perception')
return a, k-2
a = a + eta * sum(yk)
step = k-1
visualDicisionBoundry(Ym, Yn, m, n, a, 'Batch Perception')
return a, step
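# Note added for clarity: the loop above is the classical batch perceptron
# update. With every sample normalized so that correct classification means
# a^T y > 0, the weights move by the sum of the misclassified samples:
#     a(k+1) = a(k) + eta * sum_{y in Y_k} y,   Y_k = {y : a(k)^T y <= 0}.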
def hoKashyap(b1, eta1, b_min, Ym, Yn, m, n):
"""
    Ho-Kashyap algorithm.
"""
    Y = np.append(Ym, -Yn, axis=0) # negative-class samples must be normalized by negation
b = b1
a = np.dot(np.linalg.pinv(Y), b); eta = eta1
for k in range(2, 1000):
e = np.dot(Y, a) - b
ee = 0.5 * (e + abs(e))
b = b + 2 * eta * ee
a = np.dot(np.linalg.pinv(Y), b)
        if len(e[np.where(abs(e) > b_min)]) == 0: # if this holds, the algorithm has converged
visualDicisionBoundry(Ym, Yn, m, n, a, 'Ho-Kashyap')
return np.linalg.norm(e)**2, k-1
        if len(e[np.where(e <= 0)]) == 2*N: # if this holds, the samples are linearly inseparable: e is a nonzero vector with no positive component
visualDicisionBoundry(Ym, Yn, m, n, a, 'Ho-Kashyap')
return np.linalg.norm(e)**2, k-1
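# Note added for clarity: the updates above are the standard Ho-Kashyap
# recursion. With e = Y a - b and e+ = (e + |e|)/2 its positive part,
#     b(k+1) = b(k) + 2 * eta * e+,    a(k+1) = pinv(Y) * b(k+1),
# so only positive errors grow the margin vector b, and a is always the
# least-squares solution for the current b.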
def batchRelaxationMargin(a1, eta1, b, Ym, Yn, m, n):
"""
    Batch relaxation algorithm with margin.
"""
    Y = np.append(Ym, -Yn, axis=0) # normalize the negative-class samples by negation; shape=(20, 3)
a = a1; eta = eta1
criterion = []
step = 0
while True:
        Y_error = [x for x in Y if np.dot(x, a) <= b] # misclassified samples at the current iteration
print(step, len(Y_error))
        if not len(Y_error): # converged
visualDicisionBoundry(Ym, Yn, m, n, a, 'Batch Relaxation with Margin($\eta_k=' + str(eta1) + ')$')
return step, criterion
criterion.append(np.sum([0.5 * (np.dot(y, a) - b)**2 / (np.linalg.norm(y))**2 for y in Y_error]))
a = a + eta * sum([(b - (np.dot(y, a)) / (np.linalg.norm(y))**2) * y.reshape(d, 1) for y in Y_error])
# for y in Y_error: a = a + eta * (b - (np.dot(y, a)) / (np.linalg.norm(y))**2) * y.reshape(d, 1)
        if step > 1000: # did not converge
visualDicisionBoundry(Ym, Yn, m, n, a, 'Batch Relaxation with Margin($\eta_k=' + str(eta1) + ')$')
return step+1, criterion
step += 1
def singleRelaxationMargin(a1, eta1, b, Ym, Yn, m, n):
"""
    Single-sample relaxation algorithm with margin.
"""
Y = np.append(Ym, -Yn, axis=0) # 负类样本规范化 shape=(20, 3)
    # to make the samples appear (approximately) infinitely often, keep appending both classes to Y
for i in range(100):
if i % 2 == 0: Y = np.append(Y, Ym, axis=0)
else: Y = np.append(Y, -Yn, axis=0)
a = a1; eta = eta1
    num = 0 # counts how many consecutive samples have been classified correctly
cri = []
step = 0
for k in range(2, len(Y)):
yk = Y[k-2]
        if np.dot(yk, a) <= b: # update the parameters
a = a + eta * ((b - np.dot(yk, a))/((np.linalg.norm(yk))**2) * yk.reshape(d, 1))
num = 0
            Y_error = [y for y in Y if np.dot(y, a) <= b] # misclassified samples at the current iteration
            cri.append(np.sum([0.5 * (np.dot(y, a) - b)**2 / (np.linalg.norm(y))**2 for y in Y_error])) # criterion value at the current iteration
step += 1
else:
num += 1
            if num == 2*N: # 20 consecutive samples classified correctly, so the algorithm has converged
visualDicisionBoundry(Ym, Yn, m, n, a, 'Single-sample Relaxation with Margin($\eta_k=' + str(eta1) + ')$')
return step+1, cri
    # did not converge
visualDicisionBoundry(Ym, Yn, m, n, a, 'Single-sample Relaxation with Margin($\eta_k=' + str(eta1) + ')$')
return step, cri
if __name__ == '__main__':
    ## Problem 1: Batch Perception
a1 = np.zeros(shape=(d, 1), dtype=float)
eta1 = 1
a12, step12 = batchPerception(a1, eta1, Y1, Y2, 1, 2); print(a12, step12)
a32, step32 = batchPerception(a1, eta1, Y3, Y2, 3, 2); print(a32, step32)
    ## Problem 2: Ho-Kashyap
b1 = np.array([[1] * (2*N)]).reshape((2*N), 1)
eta1 = 0.8
b_min = 0.1
J13, step13 = hoKashyap(b1, eta1, b_min, Y1, Y3, 1, 3); print(J13, step13)
J24, step24 = hoKashyap(b1, eta1, b_min, Y2, Y4, 2, 4); print(J24, step24)
    ## Problem 3: Batch Relaxation with Margin
a1 = np.zeros(shape=(d, 1), dtype=float)
eta1 = 1
step1, cri1 = batchRelaxationMargin(a1, eta1, 0.1, Y2, Y3, 2, 3); print(step1, '\n', cri1)
step2, cri2 = batchRelaxationMargin(a1, eta1, 0.5, Y2, Y3, 2, 3); print(step2, '\n', cri2)
visualCriterion(step1, step2, cri1, cri2, 'Batch Relaxation with Margin: Criterion($\eta_k=' + str(eta1) + ')$')
    ## Problem 3: Single-sample Relaxation with Margin
step1, cri1= singleRelaxationMargin(a1, eta1, 0.1, Y2, Y3, 2, 3); print(step1, '\n', cri1)
step2, cri2= singleRelaxationMargin(a1, eta1, 0.5, Y2, Y3, 2, 3); print(step2, '\n', cri2)
visualCriterion(step1, step2, cri1, cri2, 'Single-sample Relaxation with Margin: Criterion\n($\eta_k=' + str(eta1) + ')$')
| 40.672619
| 182
| 0.524367
|
ec2b70ee3349b88210421bf7e3709e53024a825e
| 414
|
py
|
Python
|
Algorithm/Easy/1000+/1901SquaresofaSortedArray.py
|
MartinYan623/Lint-Code
|
57d2fa441d6496234615736e3f55d0b71aaa51dc
|
[
"MIT"
] | null | null | null |
Algorithm/Easy/1000+/1901SquaresofaSortedArray.py
|
MartinYan623/Lint-Code
|
57d2fa441d6496234615736e3f55d0b71aaa51dc
|
[
"MIT"
] | 1
|
2020-08-08T10:14:53.000Z
|
2020-08-08T10:18:37.000Z
|
Algorithm/Easy/1000+/1901SquaresofaSortedArray.py
|
MartinYan623/Lint-Code
|
57d2fa441d6496234615736e3f55d0b71aaa51dc
|
[
"MIT"
] | null | null | null |
#!/user/bin/env python
# -*- coding: utf-8 -*-
# @File : 1901SquaresofaSortedArray.py
# @Author : Martin Yan
# @Time : 2021/4/21 8:19 PM
# @Software : PyCharm
class Solution:
"""
@param A: The array A.
@return: The array of the squares.
"""
def SquareArray(self, A):
# write your code here
for i in range(len(A)):
A[i] = A[i] * A[i]
A.sort()
return A
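# A possible O(n) alternative (a sketch, not part of the accepted solution):
# since the input is already sorted, the largest squares sit at the two ends,
# so a two-pointer scan can fill the result from the back without sorting.
def square_array_two_pointer(A):
    n = len(A)
    result = [0] * n
    left, right = 0, n - 1
    for k in range(n - 1, -1, -1):
        # the larger absolute value at either end owns the next-largest square
        if abs(A[left]) > abs(A[right]):
            result[k] = A[left] * A[left]
            left += 1
        else:
            result[k] = A[right] * A[right]
            right -= 1
    return result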
| 23
| 38
| 0.545894
|
5b942db232ec2a04b9a7125803eff2f95b1c0911
| 8,584
|
py
|
Python
|
custom_vendor/dm_control/dm_control/mujoco/wrapper/util.py
|
geyang/dmc_generalization
|
051fefc470931810c856e89629d481b06cc8d530
|
[
"MIT"
] | null | null | null |
custom_vendor/dm_control/dm_control/mujoco/wrapper/util.py
|
geyang/dmc_generalization
|
051fefc470931810c856e89629d481b06cc8d530
|
[
"MIT"
] | null | null | null |
custom_vendor/dm_control/dm_control/mujoco/wrapper/util.py
|
geyang/dmc_generalization
|
051fefc470931810c856e89629d481b06cc8d530
|
[
"MIT"
] | null | null | null |
# Copyright 2017-2018 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Various helper functions and classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ctypes
import ctypes.util
import functools
import os
import platform
import sys
from dm_control import _render
import numpy as np
import six
from dm_control.utils import io as resources
_PLATFORM = platform.system()
try:
_PLATFORM_SUFFIX = {
"Linux": "linux",
"Darwin": "macos",
"Windows": "win64"
}[_PLATFORM]
except KeyError:
raise OSError("Unsupported platform: {}".format(_PLATFORM))
# Environment variables that can be used to override the default paths to the
# MuJoCo shared library and key file.
ENV_MJLIB_PATH = "MJLIB_PATH"
ENV_MJKEY_PATH = "MJKEY_PATH"
MJLIB_NAME = "mujoco200"
def _get_shared_library_filename():
"""Get platform-dependent prefix and extension of MuJoCo shared library."""
if _PLATFORM == "Linux":
prefix = "lib"
extension = "so"
elif _PLATFORM == "Darwin":
prefix = "lib"
extension = "dylib"
elif _PLATFORM == "Windows":
prefix = ""
extension = "dll"
else:
raise OSError("Unsupported platform: {}".format(_PLATFORM))
return "{}{}.{}".format(prefix, MJLIB_NAME, extension)
DEFAULT_MJLIB_DIR = "~/.mujoco/mujoco200_{}/bin".format(_PLATFORM_SUFFIX)
DEFAULT_MJLIB_PATH = os.path.join(
DEFAULT_MJLIB_DIR, _get_shared_library_filename())
DEFAULT_MJKEY_PATH = "~/.mujoco/mjkey.txt"
DEFAULT_ENCODING = sys.getdefaultencoding()
def to_binary_string(s):
"""Convert text string to binary."""
if isinstance(s, six.binary_type):
return s
return s.encode(DEFAULT_ENCODING)
def to_native_string(s):
"""Convert a text or binary string to the native string format."""
if six.PY3 and isinstance(s, six.binary_type):
return s.decode(DEFAULT_ENCODING)
elif six.PY2 and isinstance(s, six.text_type):
return s.encode(DEFAULT_ENCODING)
else:
return s
def _get_full_path(path):
expanded_path = os.path.expanduser(os.path.expandvars(path))
return resources.GetResourceFilename(expanded_path)
def _maybe_load_linux_dynamic_deps(library_dir):
"""Ensures that GL and GLEW symbols are available on Linux."""
interpreter_symbols = ctypes.cdll.LoadLibrary("")
if not hasattr(interpreter_symbols, "glewInit"):
# This means our interpreter is not yet linked against GLEW.
if _render.BACKEND == "osmesa":
libglew_path = os.path.join(library_dir, "libglewosmesa.so")
elif _render.BACKEND == "egl":
libglew_path = os.path.join(library_dir, "libglewegl.so")
else:
libglew_path = ctypes.util.find_library("GLEW")
ctypes.CDLL(libglew_path, ctypes.RTLD_GLOBAL) # Also loads GL implicitly.
def get_mjlib():
"""Loads `libmujoco.so` and returns it as a `ctypes.CDLL` object."""
try:
# Use the MJLIB_PATH environment variable if it has been set.
library_path = _get_full_path(os.environ[ENV_MJLIB_PATH])
except KeyError:
library_path = ctypes.util.find_library(MJLIB_NAME)
if not library_path:
library_path = _get_full_path(DEFAULT_MJLIB_PATH)
if not os.path.isfile(library_path):
raise OSError("Cannot find MuJoCo library at {}.".format(library_path))
if platform.system() == "Linux":
_maybe_load_linux_dynamic_deps(os.path.dirname(library_path))
return ctypes.cdll.LoadLibrary(library_path)
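# Usage sketch (hedged; assumes the standard MuJoCo 2.0 C API, where
# mj_activate takes the path to a license key file):
#
#   mjlib = get_mjlib()
#   mjlib.mj_activate(to_binary_string(get_mjkey_path()))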
def get_mjkey_path():
"""Returns a path to the MuJoCo key file."""
raw_path = os.environ.get(ENV_MJKEY_PATH, DEFAULT_MJKEY_PATH)
return _get_full_path(raw_path)
class WrapperBase(object):
"""Base class for wrappers that provide getters/setters for ctypes structs."""
# This is needed so that the __del__ methods of MjModel and MjData can still
# succeed in cases where an exception occurs during __init__() before the _ptr
# attribute has been assigned.
_ptr = None
def __init__(self, ptr, model=None):
"""Constructs a wrapper instance from a `ctypes.Structure`.
Args:
ptr: `ctypes.POINTER` to the struct to be wrapped.
model: `MjModel` instance; needed by `MjDataWrapper` in order to get the
dimensions of dynamically-sized arrays at runtime.
"""
self._ptr = ptr
self._model = model
@property
def ptr(self):
"""Pointer to the underlying `ctypes.Structure` instance."""
return self._ptr
class CachedProperty(property):
"""A property that is evaluated only once per object instance."""
def __init__(self, func, doc=None):
super(CachedProperty, self).__init__(fget=func, doc=doc)
self._name = func.__name__
def __get__(self, obj, cls):
if obj is None:
return self
obj_dict = obj.__dict__
try:
return obj_dict[self._name]
except KeyError:
return obj_dict.setdefault(self._name, self.fget(obj))
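# Usage sketch (illustrative, not from this file): the first access runs the
# getter and caches the result in the instance __dict__; later accesses return
# the cached value without re-running the getter.
#
#   class Model(object):
#     @CachedProperty
#     def expensive(self):
#       return compute_once()   # hypothetical helper, evaluated only once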
def _as_array(src, shape):
"""Converts a native `dmc_gen` array to a managed numpy buffer.
Args:
src: A ctypes pointer or array.
shape: A tuple specifying the dimensions of the output array.
Returns:
A numpy array.
"""
# To work around a memory leak in numpy, we have to go through this
# frombuffer method instead of calling ctypeslib.as_array. See
# https://github.com/numpy/numpy/issues/6511
  # return np.ctypeslib.as_array(src, shape)
# This is part of the public API. See
# http://git.net/ml/python.ctypes/2008-02/msg00014.html
ctype = src._type_ # pylint: disable=protected-access
size = np.product(shape)
ptr = ctypes.cast(src, ctypes.POINTER(ctype * size))
buf = np.frombuffer(ptr.contents, dtype=ctype)
buf.shape = shape
# If we are wrapping an array of ctypes structs, return a `numpy.recarray`.
# This allows the fields of the struct to be accessed as attributes.
if issubclass(ctype, ctypes.Structure):
buf = buf.view(np.recarray)
return buf
def buf_to_npy(src, shape, np_dtype=None):
"""Returns a numpy array view of the contents of a ctypes pointer or array.
Args:
src: A ctypes pointer or array.
shape: A tuple specifying the dimensions of the output array.
np_dtype: A string or `np.dtype` object specifying the dtype of the output
      array. If None, the dtype is inferred from the type of `src`.
Returns:
A numpy array.
"""
# This causes a harmless RuntimeWarning about mismatching buffer format
# strings due to a bug in ctypes: http://stackoverflow.com/q/4964101/1461210
arr = _as_array(src, shape)
if np_dtype is not None:
arr.dtype = np_dtype
return arr
@functools.wraps(np.ctypeslib.ndpointer)
def ndptr(*args, **kwargs):
"""Wraps `np.ctypeslib.ndpointer` to allow passing None for NULL pointers."""
base = np.ctypeslib.ndpointer(*args, **kwargs)
def from_param(_, obj):
if obj is None:
return obj
else:
return base.from_param(obj)
return type(base.__name__, (base,), {"from_param": classmethod(from_param)})
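# Usage sketch (an assumption about typical call sites; `some_cfunc` is a
# hypothetical ctypes function): ndptr lets a prototype accept either a numpy
# array or None where the C API allows a NULL pointer, e.g.
#
#   some_cfunc.argtypes = [ndptr(dtype=np.float64, flags="C_CONTIGUOUS")]
#   some_cfunc(None)          # passed through as NULL
#   some_cfunc(np.zeros(3))   # validated by the ndpointer base class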
_INVALID_CALLBACK_TYPE = "value must be callable, c_void_p, or None: got {!r}"
def cast_func_to_c_void_p(func, cfunctype):
"""Casts a native function pointer or a Python callable into `c_void_p`.
Args:
func: A callable, or a `c_void_p` pointing to a native function, or `None`.
cfunctype: A `CFUNCTYPE` prototype that is used to wrap `func` if it is
a Python callable.
Returns:
A tuple `(func_ptr, wrapped_pyfunc)`, where `func_ptr` is a `c_void_p`
object, and `wrapped_pyfunc` is a `CFUNCTYPE` object that wraps `func` if
it is a Python callable. (If `func` is not a Python callable then
`wrapped_pyfunc` is `None`.)
"""
if not (callable(func) or isinstance(func, ctypes.c_void_p) or func is None):
raise TypeError(_INVALID_CALLBACK_TYPE.format(func))
try:
new_func_ptr = ctypes.cast(func, ctypes.c_void_p)
wrapped_pyfunc = None
except ctypes.ArgumentError:
wrapped_pyfunc = cfunctype(func)
new_func_ptr = ctypes.cast(wrapped_pyfunc, ctypes.c_void_p)
return new_func_ptr, wrapped_pyfunc
| 31.675277
| 80
| 0.715051
|
d8bb2461dab4c13758acf9655af33534c9ef2a41
| 5,071
|
py
|
Python
|
packages/python/plotly/plotly/validators/streamtube/__init__.py
|
c-chaitanya/plotly.py
|
7bda89c77559747e67fb1608bf9309e97505a4f2
|
[
"MIT"
] | 7
|
2021-09-29T09:46:36.000Z
|
2022-03-24T08:30:41.000Z
|
packages/python/plotly/plotly/validators/streamtube/__init__.py
|
c-chaitanya/plotly.py
|
7bda89c77559747e67fb1608bf9309e97505a4f2
|
[
"MIT"
] | 1
|
2021-09-30T16:56:21.000Z
|
2021-10-15T09:14:12.000Z
|
packages/python/plotly/plotly/validators/streamtube/__init__.py
|
c-chaitanya/plotly.py
|
7bda89c77559747e67fb1608bf9309e97505a4f2
|
[
"MIT"
] | 1
|
2021-09-29T22:34:05.000Z
|
2021-09-29T22:34:05.000Z
|
import sys
if sys.version_info < (3, 7):
from ._zsrc import ZsrcValidator
from ._zhoverformat import ZhoverformatValidator
from ._z import ZValidator
from ._ysrc import YsrcValidator
from ._yhoverformat import YhoverformatValidator
from ._y import YValidator
from ._xsrc import XsrcValidator
from ._xhoverformat import XhoverformatValidator
from ._x import XValidator
from ._wsrc import WsrcValidator
from ._whoverformat import WhoverformatValidator
from ._w import WValidator
from ._vsrc import VsrcValidator
from ._visible import VisibleValidator
from ._vhoverformat import VhoverformatValidator
from ._v import VValidator
from ._usrc import UsrcValidator
from ._uirevision import UirevisionValidator
from ._uid import UidValidator
from ._uhoverformat import UhoverformatValidator
from ._u import UValidator
from ._text import TextValidator
from ._stream import StreamValidator
from ._starts import StartsValidator
from ._sizeref import SizerefValidator
from ._showscale import ShowscaleValidator
from ._showlegend import ShowlegendValidator
from ._scene import SceneValidator
from ._reversescale import ReversescaleValidator
from ._opacity import OpacityValidator
from ._name import NameValidator
from ._metasrc import MetasrcValidator
from ._meta import MetaValidator
from ._maxdisplayed import MaxdisplayedValidator
from ._lightposition import LightpositionValidator
from ._lighting import LightingValidator
from ._legendrank import LegendrankValidator
from ._legendgroup import LegendgroupValidator
from ._idssrc import IdssrcValidator
from ._ids import IdsValidator
from ._hovertext import HovertextValidator
from ._hovertemplatesrc import HovertemplatesrcValidator
from ._hovertemplate import HovertemplateValidator
from ._hoverlabel import HoverlabelValidator
from ._hoverinfosrc import HoverinfosrcValidator
from ._hoverinfo import HoverinfoValidator
from ._customdatasrc import CustomdatasrcValidator
from ._customdata import CustomdataValidator
from ._colorscale import ColorscaleValidator
from ._colorbar import ColorbarValidator
from ._coloraxis import ColoraxisValidator
from ._cmin import CminValidator
from ._cmid import CmidValidator
from ._cmax import CmaxValidator
from ._cauto import CautoValidator
from ._autocolorscale import AutocolorscaleValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
[
"._zsrc.ZsrcValidator",
"._zhoverformat.ZhoverformatValidator",
"._z.ZValidator",
"._ysrc.YsrcValidator",
"._yhoverformat.YhoverformatValidator",
"._y.YValidator",
"._xsrc.XsrcValidator",
"._xhoverformat.XhoverformatValidator",
"._x.XValidator",
"._wsrc.WsrcValidator",
"._whoverformat.WhoverformatValidator",
"._w.WValidator",
"._vsrc.VsrcValidator",
"._visible.VisibleValidator",
"._vhoverformat.VhoverformatValidator",
"._v.VValidator",
"._usrc.UsrcValidator",
"._uirevision.UirevisionValidator",
"._uid.UidValidator",
"._uhoverformat.UhoverformatValidator",
"._u.UValidator",
"._text.TextValidator",
"._stream.StreamValidator",
"._starts.StartsValidator",
"._sizeref.SizerefValidator",
"._showscale.ShowscaleValidator",
"._showlegend.ShowlegendValidator",
"._scene.SceneValidator",
"._reversescale.ReversescaleValidator",
"._opacity.OpacityValidator",
"._name.NameValidator",
"._metasrc.MetasrcValidator",
"._meta.MetaValidator",
"._maxdisplayed.MaxdisplayedValidator",
"._lightposition.LightpositionValidator",
"._lighting.LightingValidator",
"._legendrank.LegendrankValidator",
"._legendgroup.LegendgroupValidator",
"._idssrc.IdssrcValidator",
"._ids.IdsValidator",
"._hovertext.HovertextValidator",
"._hovertemplatesrc.HovertemplatesrcValidator",
"._hovertemplate.HovertemplateValidator",
"._hoverlabel.HoverlabelValidator",
"._hoverinfosrc.HoverinfosrcValidator",
"._hoverinfo.HoverinfoValidator",
"._customdatasrc.CustomdatasrcValidator",
"._customdata.CustomdataValidator",
"._colorscale.ColorscaleValidator",
"._colorbar.ColorbarValidator",
"._coloraxis.ColoraxisValidator",
"._cmin.CminValidator",
"._cmid.CmidValidator",
"._cmax.CmaxValidator",
"._cauto.CautoValidator",
"._autocolorscale.AutocolorscaleValidator",
],
)
| 40.568
| 60
| 0.68448
|
3bd29a72a125490b591734838cbc348703b21121
| 12,646
|
py
|
Python
|
tests/components/switch/test_device_automation.py
|
cpptrain/home-assistant
|
a93715d770b2229b306f6d9f92318b44ff96c7a7
|
[
"Apache-2.0"
] | 1
|
2019-09-17T03:26:56.000Z
|
2019-09-17T03:26:56.000Z
|
tests/components/switch/test_device_automation.py
|
cpptrain/home-assistant
|
a93715d770b2229b306f6d9f92318b44ff96c7a7
|
[
"Apache-2.0"
] | null | null | null |
tests/components/switch/test_device_automation.py
|
cpptrain/home-assistant
|
a93715d770b2229b306f6d9f92318b44ff96c7a7
|
[
"Apache-2.0"
] | null | null | null |
"""The test for switch device automation."""
import pytest
from homeassistant.components.switch import DOMAIN
from homeassistant.const import STATE_ON, STATE_OFF, CONF_PLATFORM
from homeassistant.setup import async_setup_component
import homeassistant.components.automation as automation
from homeassistant.components.device_automation import (
_async_get_device_automations as async_get_device_automations,
)
from homeassistant.helpers import device_registry
from tests.common import (
MockConfigEntry,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock serivce."""
return async_mock_service(hass, "test", "automation")
def _same_lists(a, b):
    """Return True if a and b have equal length and every item of a appears in b."""
    if len(a) != len(b):
return False
for d in a:
if d not in b:
return False
return True
async def test_get_actions(hass, device_reg, entity_reg):
"""Test we get the expected actions from a switch."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_actions = [
{
"domain": DOMAIN,
"type": "turn_off",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"domain": DOMAIN,
"type": "turn_on",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"domain": DOMAIN,
"type": "toggle",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
]
actions = await async_get_device_automations(
hass, "async_get_actions", device_entry.id
)
assert _same_lists(actions, expected_actions)
async def test_get_conditions(hass, device_reg, entity_reg):
"""Test we get the expected conditions from a switch."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_conditions = [
{
"condition": "device",
"domain": DOMAIN,
"type": "is_off",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"condition": "device",
"domain": DOMAIN,
"type": "is_on",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
]
conditions = await async_get_device_automations(
hass, "async_get_conditions", device_entry.id
)
assert _same_lists(conditions, expected_conditions)
async def test_get_triggers(hass, device_reg, entity_reg):
"""Test we get the expected triggers from a switch."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_triggers = [
{
"platform": "device",
"domain": DOMAIN,
"type": "turn_off",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"platform": "device",
"domain": DOMAIN,
"type": "turn_on",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
]
triggers = await async_get_device_automations(
hass, "async_get_triggers", device_entry.id
)
assert _same_lists(triggers, expected_triggers)
async def test_if_fires_on_state_change(hass, calls):
"""Test for turn_on and turn_off triggers firing."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
ent1, ent2, ent3 = platform.ENTITIES
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "turn_on",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_on {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "turn_off",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_off {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
},
]
},
)
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_ON
assert len(calls) == 0
hass.states.async_set(ent1.entity_id, STATE_OFF)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "turn_off state - {} - on - off - None".format(
ent1.entity_id
)
hass.states.async_set(ent1.entity_id, STATE_ON)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "turn_on state - {} - off - on - None".format(
ent1.entity_id
)
async def test_if_state(hass, calls):
"""Test for turn_on and turn_off conditions."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
ent1, ent2, ent3 = platform.ENTITIES
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "is_on",
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "is_on {{ trigger.%s }}"
% "}} - {{ trigger.".join(("platform", "event.event_type"))
},
},
},
{
"trigger": {"platform": "event", "event_type": "test_event2"},
"condition": [
{
"condition": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "is_off",
}
],
"action": {
"service": "test.automation",
"data_template": {
"some": "is_off {{ trigger.%s }}"
% "}} - {{ trigger.".join(("platform", "event.event_type"))
},
},
},
]
},
)
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_ON
assert len(calls) == 0
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "is_on event - test_event1"
hass.states.async_set(ent1.entity_id, STATE_OFF)
hass.bus.async_fire("test_event1")
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "is_off event - test_event2"
async def test_action(hass, calls):
"""Test for turn_on and turn_off actions."""
platform = getattr(hass.components, f"test.{DOMAIN}")
platform.init()
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
ent1, ent2, ent3 = platform.ENTITIES
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": "event", "event_type": "test_event1"},
"action": {
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "turn_off",
},
},
{
"trigger": {"platform": "event", "event_type": "test_event2"},
"action": {
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "turn_on",
},
},
{
"trigger": {"platform": "event", "event_type": "test_event3"},
"action": {
"domain": DOMAIN,
"device_id": "",
"entity_id": ent1.entity_id,
"type": "toggle",
},
},
]
},
)
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_ON
assert len(calls) == 0
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_OFF
hass.bus.async_fire("test_event1")
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_OFF
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_ON
hass.bus.async_fire("test_event2")
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_ON
hass.bus.async_fire("test_event3")
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_OFF
hass.bus.async_fire("test_event3")
await hass.async_block_till_done()
assert hass.states.get(ent1.entity_id).state == STATE_ON
| 33.812834
| 87
| 0.500474
|
3f108ca47019bbafac04ef8ba678bd03cc071e73
| 2,193
|
py
|
Python
|
examples/example2-2-api-client-all-purpose-agent.py
|
Orange-OpenSource/pyngsi
|
86bdb3218850b82d219278b831a3e96b0fb4655b
|
[
"Apache-2.0"
] | 1
|
2021-11-05T16:45:04.000Z
|
2021-11-05T16:45:04.000Z
|
examples/example2-2-api-client-all-purpose-agent.py
|
Orange-OpenSource/pyngsi
|
86bdb3218850b82d219278b831a3e96b0fb4655b
|
[
"Apache-2.0"
] | null | null | null |
examples/example2-2-api-client-all-purpose-agent.py
|
Orange-OpenSource/pyngsi
|
86bdb3218850b82d219278b831a3e96b0fb4655b
|
[
"Apache-2.0"
] | 1
|
2021-06-22T09:14:15.000Z
|
2021-06-22T09:14:15.000Z
|
#!/usr/bin/env python3
# v2.1.9 introduces SourceFunc to facilitate the creation of an agent that retrieves its data from an API
# There is no longer any need to inherit from the Source class
# Suitable for all APIs
import requests
from typing import List
from pyngsi.agent import NgsiAgent
from pyngsi.sources.source import Row
from pyngsi.sources.more_sources import SourceFunc
from pyngsi.sink import SinkStdout
from pyngsi.ngsi import DataModel
GH_URL = "https://api.github.com"
GH_ENDPOINT = "/repos"
def retrieve_latest_commits(user: str = "numpy", repo: str = "numpy", ncommits: int = 5) -> List:
url = f"{GH_URL}{GH_ENDPOINT}/{user}/{repo}/commits"
    headers = {'Accept': 'application/vnd.github.v3+json'}  # GitHub expects this media type in the Accept header
params = {'per_page': ncommits}
response = requests.get(
url, headers=headers, params=params)
response.raise_for_status()
# returns the Python object parsed from the JSON string result (here an array)
# it's ok here because the API returns a JSON array
return response.json()
def build_entity(row: Row) -> DataModel:
sha = row.record['sha']
commit = row.record['commit']
author = commit['author']
m = DataModel(id=f"{sha}-GitCommit-Numpy-{row.provider}", type="GitCommit")
m.add("author", author['name'])
m.add("dateObserved", author['date'], isdate=True)
m.add("message", commit['message'], urlencode=True)
return m
def main():
# just provide SourceApi with your own function (that returns an array)
# default is to retrieve 5 commits
# override the default provider "api" with "github"
src = SourceFunc(retrieve_latest_commits, "github")
# in case you want to retrieve 3 commits, you could use lambda
# src = SourceFunc(lambda: retrieve_latest_commits(ncommits=3), "github")
# one could prefer to use the partial function from functools
# src = SourceFunc(partial(retrieve_latest_commits, ncommits=3), "github")
# if you have an Orion server available, just replace SinkStdout() with SinkOrion()
sink = SinkStdout()
agent = NgsiAgent.create_agent(src, sink, process=build_entity)
agent.run()
agent.close()
if __name__ == '__main__':
main()
| 32.731343
| 105
| 0.70725
|
94704db1d518912c5edd54b5e7ab89f7f0a0fc49
| 333
|
py
|
Python
|
account/compat.py
|
ProgrammingLanguageLeader/django-user-accounts
|
ad301bd57ae65c456ed6e45ae3b5cb06d4e2378b
|
[
"MIT"
] | 4
|
2017-01-11T21:00:31.000Z
|
2020-02-05T20:41:13.000Z
|
account/compat.py
|
ProgrammingLanguageLeader/django-user-accounts
|
ad301bd57ae65c456ed6e45ae3b5cb06d4e2378b
|
[
"MIT"
] | 10
|
2019-12-26T17:31:31.000Z
|
2022-03-21T22:17:33.000Z
|
account/compat.py
|
ProgrammingLanguageLeader/django-user-accounts
|
ad301bd57ae65c456ed6e45ae3b5cb06d4e2378b
|
[
"MIT"
] | 9
|
2015-02-02T20:30:12.000Z
|
2017-12-18T18:53:15.000Z
|
import django
try:
from django.core.urlresolvers import resolve, reverse, NoReverseMatch
except ImportError:
from django.urls import resolve, reverse, NoReverseMatch # noqa
def is_authenticated(user):
if django.VERSION >= (1, 10):
return user.is_authenticated
else:
return user.is_authenticated()
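# Usage sketch (hypothetical request object; illustration only): call the helper
# instead of touching the attribute/method directly, so the same code runs on
# Django versions both below and at/above 1.10:
#
#   if is_authenticated(request.user):
#       ...  # proceed with the logged-in flow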
| 23.785714
| 73
| 0.723724
|
f9faa35fb9d384829e41ab1b17f2b53a1854c64f
| 2,438
|
py
|
Python
|
blaze/bcolz.py
|
chdoig/blaze
|
caa5a497e1ca1ceb1cf585483312ff4cd74d0bda
|
[
"BSD-3-Clause"
] | 1
|
2015-01-18T23:59:57.000Z
|
2015-01-18T23:59:57.000Z
|
blaze/bcolz.py
|
chdoig/blaze
|
caa5a497e1ca1ceb1cf585483312ff4cd74d0bda
|
[
"BSD-3-Clause"
] | null | null | null |
blaze/bcolz.py
|
chdoig/blaze
|
caa5a497e1ca1ceb1cf585483312ff4cd74d0bda
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import, division, print_function
import bcolz
from bcolz import carray, ctable
import numpy as np
from pandas import DataFrame
from collections import Iterator
from toolz import partition_all
from .dispatch import dispatch
from .compute.bcolz import *
__all__ = ['into', 'bcolz', 'chunks']
@dispatch(type, (ctable, carray))
def into(a, b, **kwargs):
f = into.dispatch(a, type(b))
return f(a, b, **kwargs)
@dispatch((tuple, set, list), (ctable, carray))
def into(o, b):
return into(o, into(np.ndarray(0), b))
@dispatch(np.ndarray, (ctable, carray))
def into(a, b, **kwargs):
return b[:]
@dispatch(ctable, np.ndarray)
def into(a, b, **kwargs):
return ctable(b, **kwargs)
@dispatch(carray, np.ndarray)
def into(a, b, **kwargs):
return carray(b, **kwargs)
@dispatch(carray, (tuple, list))
def into(a, b, dtype=None, **kwargs):
x = into(np.ndarray(0), b, dtype=dtype)
return into(a, x, **kwargs)
@dispatch(ctable, (tuple, list))
def into(a, b, types=None, **kwargs):
if isinstance(b[0], (tuple, list)):
if not types:
            types = [None] * len(b[0])
return ctable([into(np.ndarray(0), c2, dtype=dt)
for (c2, dt) in zip(zip(*b), types)],
**kwargs)
else:
return ctable([into(np.ndarray(0), b, dtype=types)],
**kwargs)
@dispatch((carray, ctable), Iterator)
def into(a, b, **kwargs):
chunks = partition_all(1024, b)
chunk = next(chunks)
a = into(a, chunk, **kwargs)
for chunk in chunks:
a.append(list(zip(*chunk)))
a.flush()
return a
@dispatch(DataFrame, ctable)
def into(a, b, columns=None, schema=None):
if not columns and schema:
columns = dshape(schema)[0].names
return DataFrame.from_items(((column, b[column][:]) for column in
sorted(b.names)),
orient='columns',
columns=columns)
from .compute.chunks import ChunkIterator, chunks
@dispatch((carray, ctable), ChunkIterator)
def into(a, b, **kwargs):
b = iter(b)
a = into(a, next(b), **kwargs)
for chunk in b:
a.append(into(np.ndarray(0), chunk))
a.flush()
return a
from blaze.data.core import DataDescriptor
@dispatch(DataDescriptor, (ctable, carray))
def into(a, b, **kwargs):
a.extend_chunks(chunks(b))
return a
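# Minimal usage sketch (assumes bcolz is installed; illustration only, relying
# solely on the dispatch rules defined above):
#
#   import numpy as np
#   import bcolz
#   ca = bcolz.carray(np.arange(10.))
#   arr = into(np.ndarray(0), ca)   # dispatches on (np.ndarray, carray) -> ca[:]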
| 24.626263
| 69
| 0.603774
|
e004a259737af345a3be3404258467f877024965
| 1,330
|
py
|
Python
|
catkin_ws/build/catkin_generated/generate_cached_setup.py
|
willdavidc/piel
|
ab6abbac2b5683dc7d477754653e170a1e67f9bc
|
[
"MIT"
] | null | null | null |
catkin_ws/build/catkin_generated/generate_cached_setup.py
|
willdavidc/piel
|
ab6abbac2b5683dc7d477754653e170a1e67f9bc
|
[
"MIT"
] | null | null | null |
catkin_ws/build/catkin_generated/generate_cached_setup.py
|
willdavidc/piel
|
ab6abbac2b5683dc7d477754653e170a1e67f9bc
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/kinetic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/kinetic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/home/pi/Desktop/piel/catkin_ws/devel;/opt/ros/kinetic".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/pi/Desktop/piel/catkin_ws/devel/env.sh')
output_filename = '/home/pi/Desktop/piel/catkin_ws/build/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| 42.903226
| 102
| 0.736842
|
5db81f4348769bdd8cfdd71f2e02741530552433
| 1,549
|
py
|
Python
|
Algorithms/mf_Linear.py
|
ParikhKadam/Forecasting_Mutual_Funds
|
7d033ff7370114967ba3facb7e243b95c3a6540d
|
[
"MIT"
] | 15
|
2020-12-22T04:58:39.000Z
|
2022-03-17T10:34:20.000Z
|
Algorithms/mf_Linear.py
|
ParikhKadam/Forecasting_Mutual_Funds
|
7d033ff7370114967ba3facb7e243b95c3a6540d
|
[
"MIT"
] | 1
|
2021-12-17T04:20:10.000Z
|
2021-12-18T03:58:36.000Z
|
Algorithms/mf_Linear.py
|
ParikhKadam/Forecasting_Mutual_Funds
|
7d033ff7370114967ba3facb7e243b95c3a6540d
|
[
"MIT"
] | 12
|
2020-10-11T11:33:35.000Z
|
2022-02-18T06:50:50.000Z
|
from Algorithms import *
def linear(df):
days = 30
last_week = df.iloc[-days:]
last_day = df.iloc[-1:]
df_new = df
df_new['date'] = pd.to_datetime(df_new['date'], dayfirst=True)
df_new['Prev CloseNAV'] = df_new['nav']
df_new['NAV'] = df_new['nav'].shift(1)
df_new.set_index('date', inplace=True)
X = df_new[['Prev CloseNAV']][1:]
y = df_new['NAV'][1:]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# To Train model on previous data
model = LinearRegression().fit(X_train, y_train)
r_sq = model.score(X_test, y_test)
    # print('coefficient of determination:', r_sq)
# print('intercept:', model.intercept_)
# print('slope:', model.coef_)
# for Test purpose to check confidence of model
y_pred_test = model.predict(X_test)
#RMSE
    rmse = math.sqrt(mean_squared_error(y_test, y_pred_test))
# Actual prediction
pre_date = date.today()
day_index = [(pre_date + dt.timedelta(days=i)) for i in range(1,31)]
last_week = [float(row['nav']) for index,row in last_week.iterrows()]
# 1 day forecasting
X_new = pd.DataFrame(float(last_day['nav']),columns =['Prev CloseNAV'],index=[pre_date])
y_pred = model.predict(X_new)
# print("1 day prediction :",y_pred)
y_pred_30 = [y_pred]
# month forecasting
for i in range(0,29):
X_new = pd.DataFrame(y_pred,columns =['Prev CloseNAV'],index=[day_index[i]])
y_pred = model.predict(X_new)
y_pred_30.append(y_pred)
return y_pred_30, rmse
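# Usage sketch (hypothetical file name; linear() expects a DataFrame with
# 'date' and 'nav' columns):
#
#   df = pd.read_csv('fund_nav.csv')
#   forecast_30, rmse = linear(df)  # 30 one-step-ahead NAV predictions + test RMSE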
| 31.612245
| 92
| 0.647515
|
6f545345eeb93a9f7e2ed2ac8fe90389490ae66d
| 4,407
|
py
|
Python
|
src/utils/cameraspoofer/cameraspooferprocess.py
|
KeithAzzopardi1998/BFMC_Startup
|
bb5b422323f9e710edff33d7ba7a1683b31249a1
|
[
"BSD-3-Clause"
] | 1
|
2020-01-23T16:09:26.000Z
|
2020-01-23T16:09:26.000Z
|
src/utils/cameraspoofer/cameraspooferprocess.py
|
KeithAzzopardi1998/BFMC_Startup
|
bb5b422323f9e710edff33d7ba7a1683b31249a1
|
[
"BSD-3-Clause"
] | null | null | null |
src/utils/cameraspoofer/cameraspooferprocess.py
|
KeithAzzopardi1998/BFMC_Startup
|
bb5b422323f9e710edff33d7ba7a1683b31249a1
|
[
"BSD-3-Clause"
] | 2
|
2020-04-07T23:36:56.000Z
|
2020-04-10T18:14:01.000Z
|
# Copyright (c) 2019, Bosch Engineering Center Cluj and BFMC organizers
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
import cv2
import glob
import time
from threading import Thread
from src.utils.templates.workerprocess import WorkerProcess
class CameraSpooferProcess(WorkerProcess):
#================================ INIT ===============================================
def __init__(self, inPs,outPs, videoDir, ext = '.h264'):
"""Processed used for spoofing a camera/ publishing a video stream from a folder
with videos
Parameters
----------
        inPs : list(Pipe)
            list of input pipes (unused here; kept for the WorkerProcess interface)
        outPs : list(Pipe)
            list of output pipes (order does not matter)
videoDir : [str]
path to a dir with videos
ext : str, optional
the extension of the file, by default '.h264'
"""
super(CameraSpooferProcess,self).__init__(inPs,outPs)
# params
self.videoSize = (640,480)
self.videoDir = videoDir
self.videos = self.open_files(self.videoDir, ext = ext)
# ===================================== INIT VIDEOS ==================================
def open_files(self, inputDir, ext):
"""Open all files with the given path and extension
Parameters
----------
inputDir : string
the input directory absolute path
ext : string
            the extension of the files
Returns
-------
list
A list of the files in the folder with the specified file extension.
"""
files = glob.glob(inputDir + '/*' + ext)
return files
# ===================================== INIT THREADS =================================
def _init_threads(self):
"""Initialize the thread of the process.
"""
thPlay = Thread(name='VideoPlayerThread',target= self.play_video, args=(self.videos, ))
self.threads.append(thPlay)
# ===================================== PLAY VIDEO ===================================
def play_video(self, videos):
"""Iterate through each video in the folder, open a cap and publish the frames.
Parameters
----------
videos : list(string)
The list of files with the videos.
"""
while True:
for video in videos:
cap = cv2.VideoCapture(video)
while True:
ret, frame = cap.read()
stamp = time.time()
if ret:
frame = cv2.resize(frame, self.videoSize)
for p in self.outPs:
p.send([[stamp], frame])
else:
break
cap.release()
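# Usage sketch (hypothetical pipes and directory; assumes WorkerProcess exposes
# the usual multiprocessing.Process start() interface and that its run() starts
# the registered threads):
#
#   from multiprocessing import Pipe
#   recvP, sendP = Pipe(duplex=False)
#   proc = CameraSpooferProcess([], [sendP], '/path/to/videos', ext='.h264')
#   proc.daemon = True
#   proc.start()
#   stamps, frame = recvP.recv()  # stamps == [time.time()] from play_video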
| 37.033613
| 95
| 0.571818
|
7bed72975a738781ea2c7279332e45f8966b1445
| 3,110
|
py
|
Python
|
tests/MLAlgorithms (Single Node Dense)/src/np_algs.py
|
ADALabUCSD/SLAB
|
86d71b345c50b3a73eefcad3da39dc8d919d9652
|
[
"Apache-2.0"
] | 2
|
2020-02-23T02:42:52.000Z
|
2021-11-04T06:30:13.000Z
|
tests/MLAlgorithms (Single Node Dense)/src/np_algs.py
|
ADALabUCSD/SLAB
|
86d71b345c50b3a73eefcad3da39dc8d919d9652
|
[
"Apache-2.0"
] | null | null | null |
tests/MLAlgorithms (Single Node Dense)/src/np_algs.py
|
ADALabUCSD/SLAB
|
86d71b345c50b3a73eefcad3da39dc8d919d9652
|
[
"Apache-2.0"
] | 1
|
2021-01-10T15:27:15.000Z
|
2021-01-10T15:27:15.000Z
|
# Copyright 2018 Anthony H Thomas and Arun Kumar
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
with open(__file__) as fh: print fh.read()  # echo this script's own source to stdout (Python 2 print statement)
import os
import sys
import numpy as np
import numpy.linalg as alg
import pandas as pd
ROOT = os.getenv('BENCHMARK_PROJECT_ROOT')
if (ROOT is None):
msg = 'Please set environment variable BENCHMARK_PROJECT_ROOT'
raise StandardError(msg)
sys.path.append(os.path.join(ROOT,'lib','python'))
from sql_cxn import SQLCxn
import np_timing_utils as utils
def main(kwargs):
mattype = kwargs['mattype']
opType = kwargs['opType']
nrow = int(kwargs['nrow'])
ncol = int(kwargs['ncol'])
nproc = int(kwargs['nproc'])
path = '../output/np_{}.txt'.format(opType)
colnames = ['nproc','time1','time2','time3','time4','time5']
runTimes = pd.DataFrame(np.zeros((1,len(colnames))))
runTimes.columns = colnames
env = {
'logit_reg': logit_reg,
'reg': reg,
'gnmf': gnmf,
'robust_se': robust_se
}
X = np.random.rand(nrow, ncol)
y = np.random.rand(nrow,1).ravel() if opType != 'gnmf' else None
if opType == 'logit':
call = 'logit_reg(X,y)'
elif opType == 'reg':
call = 'reg(X,y)'
elif opType == 'gnmf':
call = 'gnmf(X, 10)'
elif opType == 'robust':
b = reg(X, y)
y_hat = X.dot(b)
env['eps'] = np.power(y_hat, 2)
call = 'robust_se(X, eps)'
else:
raise StandardError('Invalid Operation')
env['X'] = X
env['y'] = y
runTimes.ix[:,'nproc'] = nproc
runTimes.ix[:,1:] = utils.timeOp(call, env)
writeHeader = not os.path.exists(path)
runTimes.to_csv(path, index=False, header = writeHeader, mode = 'a')
def logit_reg(X, y, iterations=3):
N,K = X.shape
w = np.random.rand(K,1).ravel()
iteration = 0
step_size = 0.001
while iteration < iterations:
xb = X.dot(w)
        delta = y - 1/(1 + np.exp(-xb))  # sigmoid; the original misplaced the parentheses
step_size /= 2
w = w + step_size*(X.T.dot(delta)/float(N))
iteration += 1
return w
def gnmf(X, r, iterations=3):
N,K = X.shape
W = np.random.rand(N, r)
H = np.random.rand(r, K)
iteration = 0
while iteration < iterations:
W = W*((X.dot(H.T))/(W.dot(H.dot(H.T))))
H = H*((W.T.dot(X))/((W.T.dot(W).dot(H))))
iteration += 1
return W,H
def reg(X,y):
return alg.solve(X.T.dot(X), X.T.dot(y))
def robust_se(X, r2):
S = X.T*r2
XTX_INV = alg.inv(X.T.dot(X))
return XTX_INV.dot(S.dot(X)).dot(XTX_INV)
if __name__=='__main__':
args = utils.parse_cmd_args(sys.argv[1:])
main(args)
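# Sanity-check sketch (Python 2 style to match this file; illustration only):
# the closed-form solver reg() should agree with numpy's least-squares routine
# on a well-conditioned problem:
#
#   X = np.random.rand(100, 5); y = np.random.rand(100)
#   assert np.allclose(reg(X, y), np.linalg.lstsq(X, y)[0])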
| 28.272727
| 74
| 0.615434
|
a44f3f2ba6af17e2ad654c0615a2ac58acdc7d7f
| 4,182
|
py
|
Python
|
homeassistant/components/nam/config_flow.py
|
basicpail/core
|
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
|
[
"Apache-2.0"
] | 11
|
2018-02-16T15:35:47.000Z
|
2020-01-14T15:20:00.000Z
|
homeassistant/components/nam/config_flow.py
|
basicpail/core
|
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
|
[
"Apache-2.0"
] | 77
|
2020-07-16T16:43:09.000Z
|
2022-03-31T06:14:37.000Z
|
homeassistant/components/nam/config_flow.py
|
Vaarlion/core
|
f3de8b9f28de01abf72c0f5bb0b457eb1841f201
|
[
"Apache-2.0"
] | 11
|
2020-12-16T13:48:14.000Z
|
2022-02-01T00:28:05.000Z
|
"""Adds config flow for Nettigo Air Monitor."""
from __future__ import annotations
import asyncio
import logging
from typing import Any, cast
from aiohttp.client_exceptions import ClientConnectorError
import async_timeout
from nettigo_air_monitor import ApiError, CannotGetMac, NettigoAirMonitor
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import ATTR_NAME, CONF_HOST
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.device_registry import format_mac
from homeassistant.helpers.typing import DiscoveryInfoType
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
class NAMFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Config flow for Nettigo Air Monitor."""
VERSION = 1
def __init__(self) -> None:
"""Initialize flow."""
self.host: str | None = None
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle a flow initialized by the user."""
errors = {}
if user_input is not None:
self.host = user_input[CONF_HOST]
try:
mac = await self._async_get_mac(cast(str, self.host))
except (ApiError, ClientConnectorError, asyncio.TimeoutError):
errors["base"] = "cannot_connect"
except CannotGetMac:
return self.async_abort(reason="device_unsupported")
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
else:
await self.async_set_unique_id(format_mac(mac))
self._abort_if_unique_id_configured({CONF_HOST: self.host})
return self.async_create_entry(
title=cast(str, self.host),
data=user_input,
)
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(CONF_HOST, default=""): str,
}
),
errors=errors,
)
async def async_step_zeroconf(
self, discovery_info: DiscoveryInfoType
) -> FlowResult:
"""Handle zeroconf discovery."""
self.host = discovery_info[CONF_HOST]
try:
mac = await self._async_get_mac(cast(str, self.host))
except (ApiError, ClientConnectorError, asyncio.TimeoutError):
return self.async_abort(reason="cannot_connect")
except CannotGetMac:
return self.async_abort(reason="device_unsupported")
await self.async_set_unique_id(format_mac(mac))
self._abort_if_unique_id_configured({CONF_HOST: self.host})
self.context["title_placeholders"] = {
ATTR_NAME: discovery_info[ATTR_NAME].split(".")[0]
}
return await self.async_step_confirm_discovery()
async def async_step_confirm_discovery(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle discovery confirm."""
errors: dict = {}
if user_input is not None:
return self.async_create_entry(
title=cast(str, self.host),
data={CONF_HOST: self.host},
)
self._set_confirm_only()
return self.async_show_form(
step_id="confirm_discovery",
description_placeholders={CONF_HOST: self.host},
errors=errors,
)
async def _async_get_mac(self, host: str) -> str:
"""Get device MAC address."""
websession = async_get_clientsession(self.hass)
nam = NettigoAirMonitor(websession, host)
# Device firmware uses synchronous code and doesn't respond to http queries
# when reading data from sensors. The nettigo-air-monitor library tries to get
# the data 4 times, so we use a longer than usual timeout here.
with async_timeout.timeout(30):
return await nam.async_get_mac_address()
| 34.278689
| 86
| 0.639168
|
7ffec40c13cd2a03f9488697df13b0b6422efc4e
| 2,679
|
py
|
Python
|
test/command/test_write_policy_click.py
|
zscholl/policy_sentry
|
8cc3f6875a33837d075e86702a3b37f16862840c
|
[
"MIT"
] | 1,577
|
2019-10-09T21:19:40.000Z
|
2022-03-29T10:28:37.000Z
|
test/command/test_write_policy_click.py
|
zscholl/policy_sentry
|
8cc3f6875a33837d075e86702a3b37f16862840c
|
[
"MIT"
] | 218
|
2019-10-11T18:40:05.000Z
|
2022-03-01T19:00:24.000Z
|
test/command/test_write_policy_click.py
|
zscholl/policy_sentry
|
8cc3f6875a33837d075e86702a3b37f16862840c
|
[
"MIT"
] | 118
|
2019-10-09T22:40:18.000Z
|
2022-03-20T14:35:24.000Z
|
import os
import json
import unittest
from click.testing import CliRunner
from policy_sentry.command.write_policy import write_policy
from policy_sentry.util.policy_files import get_sid_names_from_policy
test_file_directory = os.path.join(
os.path.dirname(__file__),
os.path.pardir,
os.path.pardir,
"examples",
"yml"
)
class PolicySentryClickUnitTests(unittest.TestCase):
def setUp(self):
self.runner = CliRunner()
def test_write_policy_basic_with_click(self):
"""command.write_policy: using crud.yml should return exit code 0"""
template_file = os.path.join(test_file_directory, "crud.yml")
result = self.runner.invoke(write_policy, ["--input-file", template_file])
self.assertTrue(result.exit_code == 0)
def test_click_crud_case_1(self):
"""write_policy: using 1-ssm-read.yml"""
template_file = os.path.join(test_file_directory, "crud-cases", "1-ssm-read.yml")
result = self.runner.invoke(write_policy, ["--input-file", template_file])
result_json = json.loads(result.output)
self.assertListEqual(get_sid_names_from_policy(result_json), ["SsmReadParameter"])
self.assertTrue(result.exit_code == 0)
def test_click_crud_case_2(self):
"""write_policy: using 2-skip-resource-constraints.yml"""
template_file = os.path.join(test_file_directory, "crud-cases", "2-skip-resource-constraints.yml")
result = self.runner.invoke(write_policy, ["--input-file", template_file])
result_json = json.loads(result.output)
self.assertListEqual(get_sid_names_from_policy(result_json), ["SkipResourceConstraints"])
self.assertTrue(result.exit_code == 0)
def test_click_crud_case_3(self):
"""write_policy: using 3-wildcard-only-single-actions.yml"""
template_file = os.path.join(test_file_directory, "crud-cases", "3-wildcard-only-single-actions.yml")
result = self.runner.invoke(write_policy, ["--input-file", template_file])
result_json = json.loads(result.output)
self.assertListEqual(get_sid_names_from_policy(result_json), ["MultMultNone"])
self.assertTrue(result.exit_code == 0)
def test_click_crud_case_4(self):
"""write_policy: using 4-wildcard-only-bulk-selection.yml"""
template_file = os.path.join(test_file_directory, "crud-cases", "4-wildcard-only-bulk-selection.yml")
result = self.runner.invoke(write_policy, ["--input-file", template_file])
result_json = json.loads(result.output)
self.assertListEqual(get_sid_names_from_policy(result_json), ["MultMultNone"])
self.assertTrue(result.exit_code == 0)
| 46.189655
| 109
| 0.711833
|
a03a80d4ecefd06c3cf7e882293db14014aed2f1
| 11,152
|
py
|
Python
|
jni-build/jni/include/tensorflow/contrib/distributions/python/ops/laplace.py
|
rcelebi/android-elfali
|
4ea14a58a18356ef9e16aba2e7dae84c02afba12
|
[
"Apache-2.0"
] | 680
|
2016-12-03T14:38:28.000Z
|
2022-02-16T04:06:45.000Z
|
jni-build/jni/include/tensorflow/contrib/distributions/python/ops/laplace.py
|
rcelebi/android-elfali
|
4ea14a58a18356ef9e16aba2e7dae84c02afba12
|
[
"Apache-2.0"
] | 38
|
2016-11-17T08:43:51.000Z
|
2019-11-12T12:27:04.000Z
|
jni-build/jni/include/tensorflow/contrib/distributions/python/ops/laplace.py
|
rcelebi/android-elfali
|
4ea14a58a18356ef9e16aba2e7dae84c02afba12
|
[
"Apache-2.0"
] | 250
|
2016-12-05T10:37:17.000Z
|
2022-03-18T21:26:55.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Laplace distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
class Laplace(distribution.Distribution):
"""The Laplace distribution with location and scale > 0 parameters.
#### Mathematical details
The PDF of this distribution is:
```f(x | mu, b, b > 0) = 0.5 / b exp(-|x - mu| / b)```
  Note that the Laplace distribution can be thought of as two exponential
distributions spliced together "back-to-back."
"""
def __init__(self,
loc,
scale,
validate_args=True,
allow_nan_stats=False,
name="Laplace"):
"""Construct Laplace distribution with parameters `loc` and `scale`.
The parameters `loc` and `scale` must be shaped in a way that supports
broadcasting (e.g., `loc / scale` is a valid operation).
Args:
loc: Floating point tensor which characterizes the location (center)
of the distribution.
scale: Positive floating point tensor which characterizes the spread of
the distribution.
validate_args: Whether to validate input with asserts. If `validate_args`
is `False`, and the inputs are invalid, correct behavior is not
guaranteed.
allow_nan_stats: Boolean, default `False`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: if `loc` and `scale` are of different dtype.
"""
self._allow_nan_stats = allow_nan_stats
self._validate_args = validate_args
with ops.op_scope([loc, scale], name):
loc = ops.convert_to_tensor(loc)
scale = ops.convert_to_tensor(scale)
with ops.control_dependencies([check_ops.assert_positive(scale)] if
validate_args else []):
self._name = name
self._loc = array_ops.identity(loc, name="loc")
self._scale = array_ops.identity(scale, name="scale")
self._batch_shape = self._ones().get_shape()
self._event_shape = tensor_shape.TensorShape([])
contrib_tensor_util.assert_same_float_dtype((loc, scale))
@property
def allow_nan_stats(self):
"""Boolean describing behavior when a stat is undefined for batch member."""
return self._allow_nan_stats
@property
def validate_args(self):
"""Boolean describing behavior on invalid input."""
return self._validate_args
@property
def name(self):
return self._name
@property
def dtype(self):
return self._loc.dtype
def batch_shape(self, name="batch_shape"):
"""Batch dimensions of this instance as a 1-D int32 `Tensor`.
The product of the dimensions of the `batch_shape` is the number of
independent distributions of this kind the instance represents.
Args:
name: name to give to the op.
Returns:
`Tensor` `batch_shape`
"""
with ops.name_scope(self.name):
with ops.op_scope([], name):
return array_ops.shape(self._ones())
def get_batch_shape(self):
"""`TensorShape` available at graph construction time.
Same meaning as `batch_shape`. May be only partially defined.
Returns:
batch shape
"""
return self._batch_shape
def event_shape(self, name="event_shape"):
"""Shape of a sample from a single distribution as a 1-D int32 `Tensor`.
Args:
name: name to give to the op.
Returns:
`Tensor` `event_shape`
"""
with ops.name_scope(self.name):
with ops.op_scope([], name):
return constant_op.constant([], dtype=dtypes.int32)
def get_event_shape(self):
"""`TensorShape` available at graph construction time.
Same meaning as `event_shape`. May be only partially defined.
Returns:
event shape
"""
return self._event_shape
@property
def loc(self):
"""Distribution parameter for the location."""
return self._loc
@property
def scale(self):
"""Distribution parameter for scale."""
return self._scale
def mean(self, name="mean"):
"""Mean of this distribution."""
with ops.name_scope(self.name):
with ops.op_scope([self._scale, self._loc], name):
return self._loc + array_ops.zeros_like(self._scale)
def median(self, name="median"):
"""Median of this distribution."""
return self.mean(name="median")
def mode(self, name="mode"):
"""Mode of this distribution."""
return self.mean(name="mode")
def std(self, name="std"):
"""Standard deviation of this distribution."""
with ops.name_scope(self.name):
with ops.op_scope([self._scale, self._loc], name):
sqrt_2 = constant_op.constant(math.sqrt(2.), dtype=self.dtype)
return sqrt_2 * self._scale + array_ops.zeros_like(self._loc)
def variance(self, name="variance"):
"""Variance of this distribution."""
with ops.name_scope(self.name):
with ops.op_scope([], name):
return math_ops.square(self.std())
def prob(self, x, name="pdf"):
"""The prob of observations in `x` under the Laplace distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `loc` and `scale`.
name: The name to give this op.
Returns:
pdf: tensor of dtype `dtype`, the pdf values of `x`.
"""
return 0.5 / self._scale * math_ops.exp(
-math_ops.abs(x - self._loc) / self._scale)
def log_prob(self, x, name="log_prob"):
"""Log prob of observations in `x` under these Laplace distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `loc` and `scale`.
name: The name to give this op.
Returns:
log_prob: tensor of dtype `dtype`, the log-probability of `x`.
"""
with ops.name_scope(self.name):
with ops.op_scope([self._loc, self._scale, x], name):
x = ops.convert_to_tensor(x)
if x.dtype != self.dtype:
raise TypeError("Input x dtype does not match dtype: %s vs. %s"
% (x.dtype, self.dtype))
log_2 = constant_op.constant(math.log(2.), dtype=self.dtype)
return (-log_2 - math_ops.log(self._scale) -
math_ops.abs(x - self._loc) / self._scale)
def cdf(self, x, name="cdf"):
"""CDF of observations in `x` under the Laplace distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `loc` and `scale`.
name: The name to give this op.
Returns:
cdf: tensor of dtype `dtype`, the CDFs of `x`.
"""
with ops.name_scope(self.name):
with ops.op_scope([self._loc, self._scale, x], name):
x = ops.convert_to_tensor(x)
if x.dtype != self.dtype:
raise TypeError("Input x dtype does not match dtype: %s vs. %s"
% (x.dtype, self.dtype))
y = x - self._loc
return 0.5 + 0.5 * math_ops.sign(y) * (
1. - math_ops.exp(-math_ops.abs(y) / self._scale))
def log_cdf(self, x, name="log_cdf"):
"""Log CDF of observations `x` under the Laplace distribution(s).
Args:
x: tensor of dtype `dtype`, must be broadcastable with `loc` and `scale`.
name: The name to give this op.
Returns:
log_cdf: tensor of dtype `dtype`, the log-CDFs of `x`.
"""
with ops.name_scope(self.name):
with ops.op_scope([self._loc, self._scale, x], name):
return math_ops.log(self.cdf(x))
def entropy(self, name="entropy"):
"""The entropy of Laplace distribution(s).
Args:
name: The name to give this op.
Returns:
entropy: tensor of dtype `dtype`, the entropy.
"""
with ops.name_scope(self.name):
with ops.op_scope([self._loc, self._scale], name):
log_2_e = constant_op.constant(math.log(2.) + 1., dtype=self.dtype)
# Use broadcasting rules to calculate the full broadcast scale.
scale = self._scale + array_ops.zeros_like(self._loc)
return log_2_e + math_ops.log(scale)
def sample_n(self, n, seed=None, name="sample_n"):
"""Sample `n` observations from the Laplace Distributions.
Args:
n: `Scalar`, type int32, the number of observations to sample.
seed: Python integer, the random seed.
name: The name to give this op.
Returns:
samples: `[n, ...]`, a `Tensor` of `n` samples for each
of the distributions determined by broadcasting the parameters.
"""
with ops.name_scope(self.name):
with ops.op_scope([self._loc, self._scale, n], name):
n = ops.convert_to_tensor(n)
n_val = tensor_util.constant_value(n)
shape = array_ops.concat(0, ([n], self.batch_shape()))
# Sample uniformly-at-random from the open-interval (-1, 1).
uniform_samples = random_ops.random_uniform(
shape=shape,
minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
self.dtype.as_numpy_dtype(0.)),
maxval=self.dtype.as_numpy_dtype(1.),
dtype=self.dtype,
seed=seed)
# Provide some hints to shape inference
inferred_shape = tensor_shape.vector(n_val).concatenate(
self.get_batch_shape())
uniform_samples.set_shape(inferred_shape)
return (self._loc - self._scale * math_ops.sign(uniform_samples) *
math_ops.log(1. - math_ops.abs(uniform_samples)))
@property
def is_reparameterized(self):
return True
def _ones(self):
return array_ops.ones_like(self._loc + self._scale)
def _zeros(self):
return array_ops.zeros_like(self._loc + self._scale)
@property
def is_continuous(self):
return True
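# A minimal plain-NumPy sketch of the inverse-CDF sampling trick used in
# sample_n above (standalone illustration only, not part of this class's API):
def _laplace_sample_np(loc, scale, n):
  """If U ~ Uniform(-1, 1), then loc - scale*sign(U)*log(1 - |U|) ~ Laplace(loc, scale)."""
  u = np.random.uniform(-1., 1., size=n)
  return loc - scale * np.sign(u) * np.log(1. - np.abs(u))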
| 34
| 92
| 0.656295
|
b7f2bef82fca86168292d73fe795bb57cf394f81
| 45,490
|
py
|
Python
|
oldScripts/tmp_20180306_ClimbingPlots_Mvs_F_5minOntop.py
|
crackmech/flyclimb
|
551621d1d2747d22b407a6b640d7ccaf680b53e5
|
[
"MIT"
] | null | null | null |
oldScripts/tmp_20180306_ClimbingPlots_Mvs_F_5minOntop.py
|
crackmech/flyclimb
|
551621d1d2747d22b407a6b640d7ccaf680b53e5
|
[
"MIT"
] | null | null | null |
oldScripts/tmp_20180306_ClimbingPlots_Mvs_F_5minOntop.py
|
crackmech/flyclimb
|
551621d1d2747d22b407a6b640d7ccaf680b53e5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 6 14:25:20 2018
@author: aman
"""
imgDatafolder = 'imageData'
trackImExtension = '.jpeg'
csvExt = '.csv'
headers = ['area_average(mm^2)', 'minorAxis_average(mm)', 'majorAxis_average(mm)',\
'area_median(mm^2)', 'minorAxis_median(mm)', 'majorAxis_median(mm)' ,\
'nFrames', 'FPS', 'folderName']
selectedParameter = 'minorAxis_median(mm)'
statsfName = 'flyStats_'
blankLinesAfterParam = 2
blankLinesBeforeParam = 1
startString = 'Parameters'
selParamIndex = headers.index(selectedParameter)
pixelSize = 0.055
param = 2.5  # get selected parameter size in mm
blu = int(param/pixelSize)  # Body length unit, used for stats calculations w.r.t the body length (minorAxis length)
AngBinMin = 0
AngBinMax = 180
AngBinStep = 1
# import time
# import copy
# import sys
# from math import atan2, degrees
# from thread import start_new_thread as startNT
# import cv2
import os
import glob
import numpy as np
import re
import random
from datetime import datetime
import Tkinter as tk
import tkFileDialog as tkd
import matplotlib.pyplot as plt
from scipy import stats
import xlwt
import dip
import matplotlib
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.markers import MarkerStyle
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
font = {'family' : 'normal',
'weight' : 'normal',
'size' : 18}
plt.rc('font', **font) # controls default text sizes
#plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=22) # fontsize of the x and y labels
# plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
# plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
# plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
# plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title
col = 1
plt.rcParams['axes.facecolor'] = (col,col,col)
angleBins = np.arange(AngBinMin, AngBinMax, AngBinStep)
def natural_sort(l):
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(l, key = alphanum_key)
def present_time():
return datetime.now().strftime('%Y%m%d_%H%M%S')
def getFolder(initialDir):
'''
    GUI function for browsing and selecting the folder
'''
root = tk.Tk()
initialDir = tkd.askdirectory(parent=root,
initialdir = initialDir, title='Please select a directory')
root.destroy()
return initialDir+'/'
def getStringLineIndex(fname, string):
'''
    returns the list of line numbers containing the given string
'''
stringIndex = []
with open(fname) as f:
for num, line in enumerate(f, 1):
if string in line:
stringIndex.append(num)
if stringIndex !=[]:
print('Found parameters')
return stringIndex
else:
print('No parameters found')
def getAllStats(fname, header, blanksAfterParams, blanksBeforeParams, startString):
'''
Get a list of all stats from Stats csv file
returns:
1) average stats of a single fly with STD
2) a list of lists containing all stats read from the Stats csv file
'''
print fname
parInd = getStringLineIndex(fname, startString) #get a list of indices with paramters line
parInd.reverse()
with open(fname) as f:
lines = f.readlines()
nLines = len(lines)
statslen = len(header[:-1]) #remove the folder name to calculate stats
allStats = []
allStats.append(('parameter Details',header))
for i in xrange(len(parInd)):
startIndex = parInd[i]+blanksAfterParams
if parInd[i]!=(nLines-1):
if i==0:
stopIndex = nLines
else:
stopIndex = parInd[i-1]-blanksBeforeParams-1
stats = np.zeros((stopIndex-startIndex,statslen))
for line in xrange(startIndex,stopIndex):
lineSplit = (lines[line]).split(',')[:-1]
nan = [True for x in lineSplit if 'nan' in x]
none = [True for x in lineSplit if 'None' in x]
if nan==[] and none==[]:
stats[line-startIndex,:] = lineSplit
else:
stats[line-startIndex,:] = [np.nan for x in lineSplit]
avStats = np.zeros((2, statslen))
avStats[0,:] = np.nanmean(stats, axis=0)
avStats[1,:] = np.nanstd(stats, axis=0)
allStats.append((lines[parInd[i]-1], header, stats, avStats))
else:
break
return allStats
def calcAngle3Pts(a, b, c):
'''
returns angle between a and c with b as the vertex
'''
ba = a.flatten() - b.flatten()
bc = c.flatten() - b.flatten()
cosine_angle = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
angle = np.arccos(cosine_angle)
return np.degrees(angle)
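# Worked example (hypothetical points): with a=(0,1), c=(1,0) and b=(0,0) as the
# vertex, the normalized dot product is 0, so the angle is 90 degrees:
#   calcAngle3Pts(np.array([0.,1.]), np.array([0.,0.]), np.array([1.,0.]))  # -> 90.0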
def getData(basedir, foldername, imDatafolder, trackImExt, csvExt):
'''
returns the arrays containing various datasets from a folder containing data of a single fly
'''
d = os.path.join(basedir, foldername, imDatafolder)
csvlist = natural_sort(glob.glob(d+'/*'+csvExt))
imglist = natural_sort(glob.glob(d+'/*'+trackImExt))
data = []
for _, ids in enumerate(zip(csvlist, imglist)):
csvData = np.genfromtxt(ids[0], dtype='float',delimiter = ',', skip_header=1)
data.append([csvData, ids[0], ids[1]])
return data
def getConsecData(csvData, consecStep, eudDisMinThresh, eudDisMaxThresh, fps, bodyLen):
'''
Input: list of lists of
a) csvdata
b) CSV file and Image file path details
returns a list containing
a) X coordinate
b) Y coordinate
c) Angle b/w i-consecStep, i, i+consecStep
d) Eucledian distance between i,i+consecStep
CSV file and Image file path details
'''
allangles = []
consecStep = int(consecStep)
for _, data in enumerate(csvData):
csvdata = data[0]
angles = []
for i in xrange(consecStep, len(csvdata)-consecStep-1, consecStep):
p0 = csvdata[i-consecStep]
p1 = csvdata[i]
p2 = csvdata[i+consecStep]
euDis = np.linalg.norm(csvdata[i+consecStep]-csvdata[i])
speed = (euDis*fps)/(consecStep*bodyLen)
angle = (calcAngle3Pts(p0,p1,p2))
if eudDisMinThresh<euDis<eudDisMaxThresh:
angles.append(np.array([csvdata[i][0], csvdata[i][1], angle, euDis, speed]))
        allangles.append((np.array(angles), data[1], data[2]))  # data[1] contains csv filename, data[2] contains img filename
return allangles
def random_color():
levels = [x/255.0 for x in range(32,256,32)]
return tuple(random.choice(levels) for _ in range(3))
def getHistMode(array, bins):
'''
Calculates the mode value based on the histogram of a given array
returns: the upper and lower limit of the most common value in the array
'''
a = array.copy()
freq, edgs = np.histogram(a[~np.isnan(a)],bins)
maxValMin = edgs[np.argmax(freq)]
maxValMax = maxValMin+np.max(np.diff(bins))
return maxValMin, maxValMax
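# Worked example (hypothetical values): for a = np.array([1.2, 1.3, 1.4, 5.0])
# and bins = np.arange(0, 6, 1), the fullest histogram bin is [1, 2), so
# getHistMode returns (1, 2).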
def reject_outliers(data, m=2):
return data[abs(data - np.nanmean(data)) < m * np.nanstd(data)]
def getFlyStats(genotypeData, consecFrameStep, minDisThres, maxDisThres):
'''Calculates statistics of data from a genotype (folder with 'n' flies)
input:
genotypeData: a list of lists containing trackCSVData and fly TrackStats for each fly in the genotype
consecFrameStep: step size of consecutive frames to calculate
a) Angle of fly calculated between (currentFrame-consecFrameStep), currentFrame, (currentFrame+consecFrameStep)
b) Distance covered by the fly between (currentFrame) and (currentFrame+consecFrameStep)
minDisThres: minimum distance moved by a fly between (currentFrame) and (currentFrame+consecFrameStep)
maxDisThres: maximum distance moved by a fly between (currentFrame) and (currentFrame+consecFrameStep)
returns:
genotypeStats: a list of all stats of a genotype
each list element contains data from all tracks of a fly, with:
meanAngle, medianAngle, stdAngle, meanInstantSpeed, medianInstanSpeed, stdInstantSpeed,\
meanInstantSpeed per BodyLengthUnit, medianInstanSpeed per BodyLengthUnit, stdInstantSpeed per BodyLengthUnit
        angles: a list of arrays of all tracks of each fly, completed to the length of the longest track, with shorter tracks padded by np.nan
angsData: a list of lists of all trackData calculated by given parameters
'''
genotypeStats = []
angles = []
angsData = []
for i,d in enumerate(genotypeData):
csvData, allStats = d
fps = allStats[1][-1][0,-1]
        pixelSize = float([x for x in allStats[1][0].split(',') if 'pixelSize' in x][0].split(':')[-1])
param = allStats[1][-1][0,selParamIndex]# get selected parameter size in mm
blu = int(param/pixelSize) #Body length unit, used for stats calculations w.r.t the body length (minorAxis length)
print blu, fps, pixelSize
angData = getConsecData(csvData,consecFrameStep, minDisThres, maxDisThres, fps, blu)
#print len(angData), len(angData[0])
maxTrackLen = max([len(x[0]) for x in angData])
angs = np.zeros((len(angData),maxTrackLen,3))# array of angle, distance and speeds for each track of a fly
flyStats = np.zeros((maxTrackLen,9))# array of median angle, median distance and median speed and their STDEV for each fly
angs[:] = np.nan
for i,ang in enumerate(angData):
for j in xrange(len(ang[0])):
angs[i,j,0] = ang[0][j,2]# copy angles
angs[i,j,1] = ang[0][j,3]# copy distance
angs[i,j,2] = ang[0][j,4]# copy speeds
for i in xrange(0, len(flyStats[0]),3):
data = angs[:,:,i/3]
flyStats[:,i] = np.nanmean(data, axis=0)
flyStats[:,i+1] = getMedian(data, i)
flyStats[:,i+2] = np.nanstd(data, axis=0)
genotypeStats.append(flyStats)
angles.append(angs)
angsData.append(angData)
return genotypeStats, angles, angsData
def getMedian(dataArray, i):
'''
returns the "median" value of dataArray
    the "median" is computed with getHistMode, which stands in for np.median for this data
'''
med = np.zeros((len(dataArray[0])))
med = np.median(dataArray, axis=0)
#return med
if i==0:
bins = angleBins
else:
bins = speedBins
for j in xrange(len(med)):
med[j] = getHistMode(dataArray[:,j], bins)[0]
return med
maxTimeThresh = 300 # time for calculation of data from tracks under this much seconds
chukFrames = 20 # number of frames to be chucked from start and end of the track to initiate data calculation
minTrackLen = blu*10
unitTime = 60
disMinThres = blu/20
disMaxThres = blu
consecWin = 7
trackLenThresh = 10*blu
speedBinMin = disMinThres
speedBinMax = disMaxThres
speedBinStep = 0.1
speedBins = np.arange(speedBinMin, speedBinMax, speedBinStep)
baseDir = '/media/aman/data/flyWalk_data/climbingData/controls'
#baseDir = '/media/pointgrey/data/flywalk/20180104/'
colors = [random_color() for c in xrange(1000)]
baseDir = getFolder(baseDir)
dirs = natural_sort([ name for name in os.listdir(baseDir) if os.path.isdir(os.path.join(baseDir, name)) ])
if 'W1118' in dirs:
if 'CS' in dirs:
csIndex = dirs.index('CS')
w1118Index = dirs.index('W1118')
dirs.pop(csIndex)
dirs.insert(0, 'CS')
dirs.pop(w1118Index)
dirs.insert(1, 'W1118')
else:
w1118Index = dirs.index('W1118')
dirs.pop(w1118Index)
dirs.insert(0, 'W1118')
genotypes = ['CS','Dop2R','Park25','PINK1RV', r'Trp-$\gamma$']
saveDir = '/media/aman/data/thesis/ClimbingPaper/data/'+baseDir.split('/')[-2]+'/'+baseDir.split('/')[-2]+'_'
saveFiles = ''
for _,d in enumerate(dirs):
saveFiles+='_'+d
saveFiles
print "Started processing directories at "+present_time()
def getAllFlyStats(genotypeDir):
'''
returns allFlyStats for a all fly folders in a given folder (genotypeDir)
'''
rawdirs = natural_sort([ name for name in os.listdir(genotypeDir) if os.path.isdir(os.path.join(genotypeDir, name)) ])
cs = []
for rawDir in rawdirs:
        print '=====raw'
csvData = getData(genotypeDir, rawDir, imgDatafolder, trackImExtension, csvExt)
fname = glob.glob(os.path.join(genotypeDir, rawDir, statsfName+'*'))[0]
trackStats = getAllStats(fname, headers, blankLinesAfterParam, blankLinesBeforeParam, startString)
cs.append([csvData, trackStats])
return getFlyStats(cs, consecWin, disMinThres, disMaxThres)
def getAllFlyCsvData(genotypeDir):
'''
returns data of all Csv's of all tracks for all fly folders in a given folder (genotypeDir)
'''
rawdirs = natural_sort([ name for name in os.listdir(genotypeDir) if os.path.isdir(os.path.join(genotypeDir, name)) ])
cs = []
for rawDir in rawdirs:
        print '=====raw'
csvData = getData(genotypeDir, rawDir, imgDatafolder, trackImExtension, csvExt)
fname = glob.glob(os.path.join(genotypeDir, rawDir,statsfName+'*'))[0]
trackStats = getAllStats(fname, headers, blankLinesAfterParam, blankLinesBeforeParam, startString)
cs.append([csvData, trackStats])
return cs
def getTimeDiffFromTimes(t2, t1):
'''
returns the time difference between two times, t2 and t1, (input in format '%Y%m%d_%H%M%S')
    returns the number of seconds elapsed between t2 and t1
'''
time1 = datetime.strptime(t1, '%Y%m%d_%H%M%S')
time2 = datetime.strptime(t2, '%Y%m%d_%H%M%S')
return (time2-time1).total_seconds()
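# Quick sanity check of the helper above (hypothetical timestamps, in the
# '%Y%m%d_%H%M%S' convention used throughout this script):
# getTimeDiffFromTimes('20180104_120130', '20180104_120000') -> 90.0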
alfa = 0.71
div = 255.0
colors_ = [(0/div,0/div,0/div,alfa),#gray
(200/div,129/div,0/div,alfa),#orange
(86/div,180/div,233/div,alfa),#Light blue
(204/div,121/div,167/div,alfa),#pink
(0/div,158/div,115/div,alfa),#greenish
(0/div,114/div,178/div,alfa),#blue
(213/div,94/div,0/div,alfa),#orange
(240/div,228/div,66/div,alfa),#yellow
(220/div,198/div,66/div,alfa)#dark yellowish
]
markers = ['^','s','v','d','o', 'P']
#---------declare the proper genotypes, markers and colors for the genotypes!!!!------------
genotypes = []
colors = []
markers = []
for i, gt in enumerate(dirs):
if gt in ('CS', 'cs'):
genotypes.append(gt)
colors.append((0/div,0/div,0/div,alfa))
markers.append('^')
elif gt in ('W1118', 'w1118'):
genotypes.append(r'W$^1$$^1$$^1$$^8$')
colors.append((230/div,218/div,66/div,alfa))
markers.append('P')
elif gt in ('Trp-Gamma', 'trp'):
genotypes.append(r'Trp-$\gamma$')
colors.append((0/div,158/div,115/div,alfa))
markers.append('o')
elif gt in ('Park_+', 'PARK_+'):
genotypes.append(r'Park$^2$$^5$/+')
colors.append((70/div,0/div,10/div,alfa))
markers.append('o')
elif gt in ('PINK1RV', 'pink1rv'):
genotypes.append(r'PINK1$^R$$^V$')
colors.append((204/div,121/div,167/div,alfa))
markers.append('d')
elif gt in ('PARK25_TM3', 'Park25_TM3'):
genotypes.append(r'Park$^2$$^5$/TM3')
colors.append((86/div,180/div,233/div,alfa))
markers.append('v')
elif gt in ('Dop2R', 'dop2r'):
genotypes.append(gt)
colors.append((180/div,109/div,0/div,alfa))
markers.append('s')
else:
genotypes.append(gt)
colors.append((255/div,0/div,0/div,alfa))
markers.append('8')
print i, gt, len(colors), colors
sMarkers = markers
allGenotypesCsvData = []
for _,d in enumerate(dirs):
path = os.path.join(baseDir, d)
allGenotypesCsvData.append([path, getAllFlyCsvData(path)])
genoTypeDataProcessed = []
for g, genotype in enumerate(allGenotypesCsvData):
for f, fly in enumerate(genotype[1]):
flyAlltracks = []
flyTimeThTracks = []
print "------",f,"------"
for t, tracks in enumerate(fly[0]):
print t
def getTrackDirection(trackData, minDis):
'''
returns a +1 or -1 based on direction of fly movement.
If the fly walks from left to right it returns -1 (equivalent to bottom to top for climbing)
if the fly walks from right to left, it returns +1 (equivalent to top to bottom for climbing)
Value is calculated purely on the basis of a line fit on the track based on change of X-coordinate w.r.t frames
'''
dataLen = len(trackData)
m,c,r,_,_ = stats.linregress(np.arange(dataLen), trackData[:,0])
delta = (m*(9*(dataLen/10))+c)-(m*(dataLen/10)+c)
if delta>=minDis:
return -1, r
elif delta<=-minDis:
return 1, r
else:
return 0, r
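# Hedged example: for a 100-frame track drifting +1 px/frame along X, linregress
# gives m ~ 1, so delta = m*(9*(dataLen/10)) - m*(dataLen/10) ~ 80 px -- the fitted
# X-displacement between the 10% and 90% marks of the track. With a hypothetical
# minDis = 40 this exceeds the threshold and getTrackDirection returns (-1, r),
# i.e. left-to-right (bottom-to-top for climbing).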
def getTrackData(csvdata, skipFrames, consecStep, eudDisMinThresh, eudDisMaxThresh, bodyLen, fps):
'''
Input: list of lists of
a) csvdata
b) CSV file and Image file path details
returns a list containing
a) X coordinate
b) Y coordinate
c) Angle b/w i-consecStep, i, i+consecStep
    d) Euclidean distance between i and i+consecStep
CSV file and Image file path details
'''
consecStep = int(consecStep)
angles = []
startFrame = consecStep*skipFrames
stopFrame = len(csvdata)-(consecStep*skipFrames)-1
for i in xrange(startFrame, stopFrame, consecStep):
p0 = csvdata[i-consecStep]
p1 = csvdata[i]
p2 = csvdata[i+consecStep]
euDis = np.linalg.norm(csvdata[i+consecStep]-csvdata[i])
angle = (calcAngle3Pts(p0,p1,p2))
if eudDisMinThresh > euDis or euDis > eudDisMaxThresh:
angle = np.nan
euDis = np.nan
speed = (euDis*fps)/(consecStep*bodyLen)
angles.append(np.array([csvdata[i][0], csvdata[i][1], angle, euDis, speed]))
angles = np.array(angles)
speedTrack = angles[:,4]
trackAvInsSpeed = np.nanmean(speedTrack)
trackDis = np.nansum(speedTrack)
trackAvSpeed = trackDis*fps/(consecStep*bodyLen*len(angles))
trackDirection = getTrackDirection(angles, bodyLen)
trackDetails = [trackAvInsSpeed, trackAvSpeed, trackDis, trackDirection]
trackDetailsHeader = ['Average InsSpeed for the track', 'AverageSpeed','Total distance of the track', 'Track Direction']
    return angles, trackDetails, trackDetailsHeader, len(angles)  # data[1] contains csv filename, data[2] contains img filename
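# Hedged note: getTrackData returns (angles, trackDetails, trackDetailsHeader, nFrames);
# each row of angles is [x, y, angle, euDis, speed], with steps outside the
# distance thresholds set to NaN before the per-track averages are taken.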
def getFlyDetails(allStats):
'''
returns average FPS, body length for a fly by getting details from its folder
'''
fps = allStats[1][-1][0,-1]
    pixelSize = float([x for x in allStats[1][0].split(',') if 'pixelSize' in x][0].split(':')[-1])
param = allStats[1][-1][0,selParamIndex]# get selected parameter size in mm
blu = int(param/pixelSize) #Body length unit, used for stats calculations w.r.t the body length (minorAxis length)
return blu, fps
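# Hedged note: blu (body-length unit, the selected parameter -- minorAxis length --
# converted to pixels) and fps are read from the per-fly stats file; all speeds and
# distances downstream are expressed in BLU and BLU/s.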
genoTypeDataProcessed = []
for g, genotype in enumerate(allGenotypesCsvData):
allFlyAllTracks = []
for f, fly in enumerate(genotype[1]):
blu, fps = getFlyDetails(fly[1])
flyAlltracks = []
flyTimeThTracks = []
print "------",f,"------"
for t, tracks in enumerate(fly[0]):
if t==0:
trackTimeDiff = 0
startTrackCsvName = fly[0][t][1]
startTrackTime = startTrackCsvName.split('/')[-1].split('_trackData')[0]
else:
currTrackCsvName = fly[0][t][1]
currTrackTime = currTrackCsvName.split('/')[-1].split('_trackData')[0]
trackTimeDiff = getTimeDiffFromTimes(currTrackTime, startTrackTime)
if len(tracks[0])>(2+(2*consecWin*chukFrames)):
trackData = getTrackData(tracks[0], chukFrames, consecWin, blu/50.0, blu, blu, fps)
flyAlltracks.append([trackData, trackData[1][-1], trackTimeDiff, tracks[1:]])
allFlyAllTracks.append(flyAlltracks)
genoTypeDataProcessed.append(allFlyAllTracks)
#--- avAvspeed for each fly-----
def getFlySpeedDisData(flyTrackData, timeThresh, trackLenThresh, unitTime, imFolder):
'''
returns the
average speed
STDEV of average speed
distanceTravelled in timeThresh
number of tracks in timeThresh
distanceTravelled in unitTime
nTracks in unitTime
'''
flyAllData = []
flyAllInsSpeeds = []
flyGeoIndex = 0
for _,tr in enumerate(flyTrackData):
if tr[2]<timeThresh:
if tr[0][1][2]>trackLenThresh:
avSpeed = tr[0][1][1] # average speed of the fly
dis = tr[0][1][2] # distance covered by the fly
insSpeeds = tr[0][0][:,-1] # list of instantaneous speed of the track
flyGeoIndex+=tr[1][0] # geotactic index of the fly
pathR = abs(tr[1][1]) # value of 'r' value of the path
flyAllData.append([avSpeed, dis,flyGeoIndex, tr[0][-1], pathR, tr[2]])
flyAllInsSpeeds.extend(insSpeeds[~np.isnan(insSpeeds)])
flyAllData = np.array(flyAllData)
flyDisPerUnitTime = []
for j in xrange(unitTime, timeThresh+1, unitTime):
disPerUT = []
for i in xrange(len(flyAllData[:,-1])):
if (j-unitTime)<=flyAllData[i,-1]<j:
disPerUT.append(flyAllData[i,:])
flyDisPerUnitTime.append(np.array(disPerUT))
print"---"
'''
    flyAllData contains: avSpeed per track, distance moved per track, geotactic index, nFrames per track, path straightness (r), time from start of imaging of the fly
    flyAllInsSpeeds contains: a single array of all instantaneous speeds of the fly
    flyDisPerUnitTime contains: a list of avSpeed, disMoved, geotactic index, nFrames, timeFromStart per unit time, for time-segment plots
'''
return np.array(flyAllData), np.array(flyAllInsSpeeds), flyTrackData[0][-1][0].split(imFolder)[0], flyDisPerUnitTime
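# Hedged note: the 4-tuple returned above is kept whole downstream and indexed as
# fly[0] (per-track stats), fly[1] (instantaneous speeds), fly[2] (fly folder path)
# and fly[unitDataIndex] (per-unit-time list; unitDataIndex = -1 below).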
maxTimeThresh = 300 # time for calculation of data from tracks under this much seconds
chukFrames = 20 # number of frames to be chucked from start and end of the track to initiate data calculation
minTrackLen = blu*3
unitTime = 60
nUnitTimes = maxTimeThresh/unitTime
nParams = 6
unitDataIndex = -1
xlGapColumns = 2
allGenotypeMovementData = []
for i, genotypeTracks in enumerate(genoTypeDataProcessed):
genotypeMovementData = []
for _,flAllTrks in enumerate(genotypeTracks):
flyMovementData = getFlySpeedDisData(flAllTrks, maxTimeThresh, minTrackLen, unitTime, imgDatafolder)
genotypeMovementData.append(flyMovementData)
allGenotypeMovementData.append(genotypeMovementData)
genotypeAvSpeed = []
genotypeDis = []
genotypeNTracks = []
genotypeGeoTacInd = []
genotypeLenTrack = []
genotypeStraight =[]
genotypeInsSpeed = []
genotypeName = []
genotypeMarker = []
# genotypeDisPerUT = []
# genotypeNTrackPerUT = []
for _,genotype in enumerate(allGenotypeMovementData):
flyName = []
flyAvSpeed = []
flyDis = []
flyNTracks = []
flyGeoTacInd = []
# flyDisPerUT = []
# flyNTrackPerUT = []
flyInsSpeed = []
flylenTrack = []
flyStraight = []
flyMarker = []
for i,fly in enumerate(genotype):
flyAvSpeed.append(np.mean(fly[0][:,0]))
flyDis.append(np.nansum(fly[0][:,1]))
if i==0:
flyInsSpeed = fly[1]
#flylenTrack = fly[0][:,3]# for track duration
else:
flyInsSpeed = np.hstack((flyInsSpeed, fly[1]))
#flylenTrack = np.hstack((flylenTrack,fly[0][:,3]))# for track duration
flylenTrack.append(np.median(fly[0][:,3]))# for track duration
flyGeoTacInd.append(fly[0][-1,2]/len(fly[0]))
flyNTracks.append(len(fly[0]))
flyStraight.append(np.nanmean(fly[0][:,4]))
flyName.append(fly[2])
if '_male' in fly[2]:
flyMarker.append((1,1,1))# open circles for males
elif '_female' in fly[2]:
flyMarker.append((0,0,0))# closed circles for females
genotypeLenTrack.append(np.array(flylenTrack))
genotypeAvSpeed.append(np.array(flyAvSpeed))
genotypeDis.append(np.array(flyDis))
genotypeNTracks.append(np.array(flyNTracks))
genotypeGeoTacInd.append(np.array(flyGeoTacInd))
genotypeInsSpeed.append(flyInsSpeed)
genotypeStraight.append(flyStraight)
genotypeName.append(flyName)
genotypeMarker.append(flyMarker)
plotTitles = ['Number of Tracks\nin 5 minutes',
'Duration of Tracks',
'Total Distance Travelled\nin 5 minutes',
'Average Speed',
'Path Straightness',
'Geotactic Index',
]
plotTitlesPerUT = ['Number of Tracks',
'Duration of Tracks',
'Total Distance Travelled',
'Average Speed',
'Path Straightness',
'Geotactic Index',
]
plotYLabels = ['Number of Tracks',
'duration of Tracks\n(s)',
'Distance Traveled\n'+r'(BLU x10$^3$)',
'Average Speed\n(BLU/S)',
'Path Straightness\n'+r'(R$^2$ Value)',
'Geotactic Index',
]
plotYLabels5min = ['Number of Tracks',
'Seconds',
r'Body Lengths (x10$^3$)',
'Body Length / S',
r'R$^2$ Value',
'Geotactic Index',
]
def getLenTrackStats(trackLenArray):
if trackLenArray.size > 0:
return np.median(trackLenArray)
else:
return 0
vPlotPos = np.arange(len(genotypes))
sWidth = 0.012
sSize = 5
sMarker = 'o'
sAlpha = 0.6
sLinewidth = 0.2
sEdgCol = (0,0,0)
sCol = genotypeMarker[0]
scatterDataWidth = 0.012
def plotScatter(axis, data, scatterX, scatterWidth = sWidth, \
scatterRadius = sSize , scatterColor = sCol,\
scatterMarker = sMarker, scatterAlpha = sAlpha, \
scatterLineWidth = sLinewidth, scatterEdgeColor = sEdgCol, zOrder=0):
'''
Takes the data and outputs the scatter plot on the given axis.
Returns the axis with scatter plot
'''
return axis.scatter(np.linspace(scatterWidth+scatterX, -scatterWidth+scatterX,len(data)), data,\
s=scatterRadius, color = scatterColor, marker=scatterMarker,\
alpha=scatterAlpha, linewidths=scatterLineWidth, edgecolors=scatterEdgeColor, zorder=zOrder )
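# Hedged usage sketch (hypothetical data): plotScatter(ax[0, 0], speeds, scatterX=1)
# spreads the points of `speeds` over x in [1 - sWidth, 1 + sWidth], so overlapping
# values stay visible next to the violin/box plotted at position 1.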
#---get the per unit time data ----
def set_axis_style(ax, labels):
ax.get_xaxis().set_tick_params(direction='out')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks(np.arange(0, len(labels)+0))
ax.set_xticklabels(labels)
ax.set_xlim(-1, len(labels))
allGenotypePerUT_Data = []
for _,genotype in enumerate(allGenotypeMovementData):
genotypePerUT_Data = []
for i,fly in enumerate(genotype):
flyDataPerUT = np.zeros((nUnitTimes, nParams))
nTracks = 0
for t in xrange(nUnitTimes):
if fly[unitDataIndex][t].size > 0:
nTracks+=len(fly[unitDataIndex][t])
flyDataPerUT[t,0] = (len(fly[unitDataIndex][t]))# nTracks
flyDataPerUT[t,1] = (getLenTrackStats(fly[unitDataIndex][t][:,3])) # duration of track
flyDataPerUT[t,2] = (np.nansum(fly[unitDataIndex][t][:,1])) # Total Distance
flyDataPerUT[t,3] = (np.nanmean(fly[unitDataIndex][t][:,0])) #Average Speed
flyDataPerUT[t,4] = (np.nanmean(fly[unitDataIndex][t][:,4])) # Path Straightness
flyDataPerUT[t,5] = (fly[unitDataIndex][t][-1, 2])/nTracks # Geotactic Index
genotypePerUT_Data.append(flyDataPerUT)
allGenotypePerUT_Data.append(genotypePerUT_Data)
# allGenotypePerUT_Data.append(np.array(genotypePerUT_Data))
#------- CHECK for NORMALITY --------
params = ['nTracks', 'trackDuration', 'Distance',\
'Speed', 'PathStraightness', 'GeotacticIndex']
genotypeParams = [genotypeNTracks,
genotypeLenTrack,
genotypeDis,
genotypeAvSpeed,
genotypeStraight,
genotypeGeoTacInd]
# f = open(saveDir + "climbing5MinutesStats"+saveFiles+".csv", 'w')
f = open(("%s%s_climbing5MinutesStats%s.csv"%(saveDir, present_time(), saveFiles)), 'w')  # 'wa' is not a valid open() mode; write mode intended
#--Check normality for 5 minutes data----
print '\n\n\n----Check normality for 5 minutes data----'
for p, par in enumerate(genotypeParams):
print '------', params[p],'------'
f.write('\n\n------Normality check for %s------\n'%params[p])
for g, gt in enumerate(par):
print stats.normaltest(gt)
f.write('%s: %s\n'%(genotypes[g],str(stats.normaltest(gt))))
f.close()
#--Check normality for Per minute data----
f = open(("%s%s_climbingPerMinuteStats%s.csv"%(saveDir, present_time(), saveFiles)), 'wa')
fn = open(("%s%s_climbingPerMinuteNormalityStats%s.csv"%(saveDir, present_time(), saveFiles)), 'wa')
f.write('\nKruskal-Wallis test for: ')
fn.write('\nD’Agostino-Pearson’s Normality test for: \n')
print '\n\n\n---Checking normality for Per minute data----'
for t in xrange(nUnitTimes):
for p, par in enumerate(params):
gtData = []
for i in xrange(len(dirs)):
parData = [allGenotypePerUT_Data[i][x][t,p] for x in xrange(len(allGenotypePerUT_Data[i]))]
gtData.append(parData)
print 'Normality value for: %s of %s (%d minute)'%(params[p], dirs[i], (t+1))
fn.write('Normality value for: %s of %s (%d minute): %s\n'%(params[p], dirs[i], (t+1), str(stats.normaltest(parData))))
print ('normal: %f, ShapiroWilk: %f'%(stats.normaltest(parData)[1], stats.shapiro(parData)[1]))
print '\n---KruskalWallis:',stats.kruskal(*gtData)
print '---OneWayANOVA:',stats.f_oneway(*gtData)
f.write('\n:%s (%d minute): %s'%(params[p], t+1, str(stats.kruskal(*gtData))))
f.close()
fn.close()  # the normality-stats file was never closed
trackFPS = 35
bAlpha = 0.5
vAlpha = 0.5
vAlphaCS = 0.5
plotYLabels = ['Number of Tracks\n\n(number)',
'Duration of Tracks\n\n(s)',
'Distance Traveled\n\n'+r'(BLU x10$^3$)',
'Average Speed\n\n(BLU/S)',
'Path Straightness\n\n'+r'(R$^2$ Value)',
'Geotactic Index\n\n(index)',
]
plotYLabels = ['number',
'seconds',
r'BLU (x10$^3$)',
'BLU/S',
r'R$^2$ Value',
'Geotactic Index',
]
tSeriesPlotIndex = 1
total5MinPlotIndex = 0
nPlotStacks = 2
figRatio = [3,1]
figWidth = 7
figHeight = 7/1.618
tightLayout = False
wSpace = 0.4
hSpace = 0.15
marginLeft = 0.05
marginRight = 0.99
marginTop = 0.97
marginBottom = 0.082
legendHorPos = 0.32
legendVerPos = 1.058
legendAxesRowSet = total5MinPlotIndex
legendAxesRowGet = tSeriesPlotIndex
legendAxesColSet = 4
legendAxesColGet = 4
nParamsToPlot = nParams-1
dataToPlot = [genotypeNTracks,
genotypeLenTrack,
genotypeDis,
genotypeAvSpeed,
genotypeStraight,
genotypeGeoTacInd]
ax00 = {'yticks': np.arange(5) }
ax10 = {'yticks': np.arange(0,36,5), 'ylim':(0,36)}
nSecs = 7
ax01 = {'yticks': np.arange(0, trackFPS*nSecs, 2*trackFPS) , 'yticklabels': np.arange(0,nSecs,2), 'ylim':(0,trackFPS*nSecs)}
nSecs = 13
ax11 = {'yticks': np.arange(0, trackFPS*nSecs, 2*trackFPS),'yticklabels': np.arange(0,nSecs,2), 'ylim':(0,trackFPS*nSecs) }
ax02 = {'yticks': np.arange(0,5000,1000), 'yticklabels': np.arange(5) }
ax12 = {'yticks': np.arange(0,21000,5000), 'yticklabels': np.arange(0,21,5), 'ylim':(0,21000) }
ax03 = {'yticks': np.arange(0,10,2)}
ax13 = {'yticks': np.arange(0,10,2)}
ax04 = {'ylim': (0, 1.1), 'yticks': [0, 0.5, 1]}
ax14 = {'ylim': (0, 1.5), 'yticks': [0, 0.5, 1]}
ax05 = {'ylim': (1.2, -1.5), 'yticks': [-1, 0, 1]}
ax15 = {'ylim': (1.2, -1.5), 'yticks': [-1, 0, 1]}
axP = [
[ax10, ax11, ax12, ax13, ax14, ax15],
[ax00, ax01, ax02, ax03, ax04, ax05]
]
fontSize = 8
plt.rc('font', family='serif', serif='Arial', size=fontSize)
plt.rc('ytick', labelsize=fontSize)
plt.rc('axes', labelsize=fontSize)
plt.rc('xtick', labelsize=fontSize)
showMeans = False
showMedians = True
showExtrema = False
medianColor = 'Orange'
vPlotLineShow = 'cmedians'
bwMethod = 'silverman'
boxLineWidth = 0.5
boxprops = dict(linestyle='--', linewidth=boxLineWidth)
whiskerprops = dict(linestyle='--', linewidth=boxLineWidth)
capprops = dict(linestyle='--', linewidth=boxLineWidth)
medianprops = dict(linestyle = None, linewidth=0)
boxPro = dict(boxprops=boxprops, whiskerprops=whiskerprops, capprops=capprops)
ptime = present_time()
figDir = '/media/aman/data/thesis/ClimbingPaper/Figures/raw'
csFigNamePng = ('%s/png/%s_CS.png'%(figDir, ptime))
combinedFigNamePng = ('%s/png/%s_%s.png'%(figDir, ptime, '_'.join(dirs)))
csFigNameSvg = ('%s/%s_CS.svg'%(figDir, ptime))
combinedFigNameSvg = ('%s/%s_%s.svg'%(figDir, ptime, '_'.join(dirs)))
gtiFigNamePng = ('%s/png/%s_%s_GTI.png'%(figDir, ptime, '_'.join(dirs)))
gtiFigNameSvg = ('%s/%s_%s_GTI.svg'%(figDir, ptime, '_'.join(dirs)))
dpi = 300
sMarkers = ['o' for x in sMarkers]
if 'CS' in dirs:
csIndex = dirs.index('CS')
csGT = allGenotypePerUT_Data[csIndex]
data = np.nanmean(csGT[:], axis=0)
sem = stats.sem(csGT[:], axis=0)
vPlotPosCS = [csIndex+1]
ax10 = {'yticks': np.arange(0,21,5), 'ylim':(0,21)}
axP[0][0]=ax10
fig, ax = plt.subplots(nPlotStacks,nParamsToPlot, figsize=(figWidth, figHeight), tight_layout = tightLayout, gridspec_kw = {'height_ratios':figRatio})
fig.subplots_adjust(left=marginLeft, bottom=marginBottom, right=marginRight, top=marginTop, wspace = wSpace, hspace = hSpace)
for i in xrange(nParamsToPlot):
ax[tSeriesPlotIndex, i].errorbar(np.arange(len(data[:,i])), data[:,i], yerr=sem[:,i], color=colors[0], fmt='-'+markers[0])
bPlots = []
vPlots = []
for i in xrange(nParamsToPlot):
plotData = dataToPlot[i][csIndex]
vp = ax[total5MinPlotIndex, i].violinplot(plotData, vPlotPosCS, showmeans=showMeans, showmedians=showMedians, showextrema=showExtrema, bw_method=bwMethod)
bp = ax[total5MinPlotIndex, i].boxplot(plotData, sym='', medianprops = medianprops, boxprops = boxprops, whiskerprops = whiskerprops, capprops = capprops, zorder=1)
plotScatter(ax[total5MinPlotIndex, i], plotData, scatterX = vPlotPosCS[0], scatterMarker = sMarkers[csIndex], scatterColor = genotypeMarker[csIndex], zOrder=2)
vPlots.append(vp)
bPlots.append(bp)
for vplot in vPlots:
vplot[vPlotLineShow].set_color(medianColor)
for patch, color in zip(vplot['bodies'], colors):
patch.set_color(color)
patch.set_edgecolor(None)
patch.set_alpha(vAlphaCS)
for i in xrange(len(axP)):
for j in xrange(nParamsToPlot):
plt.setp([ax[i,j].spines[x].set_visible(False) for x in ['top','right']])
plt.setp(ax[i,j].yaxis.grid(True, linestyle='-', which='major', color='lightgrey',alpha=0.5))
plt.setp(ax[i, j].get_yticklabels(), rotation=90, horizontalalignment='center', verticalalignment='center')
plt.setp(ax[i,j], ylabel = plotYLabels[j])
plt.setp(ax[i,j], **axP[i][j])
if i==tSeriesPlotIndex:
plt.setp(ax[i,j], xticks = [0,1,2,3,4], xticklabels = [1,2,3,4,5], xlabel = 'minutes')
plt.setp([axs for axs in ax[total5MinPlotIndex, :]], xlim=[0,2], xticks = [0], xticklabels = [])
plt.savefig(csFigNamePng, dpi=dpi, format='png')
plt.savefig(csFigNameSvg, format='svg')
# plt.show()
# fig.set_size_inches(7,7/1.618)
if 'W1118' in dirs:
fig, ax = plt.subplots(nPlotStacks,nParamsToPlot, figsize=(figWidth, figHeight), tight_layout = tightLayout, gridspec_kw = {'height_ratios':figRatio})
fig.subplots_adjust(left=marginLeft, bottom=marginBottom, right=marginRight, top=marginTop, wspace = wSpace, hspace = hSpace)
for c, gt in enumerate(allGenotypePerUT_Data):
data = np.nanmean(gt[:], axis=0)
sem = stats.sem(gt[:], axis=0)
tPlots = []
for i in xrange(0, nParamsToPlot):
tp = ax[tSeriesPlotIndex,i].errorbar(np.arange(len(data[:,i])), data[:,i], yerr=sem[:,i], color=colors[c], fmt='-'+markers[c], label=genotypes[c])
tPlots.append(tp)
legendHandles, legendLabels = ax[legendAxesRowGet, legendAxesColGet].get_legend_handles_labels()
ax[legendAxesRowSet, legendAxesColSet].legend(handles=legendHandles,labels=legendLabels, bbox_to_anchor=(legendHorPos, legendVerPos), loc=2, shadow=True, edgecolor=(0,0,0), fontsize='x-small', ncol=1).draggable()
bPlots = []
vPlots = []
for i in xrange(0, nParamsToPlot):
plotData = dataToPlot[i]
vp = ax[total5MinPlotIndex, i].violinplot([da for da in plotData], vPlotPos+1, showmeans=showMeans, showmedians=showMedians, showextrema=showExtrema, bw_method=bwMethod)
bp = ax[total5MinPlotIndex, i].boxplot([da for da in plotData], sym='', medianprops = medianprops, boxprops = boxprops, whiskerprops = whiskerprops, capprops = capprops, zorder=1)
for s,scatterPlotData in enumerate(plotData):
plotScatter(ax[total5MinPlotIndex, i], scatterPlotData, scatterX = s+1, scatterMarker = sMarkers[s], scatterColor = genotypeMarker[s], zOrder=2)
vPlots.append(vp)
bPlots.append(bp)
for vplot in vPlots:
vplot[vPlotLineShow].set_color(medianColor)
for patch, color in zip(vplot['bodies'], colors):
patch.set_color(color)
patch.set_edgecolor(None)
patch.set_alpha(vAlpha)
for i in xrange(0, len(axP)):
for j in xrange(0, nParamsToPlot):
plt.setp([ax[i,j].spines[x].set_visible(False) for x in ['top','right']])
plt.setp(ax[i,j].yaxis.grid(True, linestyle='-', which='major', color='lightgrey',alpha=0.5))
plt.setp(ax[i, j].get_yticklabels(), rotation=90, horizontalalignment='center', verticalalignment='center')
plt.setp(ax[i,j], ylabel = plotYLabels[j])
plt.setp(ax[i,j], **axP[i][j])
if i==tSeriesPlotIndex:
plt.setp(ax[i,j], xticks = [0,1,2,3,4], xticklabels = [1,2,3,4,5], xlabel = 'minutes')
plt.setp([axs for axs in ax[total5MinPlotIndex, :]], xlim=[0,len(genotypes)+1], xticks = [0], xticklabels = [])
plt.savefig(combinedFigNamePng, dpi=dpi, format='png')
plt.savefig(combinedFigNameSvg, format='svg')
# plt.show()
gtinParamsToPlot = 1
gtiFigWidth = 2.2
gtiFigHeight = figHeight+0.5
gtiMarginLeft = 0.2
gtiMarginRight = marginRight
gtiMarginTop = marginTop-0.07
gtiMarginBottom = marginBottom + 0.01
gtilegendVerPos = legendVerPos+0.1
ax0 = {'yticks': np.arange(-1, 2), 'ylim':(1.2, -1.2) }
# ax1 = {'yticks': np.arange(-1, 2), 'ylim':(1.2, -1.2) }
# ax1 = {'yticks': np.arange(0,36,5), 'ylim':(0,36)}
axP1 = [ax0, ax0]
# [ax10, ax11, ax12, ax13, ax14, ax15],
# [ax00, ax01, ax02, ax03, ax04, ax05]
# ]
#-----GeoTacticIndex Plot-------
if 'W1118' in dirs:
fig, ax = plt.subplots(nPlotStacks, gtinParamsToPlot, figsize=(1.8, gtiFigHeight), tight_layout = tightLayout, gridspec_kw = {'height_ratios':figRatio})
fig.subplots_adjust(left=gtiMarginLeft, bottom=gtiMarginBottom, right=gtiMarginRight, top=gtiMarginTop, wspace = wSpace, hspace = hSpace)
for c, gt in enumerate(allGenotypePerUT_Data):
data = np.nanmean(gt[:], axis=0)
sem = stats.sem(gt[:], axis=0)
tPlots = []
i=-1
tp = ax[tSeriesPlotIndex].errorbar(np.arange(len(data[:,i])), data[:,i], yerr=sem[:,i], color=colors[c], fmt='-'+markers[c], label=genotypes[c])
legendHandles, legendLabels = ax[tSeriesPlotIndex].get_legend_handles_labels()
ax[total5MinPlotIndex].legend(handles=legendHandles,labels=legendLabels, bbox_to_anchor=(legendHorPos, gtilegendVerPos),\
loc=2, shadow=True, edgecolor=(0,0,0), fontsize='x-small', ncol=2).draggable()
plotData = dataToPlot[i]
vp = ax[total5MinPlotIndex].violinplot([da for da in plotData], vPlotPos+1, showmeans=showMeans, showmedians=showMedians, showextrema=showExtrema, bw_method=bwMethod)
bp = ax[total5MinPlotIndex].boxplot([da for da in plotData], sym='', medianprops = medianprops, boxprops = boxprops, whiskerprops = whiskerprops, capprops = capprops, zorder=1)
for s,scatterPlotData in enumerate(plotData):
plotScatter(ax[total5MinPlotIndex], scatterPlotData, scatterX = s+1, scatterMarker = sMarkers[s], scatterColor = genotypeMarker[s], zOrder=2)
vp[vPlotLineShow].set_color(medianColor)
for patch, color in zip(vp['bodies'], colors):
patch.set_color(color)
patch.set_edgecolor(None)
patch.set_alpha(vAlpha)
for i in xrange(0, len(axP1)):
plt.setp([ax[i].spines[x].set_visible(False) for x in ['top','right']])
plt.setp(ax[i].yaxis.grid(True, linestyle='-', which='major', color='lightgrey',alpha=0.5))
plt.setp(ax[i].get_yticklabels(), rotation=90, horizontalalignment='center', verticalalignment='center')
plt.setp(ax[i], ylabel = plotYLabels[-1])
plt.setp(ax[i], **axP1[i])
if i==tSeriesPlotIndex:
plt.setp(ax[i], xticks = [0,1,2,3,4], xticklabels = [1,2,3,4,5], xlabel = 'minutes')
plt.setp(ax[total5MinPlotIndex], xlim=[0,len(genotypes)+1], xticks = [0], xticklabels = [])
# plt.setp([axs for axs in ax[tSeriesPlotIndex]], xticks = [0,1,2,3,4], xticklabels = [])
plt.savefig(gtiFigNamePng, dpi=dpi, format='png')
plt.savefig(gtiFigNameSvg, format='svg')
# plt.show()
nParams
sheetNames = ['NumTracks','TrackDuration','TotalDistance',\
'AvSpeed','GeotacticIndex', 'Straightness']
columnHeader = 'TimePoint'
skipheaderCells = 2
("%s%s_climbing5MinutesStats%s.csv"%(saveDir, present_time(), saveFiles))
#---- Save sheet for Per minute data------------
paramBook = xlwt.Workbook(encoding='utf-8', style_compression = 0)
sheets = [paramBook.add_sheet(x, cell_overwrite_ok = True) for x in sheetNames]
for g, gt in enumerate(allGenotypePerUT_Data):
for f, fly in enumerate(gt):
for timepoint in xrange(fly.shape[0]):
for parameter in xrange(fly.shape[1]):
col = g+(timepoint*(len(allGenotypePerUT_Data)+xlGapColumns))
if f==0:
if g==0:
timepointHeader = '%s: %d minute'%(columnHeader, timepoint+1)
sheets[parameter].write(f,col+len(allGenotypePerUT_Data)/2,timepointHeader)
sheets[parameter].write(f+1, col, dirs[g])
row = f+skipheaderCells
sheets[parameter].write(row,col, fly[timepoint, parameter])
#paramBook.save(saveDir + "climbingPerMinuteParameters_genotypesTogether"+saveFiles+".xls")
xlName = "climbingPerMinuteParameters_genotypesTogether"
paramBook.save("%s%s_%s%s.xls"%(saveDir, present_time(), xlName, saveFiles))
paramBook = xlwt.Workbook(encoding='utf-8', style_compression = 0)
sheets = [paramBook.add_sheet(x, cell_overwrite_ok = True) for x in sheetNames]
for g, gt in enumerate(allGenotypePerUT_Data):
for f, fly in enumerate(gt):
for timepoint in xrange(fly.shape[0]):
for parameter in xrange(fly.shape[1]):
col = timepoint+(g*(fly.shape[0]+xlGapColumns))
if f==0:
timepointHeader = '%d minute'%(timepoint+1)
sheets[parameter].write(f+1,col,timepointHeader)
if timepoint==0:
sheets[parameter].write(f, col+len(allGenotypePerUT_Data)/2, dirs[g])
row = f+skipheaderCells
sheets[parameter].write(row,col, fly[timepoint, parameter])
# paramBook.save(saveDir + "climbingPerMinuteParameters_timepointsTogether"+saveFiles+".xls")
xlName = "climbingPerMinuteParameters_timepointsTogether"
paramBook.save("%s%s_%s%s.xls"%(saveDir, present_time(), xlName, saveFiles))
#---- Save sheet for 5minutes data------------
genotypeParams = [genotypeNTracks,
genotypeLenTrack,
genotypeDis,
genotypeAvSpeed,
genotypeGeoTacInd,
genotypeStraight]
paramBook = xlwt.Workbook(encoding='utf-8', style_compression = 0)
sheets = [paramBook.add_sheet(x, cell_overwrite_ok = True) for x in sheetNames]
for s in xrange(len(sheets)):
sheet = sheets[s]
for g in xrange(len(genotypeParams[s])):
for row in xrange(len(genotypeParams[s][g])):
if row==0:
sheet.write(row,g,dirs[g])
sheet.write(row+skipheaderCells,g,genotypeParams[s][g][row])
# paramBook.save(saveDir + "climbingParameters5Minutes_genotypesTogether"+saveFiles+".xls")
xlName = "climbingParameters5Minutes_genotypesTogether"
paramBook.save("%s%s_%s%s.xls"%(saveDir, present_time(), xlName, saveFiles))
| 40.908273
| 216
| 0.634755
|
22b91d2a661475bfe75b45e432a7d09f151ad4d9
| 884
|
py
|
Python
|
custom_components/hacs/helpers/functions/remaining_github_calls.py
|
svinodiyer/integration
|
0a809c57505e3dc4286c020486bbe7a86178f811
|
[
"MIT"
] | null | null | null |
custom_components/hacs/helpers/functions/remaining_github_calls.py
|
svinodiyer/integration
|
0a809c57505e3dc4286c020486bbe7a86178f811
|
[
"MIT"
] | null | null | null |
custom_components/hacs/helpers/functions/remaining_github_calls.py
|
svinodiyer/integration
|
0a809c57505e3dc4286c020486bbe7a86178f811
|
[
"MIT"
] | null | null | null |
"""Helper to calculate the remaining calls to github."""
import math
from custom_components.hacs.utils.logger import getLogger
_LOGGER = getLogger()
async def remaining(github):
"""Helper to calculate the remaining calls to github."""
try:
ratelimits = await github.get_rate_limit()
    except BaseException as exception:  # pylint: disable=broad-except
_LOGGER.error(exception)
return None
if ratelimits.get("remaining") is not None:
return int(ratelimits["remaining"])
return 0
async def get_fetch_updates_for(github):
"""Helper to calculate the number of repositories we can fetch data for."""
margin = 1000
limit = await remaining(github)
pr_repo = 15
if limit is None:
return None
if limit - margin <= pr_repo:
return 0
return math.floor((limit - margin) / pr_repo)
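# Hedged arithmetic sketch: with 4000 calls remaining, get_fetch_updates_for gives
# floor((4000 - 1000) / 15) == 200 repositories; with 1015 or fewer remaining it
# returns 0, and None if the rate limit could not be read at all.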
| 26.787879
| 83
| 0.682127
|
c91ad570304bdb43c1f8f6326c852b0a733cc016
| 2,291
|
py
|
Python
|
1.DeepLearning/deeplink/util.py
|
link-kut/deeplink_public
|
688c379bfeb63156e865d78d0428f97d7d203cc1
|
[
"MIT"
] | null | null | null |
1.DeepLearning/deeplink/util.py
|
link-kut/deeplink_public
|
688c379bfeb63156e865d78d0428f97d7d203cc1
|
[
"MIT"
] | 11
|
2020-01-28T22:33:49.000Z
|
2022-03-11T23:41:08.000Z
|
1.DeepLearning/deeplink/util.py
|
link-kut/deeplink_public
|
688c379bfeb63156e865d78d0428f97d7d203cc1
|
[
"MIT"
] | 2
|
2019-06-01T04:14:52.000Z
|
2020-05-31T08:13:23.000Z
|
# coding: utf-8
import numpy as np
def smooth_curve(x):
"""
    Smooth a 1-D array with an 11-point Kaiser window after reflect-padding;
    see http://glowingpython.blogspot.jp/2012/02/convolution-with-numpy.html
"""
window_len = 11
s = np.r_[x[window_len-1:0:-1], x, x[-1:-window_len:-1]]
w = np.kaiser(window_len, 2)
y = np.convolve(w/w.sum(), s, mode='valid')
return y[5:len(y)-5]
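# Hedged sanity check: the reflect padding and the [5:-5] slice cancel out, so the
# output has the same length as the input, e.g.
# x = np.sin(np.linspace(0, 4*np.pi, 100))
# assert smooth_curve(x).shape == x.shape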
def shuffle_dataset(x, t):
"""
"""
permutation = np.random.permutation(x.shape[0])
x = x[permutation,:] if x.ndim == 2 else x[permutation,:,:,:]
t = t[permutation]
return x, t
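# Hedged example: for x of shape (N, D) (or (N, C, H, W)) and labels t of shape
# (N,), x, t = shuffle_dataset(x, t) reorders both with the same permutation.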
def conv_output_size(input_size, filter_size, stride=1, pad=0):
return (input_size + 2*pad - filter_size) / stride + 1
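# Quick check: conv_output_size(28, 5) == (28 + 0 - 5)/1 + 1 == 24, the classic
# 28x28 -> 24x24 shrink for a 5x5 filter with stride 1 and no padding.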
def im2col(input_data, filter_h, filter_w, stride=1, pad=0):
"""
Parameters
----------
    input_data : image data of shape (N, C, H, W)
    filter_h : filter height
    filter_w : filter width
    stride : stride
    pad : padding
    Returns
    -------
    col : 2-D matrix of shape (N*out_h*out_w, C*filter_h*filter_w)
"""
N, C, H, W = input_data.shape
out_h = (H + 2 * pad - filter_h) // stride + 1
out_w = (W + 2 * pad - filter_w) // stride + 1
img = np.pad(input_data, [(0,0), (0,0), (pad, pad), (pad, pad)], 'constant')
col = np.zeros((N, C, filter_h, filter_w, out_h, out_w))
for y in range(filter_h):
y_max = y + stride * out_h
for x in range(filter_w):
x_max = x + stride * out_w
col[:, :, y, x, :, :] = img[:, :, y:y_max:stride, x:x_max:stride]
col = col.transpose(0, 4, 5, 1, 2, 3).reshape(N * out_h * out_w, -1)
return col
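# Hedged shape check: for input_data of shape (1, 3, 4, 4) with a 3x3 filter,
# stride 1 and pad 0, out_h == out_w == 2, so im2col returns a (1*2*2, 3*3*3) ==
# (4, 27) matrix -- one row per output position, one column per filter weight.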
def col2im(col, input_shape, filter_h, filter_w, stride=1, pad=0):
"""
Parameters
----------
    col : 2-D matrix produced by im2col
    input_shape : shape of the original image data (e.g. (10, 1, 28, 28))
    filter_h
    filter_w
    stride
    pad
    Returns
    -------
    img : restored image data of shape input_shape
"""
N, C, H, W = input_shape
out_h = (H + 2*pad - filter_h)//stride + 1
out_w = (W + 2*pad - filter_w)//stride + 1
col = col.reshape(N, out_h, out_w, C, filter_h, filter_w).transpose(0, 3, 4, 5, 1, 2)
img = np.zeros((N, C, H + 2*pad + stride - 1, W + 2*pad + stride - 1))
for y in range(filter_h):
y_max = y + stride*out_h
for x in range(filter_w):
x_max = x + stride*out_w
img[:, :, y:y_max:stride, x:x_max:stride] += col[:, :, y, x, :, :]
return img[:, :, pad:H + pad, pad:W + pad]
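# Hedged note: col2im is the layout inverse of im2col, but positions covered by
# overlapping filter windows are summed (the += above), so
# col2im(im2col(x, ...), x.shape, ...) equals x only when windows never overlap
# (stride >= filter size).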
| 26.034091
| 89
| 0.542558
|
d90b983ecdfbb88e4d517d904141e4d1af1e41bf
| 1,915
|
py
|
Python
|
setup.py
|
fserena/geo-pass
|
bce1f55e1078c074a62bfad9f12ea849122ee135
|
[
"Apache-2.0"
] | 1
|
2018-05-04T10:56:06.000Z
|
2018-05-04T10:56:06.000Z
|
setup.py
|
fserena/geo-pass
|
bce1f55e1078c074a62bfad9f12ea849122ee135
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
fserena/geo-pass
|
bce1f55e1078c074a62bfad9f12ea849122ee135
|
[
"Apache-2.0"
] | null | null | null |
"""
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Ontology Engineering Group
http://www.oeg-upm.net/
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Copyright (C) 2016 Ontology Engineering Group.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
"""
import json
from setuptools import setup, find_packages
__author__ = 'Fernando Serena'
with open("geo_pass/metadata.json", 'r') as stream:
metadata = json.load(stream)
setup(
name="geo-pass",
version=metadata['version'],
author=metadata['author'],
author_email=metadata['email'],
description=metadata['description'],
license="Apache 2",
keywords=["overpass", "osm"],
url=metadata['github'],
download_url="https://github.com/fserena/geo-pass/tarball/{}".format(metadata['version']),
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
install_requires=['overpy', 'Flask', 'Flask-Caching', 'LatLon', 'gunicorn', 'futures', 'shapely', 'requests', 'redis',
'redis-simple-cache', 'fuzzywuzzy', 'python-Levenshtein'],
classifiers=[],
package_dir={'geo_pass': 'geo_pass'},
package_data={'geo_pass': ['metadata.json']},
scripts=['geo-pass']
)
| 39.081633
| 122
| 0.580679
|
fd9db010c542cf0f3cb7bf92bbe6f7da737cf63a
| 3,128
|
py
|
Python
|
fatuv/error.py
|
kasicass/fatuv
|
f1baf61ffffa5f753460fbdfadc7c079a3302d0d
|
[
"MIT"
] | 6
|
2019-06-15T17:39:52.000Z
|
2020-01-13T09:07:18.000Z
|
fatuv/error.py
|
kasicass/fatuv
|
f1baf61ffffa5f753460fbdfadc7c079a3302d0d
|
[
"MIT"
] | null | null | null |
fatuv/error.py
|
kasicass/fatuv
|
f1baf61ffffa5f753460fbdfadc7c079a3302d0d
|
[
"MIT"
] | 3
|
2019-01-18T09:14:07.000Z
|
2020-04-03T09:19:13.000Z
|
import io
STATUS_SUCCESS = 0
class UVError(Exception):
pass
class ThreadError(UVError):
pass
class HandleError(UVError):
pass
class HandleClosedError(HandleError):
pass
class AsyncError(HandleError):
pass
class TimerError(HandleError):
pass
class PrepareError(HandleError):
pass
class IdleError(HandleError):
pass
class CheckError(HandleError):
pass
class SignalError(HandleError):
pass
class StreamError(HandleError):
pass
class TCPError(StreamError):
pass
class PipeError(StreamError):
pass
class TTYError(StreamError):
pass
class UDPError(HandleError):
pass
class PollError(HandleError):
pass
class FSError(UVError):
pass
class FSEventError(HandleError):
pass
class FSPollError(HandleError):
pass
class ArgumentError(UVError, ValueError):
""" Invalid arguments. """
class TemporaryUnavailableError(UVError, io.BlockingIOError):
""" Resource temporary unavailable. """
UV_CONSTANTS = {
'UV_RUN_DEFAULT': 0,
'UV_RUN_ONCE': 1,
'UV_RUN_NOWAIT': 2,
'UV_READABLE': 1,
'UV_WRITABLE': 2,
'UV_RENAME': 1,
'UV_CHANGE': 2,
'UV_FS_EVENT_WATCH_ENTRY': 1,
'UV_FS_EVENT_STAT': 2,
'UV_FS_EVENT_RECURSIVE': 4,
'UV_TCP_IPV6ONLY': 1,
'UV_UDP_IPV6ONLY': 1,
'UV_UDP_PARTIAL': 2,
'UV_UDP_REUSEADDR': 4,
'UV_LEAVE_GROUP': 0,
'UV_JOIN_GROUP': 1,
'UV_CREATE_PIPE': 1,
'UV_READABLE_PIPE': 16,
'UV_WRITABLE_PIPE': 32,
'UV_PROCESS_DETACHED': 1 << 3,
'UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS': 1 << 2,
'UV_PROCESS_WINDOWS_HIDE': 1 << 4,
'UV_E2BIG': -7,
'UV_EACCES': -13,
'UV_EADDRINUSE': -98,
'UV_EADDRNOTAVAIL': -99,
'UV_EAFNOSUPPORT': -97,
'UV_EAGAIN': -11,
'UV_EAI_ADDRFAMILY': -3000,
'UV_EAI_AGAIN': -3001,
'UV_EAI_BADFLAGS': -3002,
'UV_EAI_BADHINTS': -3013,
'UV_EAI_CANCELED': -3003,
'UV_EAI_FAIL': -3004,
'UV_EAI_FAMILY': -3005,
'UV_EAI_MEMORY': -3006,
'UV_EAI_NODATA': -3007,
'UV_EAI_NONAME': -3008,
'UV_EAI_OVERFLOW': -3009,
'UV_EAI_PROTOCOL': -3014,
'UV_EAI_SERVICE': -3010,
'UV_EAI_SOCKTYPE': -3011,
'UV_EALREADY': -114,
'UV_EBADF': -9,
'UV_EBUSY': -16,
'UV_ECANCELED': -125,
'UV_ECHARSET': -4080,
'UV_ECONNABORTED': -103,
'UV_ECONNREFUSED': -111,
'UV_ECONNRESET': -104,
'UV_EDESTADDRREQ': -89,
'UV_EEXIST': -17,
'UV_EFAULT': -14,
'UV_EFBIG': -27,
'UV_EHOSTUNREACH': -113,
'UV_EINTR': -4,
'UV_EINVAL': -22,
'UV_EIO': -5,
'UV_EISCONN': -106,
'UV_EISDIR': -21,
'UV_ELOOP': -40,
'UV_EMFILE': -24,
'UV_EMSGSIZE': -90,
'UV_ENAMETOOLONG': -36,
'UV_ENETDOWN': -100,
'UV_ENETUNREACH': -101,
'UV_ENFILE': -23,
'UV_ENOBUFS': -105,
'UV_ENODEV': -19,
'UV_ENOENT': -2,
'UV_ENOMEM': -12,
'UV_ENONET': -64,
'UV_ENOPROTOOPT': -92,
'UV_ENOSPC': -28,
'UV_ENOSYS': -38,
'UV_ENOTCONN': -107,
'UV_ENOTDIR': -20,
'UV_ENOTEMPTY': -39,
'UV_ENOTSOCK': -88,
'UV_ENOTSUP': -95,
'UV_EPERM': -1,
'UV_EPIPE': -32,
'UV_EPROTO': -71,
'UV_EPROTONOSUPPORT': -93,
'UV_EPROTOTYPE': -91,
'UV_ERANGE': -34,
'UV_EROFS': -30,
'UV_ESHUTDOWN': -108,
'UV_ESPIPE': -29,
'UV_ESRCH': -3,
'UV_ETIMEDOUT': -110,
'UV_ETXTBSY': -26,
'UV_EXDEV': -18,
'UV_UNKNOWN': -4094,
'UV_EOF': -4095,
'UV_ENXIO': -6,
'UV_EMLINK': -31,
'UV_EHOSTDOWN': -112
}
| 17.672316
| 61
| 0.683184
|
6b8004c49db9bbdb4707ca697390fe040270e161
| 11,724
|
py
|
Python
|
code/python/FactSetTermsandConditions/v1/fds/sdk/FactSetTermsandConditions/model/lead_underwriters_response.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 6
|
2022-02-07T16:34:18.000Z
|
2022-03-30T08:04:57.000Z
|
code/python/FactSetTermsandConditions/v1/fds/sdk/FactSetTermsandConditions/model/lead_underwriters_response.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 2
|
2022-02-07T05:25:57.000Z
|
2022-03-07T14:18:04.000Z
|
code/python/FactSetTermsandConditions/v1/fds/sdk/FactSetTermsandConditions/model/lead_underwriters_response.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | null | null | null |
"""
FactSet Terms & Conditions API
The FactSet Terms & Conditions API exposes Terms & Conditions data for Fixed Income Corporate, Governement & Agency securities. The FactSet Corporate Government & Agency Terms & Conditions library provides descriptive data on the issue level, such as offering details, redemption information, and coupon schedules. Issuer level data is also available. Coverage is global and includes corporate, sovereign, and agency issues. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: api@factset.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.FactSetTermsandConditions.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.FactSetTermsandConditions.exceptions import ApiAttributeError
def lazy_import():
from fds.sdk.FactSetTermsandConditions.model.lead_underwriter import LeadUnderwriter
globals()['LeadUnderwriter'] = LeadUnderwriter
class LeadUnderwritersResponse(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'data': ([LeadUnderwriter],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'data': 'data', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""LeadUnderwritersResponse - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data ([LeadUnderwriter]): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""LeadUnderwritersResponse - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data ([LeadUnderwriter]): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 44.577947
| 443
| 0.582736
|
6d89d328a2e168be1f92552b191a9370f3a91787
| 1,836
|
py
|
Python
|
movie.py
|
mu-lambda/mu-lambda-raytracer
|
e2df010f821eed04ae77a0bc2254ca9b7aaafe3c
|
[
"MIT"
] | null | null | null |
movie.py
|
mu-lambda/mu-lambda-raytracer
|
e2df010f821eed04ae77a0bc2254ca9b7aaafe3c
|
[
"MIT"
] | null | null | null |
movie.py
|
mu-lambda/mu-lambda-raytracer
|
e2df010f821eed04ae77a0bc2254ca9b7aaafe3c
|
[
"MIT"
] | null | null | null |
import subprocess
import os
from math import sqrt, sin, cos, atan2, pi
#step_grad=0.5
#nframes=300 # 300 frames = turn 150 grads at 0.5 grad/frame
#image_width=640
#samples_per_pixel=5000
step_grad=0.5
nframes=10
image_width=640
samples_per_pixel=200
def render_image(i, lookat, lookfrom):
(x0, y0, z0) = lookat
(x,y,z) = lookfrom
fn = "_movie/frame{:04}".format(i)
f = open(fn + ".ppm", "w")
result = subprocess.run(
["./target/release/raytracer",
"--world=final_scene",
"--seed=42",
"--aspect_ratio=1:1",
"--image_width={}".format(image_width),
"--samples_per_pixel={}".format(samples_per_pixel),
"--lookat={},{},{}".format(x0,y0,z0),
"--lookfrom={},{},{}".format(x,y,z)],
stdout=f)
f.close()
subprocess.run(["convert", fn + ".ppm", fn + ".png" ])
os.remove(fn + ".ppm")
def cart_to_polar(xyz):
(x,y,z) = xyz
r = sqrt(x*x + y*y + z*z)
theta = atan2(sqrt(x*x + y*y), z)
phi = atan2(y,x)
return (r,theta,phi)
def polar_to_cart(rthetaphi):
(r,theta,phi) = rthetaphi
x = r*cos(phi)*sin(theta)
y = r*sin(phi)*sin(theta)
z = r*cos(theta)
return (x,y,z)
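# Hedged round-trip check (physics convention: theta measured from the z-axis,
# phi in the x-y plane): polar_to_cart(cart_to_polar((1.0, 2.0, 3.0))) recovers
# (1.0, 2.0, 3.0) up to floating-point error.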
def add(a0b0c0, a1b1c1):
(a0,b0,c0) = a0b0c0
(a1,b1,c1) = a1b1c1
return (a0 + a1, b0 + b1, c0 + c1)
def minus(a0b0c0):
(a0,b0,c0) = a0b0c0
return (-a0, -b0, -c0)
origin = (278, 278, 400)
start = (478, 278, -600)
delta = add(start, minus(origin))
p_delta = cart_to_polar(delta)
grad = pi / 180
for i in range(0,nframes):
c_delta = polar_to_cart(p_delta)
print("Frame {}: {}".format(i, c_delta))
render_image(i, origin, add(origin,c_delta))
p_delta = add(p_delta, (0, step_grad*grad, 0))
# ffmpeg -r 30 -f image2 -i _movie/frame\%04d.png -vcodec libx264 -crf 25 test.mpg
| 25.150685
| 82
| 0.578976
|
43a581540f608db2e5249162fff93782fe6b2d46
| 1,389
|
py
|
Python
|
modeling/backbone/build.py
|
donnyyou/centerX
|
6e381cb669a6014d02e31a43915271237690531c
|
[
"Apache-2.0"
] | 350
|
2020-12-01T09:55:16.000Z
|
2020-12-23T13:47:43.000Z
|
modeling/backbone/build.py
|
powerlic/centerX
|
1073753533f26483c3ab053a7d8753708fcacde7
|
[
"Apache-2.0"
] | 39
|
2020-12-24T13:42:29.000Z
|
2022-02-10T01:09:56.000Z
|
modeling/backbone/build.py
|
powerlic/centerX
|
1073753533f26483c3ab053a7d8753708fcacde7
|
[
"Apache-2.0"
] | 49
|
2020-12-01T11:39:14.000Z
|
2020-12-21T01:45:39.000Z
|
from fvcore.common.registry import Registry
import torch.nn as nn
BACKBONE_REGISTRY = Registry("BACKBONE")
BACKBONE_REGISTRY.__doc__ = """
Registry for backbones, which extract feature maps from images
The registered object must be a callable that accepts two arguments:
1. A :class:`detectron2.config.CfgNode`
2. A :class:`detectron2.layers.ShapeSpec`, which contains the input shape specification.
It must return an instance of :class:`Backbone`.
"""
def build_backbone(cfg):
"""
Build a backbone from `cfg.MODEL.BACKBONE.NAME`.
Returns:
an instance of :class:`Backbone`
"""
backbone_name = cfg.MODEL.BACKBONE.NAME
backbone = BACKBONE_REGISTRY.get(backbone_name)(cfg)
return backbone
def get_norm(cfg, out_channels, momentum=0.1):
"""
Args:
norm (str or callable): either one of BN, SyncBN, FrozenBN, GN;
or a callable that takes a channel number and returns
the normalization layer as a nn.Module.
Returns:
nn.Module or None: the normalization layer
"""
norm = cfg.MODEL.BN_TYPE
if isinstance(norm, str):
if len(norm) == 0:
return None
norm = {
"BN": nn.BatchNorm2d,
# Fixed in https://github.com/pytorch/pytorch/pull/36382
"SyncBN": nn.SyncBatchNorm,
}[norm]
return norm(out_channels, momentum=momentum)
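# Hedged usage sketch (assuming a detectron2-style CfgNode with MODEL.BN_TYPE set):
# with cfg.MODEL.BN_TYPE == "BN", get_norm(cfg, 64) returns nn.BatchNorm2d(64, momentum=0.1);
# with "SyncBN" it returns nn.SyncBatchNorm(64, momentum=0.1).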
| 32.302326
| 88
| 0.669546
|
bcba3b2c2c06d26b3ef9fb05c8f6c9a32c761a29
| 627
|
py
|
Python
|
tests/management/test_web3.py
|
h8is2w8/aioethereum
|
eb23e28068c34cda28bbef45c3f288d16936d88e
|
[
"MIT"
] | 16
|
2017-10-04T17:44:51.000Z
|
2021-03-07T12:55:04.000Z
|
tests/management/test_web3.py
|
h8is2w8/aioethereum
|
eb23e28068c34cda28bbef45c3f288d16936d88e
|
[
"MIT"
] | 8
|
2017-10-04T22:53:08.000Z
|
2021-01-15T18:04:41.000Z
|
tests/management/test_web3.py
|
h8is2w8/aioethereum
|
eb23e28068c34cda28bbef45c3f288d16936d88e
|
[
"MIT"
] | 5
|
2018-02-22T15:56:34.000Z
|
2021-01-03T21:25:22.000Z
|
import pytest
@pytest.mark.run_loop
def test_call_web3_clientVersion(create_ethereum_client, loop, server):
client = yield from create_ethereum_client(server.http_address,
loop=loop)
response = yield from client.web3_clientVersion()
assert isinstance(response, str)
@pytest.mark.run_loop
def test_call_web3_sha3(create_ethereum_client, loop, server):
client = yield from create_ethereum_client(server.http_address,
loop=loop)
response = yield from client.web3_sha3('0x0')
assert isinstance(response, str)
| 34.833333
| 71
| 0.671451
|
436aa57f10bc965952c330e4b88da66017e8b465
| 2,107
|
py
|
Python
|
bombolone/api/hash_table.py
|
LaudateCorpus1/bombolone
|
2658bfe2ea3e659fe98e86c548e49ff0aa08f0c2
|
[
"BSD-3-Clause"
] | 31
|
2015-01-04T08:38:55.000Z
|
2021-09-05T08:53:03.000Z
|
bombolone/api/hash_table.py
|
LaudateCorpus1/bombolone
|
2658bfe2ea3e659fe98e86c548e49ff0aa08f0c2
|
[
"BSD-3-Clause"
] | 6
|
2015-02-26T12:57:05.000Z
|
2015-06-24T06:11:43.000Z
|
bombolone/api/hash_table.py
|
LaudateCorpus1/bombolone
|
2658bfe2ea3e659fe98e86c548e49ff0aa08f0c2
|
[
"BSD-3-Clause"
] | 11
|
2015-01-09T21:54:05.000Z
|
2021-11-30T04:54:19.000Z
|
# -*- coding: utf-8 -*-
"""
api.hash_table.py
~~~~~~
The Hash Table allows you to store multiple Hash Map,
each of which has an Name Map and an Hash useful to
write the content for use on the web site.
:copyright: (c) 2014 by @zizzamia
:license: BSD (See LICENSE for details)
"""
from flask import Blueprint, request, g
# Imports inside Bombolone
import bombolone.core.hash_table
from bombolone import core  # bind `core` so the core.hash_table calls below resolve
from bombolone.core.utils import jsonify, set_message
from bombolone.decorators import get_hash, authentication, check_rank
MODULE_DIR = 'admin/hash_table'
api_hash_table = Blueprint('api_hash_table', __name__)
@api_hash_table.route('/api/1.0/hash_table/list.json')
@authentication
@check_rank(10)
@get_hash('hash_table')
def overview():
""" List all the documents, each has a name
that identifies it, and an hash map. """
data = core.hash_table.get_list()
data = set_message(data)
return jsonify(data)
@api_hash_table.route('/api/1.0/hash_table/get.json')
@authentication
@check_rank(10)
@get_hash('hash_table')
def get():
""" """
_id = request.args.get("_id", None)
data = core.hash_table.get(_id)
data = set_message(data)
return jsonify(data)
@api_hash_table.route('/api/1.0/hash_table/new.json', methods=['POST'])
@authentication
@check_rank(10)
@get_hash('hash_table')
def new():
""" Create a new document within the hash table. """
params = request.json
data = core.hash_table.new(params=params)
data = set_message(data)
return jsonify(data)
@api_hash_table.route('/api/1.0/hash_table/remove.json', methods=['DELETE'])
@authentication
@check_rank(10)
def remove():
""" This method removes an hash map"""
_id = request.args.get("_id", None)
data = core.hash_table.remove(_id=_id)
data = set_message(data)
return jsonify(data)
@api_hash_table.route('/api/1.0/hash_table/update.json', methods=['POST'])
@authentication
@check_rank(10)
@get_hash('hash_table')
def update():
""" """
params = request.json
data = core.hash_table.update(params=params, my_rank=g.my['rank'])
data = set_message(data)
return jsonify(data)
| 28.093333
| 76
| 0.707641
|
598d1aee78c5298ffc93ead118968a221d7e3b5d
| 14,686
|
py
|
Python
|
cluster/cluster.py
|
mlfmonde/cluster_cli
|
d2dce9c437668cbfa43fd87a07e0b4365d19f475
|
[
"MIT"
] | null | null | null |
cluster/cluster.py
|
mlfmonde/cluster_cli
|
d2dce9c437668cbfa43fd87a07e0b4365d19f475
|
[
"MIT"
] | 14
|
2018-08-05T23:27:28.000Z
|
2019-01-25T13:40:24.000Z
|
cluster/cluster.py
|
mlfmonde/cluster_cli
|
d2dce9c437668cbfa43fd87a07e0b4365d19f475
|
[
"MIT"
] | 3
|
2018-08-05T23:24:51.000Z
|
2018-11-05T13:38:58.000Z
|
import consulate
import hashlib
import json
import logging
import os
import time
from datetime import datetime
from urllib import parse
from cluster import util
DEFAULT_TIMEOUT = 300
APP_KEY_SEPARATOR = '.' # app key prefix/md5 separator
APP_KV_FIND_PATTERN = 'app/{repo}_{branch}{separator}'
logger = logging.getLogger(__name__)
class Cluster:
_consul_url = None
_consul = None
_nodes = None
def __init__(self, consul_url='http://localhost:8500'):
self._consul_url = parse.urlparse(consul_url)
@property
def consul(self):
if not self._consul:
self._consul = consulate.Consul(
scheme=self._consul_url.scheme,
host=self._consul_url.hostname,
port=self._consul_url.port,
datacenter=None,
token=None,
)
return self._consul
@property
def nodes(self):
if not self._nodes:
self._nodes = [
node['Node'] for node in self.consul.catalog.nodes()
]
return self._nodes
def checks(self, all=False):
"""Display failed checks per nodes
:param all: if you want to see more
:return: a dict of checks per node::
{
'node1': {
'service-id1': {
checks: [(check name, status, error message), ...],
name: "Service 1",
}
'service-id2': {
checks: [(check name, status, error message), ...],
...
},
},
'node2': ...
}
"""
if all:
warn_states = ["unknown", "passing", "warning", "critical"]
else:
warn_states = ["unknown", "warning", "critical"]
checks = {}
for warn_state in warn_states:
for state in self.consul.health.state(warn_state):
if not state['Node'] in checks:
checks[state['Node']] = dict()
if not state['ServiceID'] in checks[state['Node']]:
checks[state['Node']][state['ServiceID']] = {
'checks': [],
'name': state['ServiceName']
}
checks[state['Node']][state['ServiceID']]['checks'].append(
(state['Name'], state['Status'], state['Output'])
)
return checks
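    # Hedged sketch of the structure documented above (hypothetical names):
    # checks()['node1']['web-1'] could be
    # {'name': 'web', 'checks': [('Service web check', 'critical', 'connection refused')]}.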
def get_kv_application(self, repo_name, branch):
apps = self.consul.kv.find(APP_KV_FIND_PATTERN.format(
repo=repo_name,
branch=branch,
separator=APP_KEY_SEPARATOR
)
)
if not apps:
return None, None
if len(apps) > 1:
raise RuntimeError(
"Repo / branch are ambiguous, multiple keys ({}) found for"
"given repo: {}, branch: {}".format(
sorted(apps.keys()), repo_name, branch
)
)
key, data = apps.popitem()
return key, util.json2obj(data)
def deploy(
self,
repo_name,
branch,
master=None,
slave=None,
no_wait=False,
timeout=DEFAULT_TIMEOUT,
ask_user=True,
update=False
):
key, app = self.get_kv_application(repo_name, branch)
if master and slave and master == slave:
raise RuntimeError("Master and slave must be different")
new_master = master
new_slave = slave
if not app:
if not master:
raise RuntimeError(
"Deploying a new service require a master"
)
repo_url, branch = repo_name.strip(), branch.strip()
if repo_url.endswith('.git'):
repo_url = repo_url[:-4]
md5 = hashlib.md5(
parse.urlparse(repo_url.lower()).path.encode('utf-8')
).hexdigest()
repo_name = os.path.basename(repo_url.strip('/').lower())
key = 'app/' + repo_name + (
'_' + branch if branch else ''
) + '.' + md5[:5] # don't need full md5
else:
if app.slave:
                # that was a replicated service => it will remain a replicated
                # service because the auto-detect logic guesses the slave
if not new_master:
new_master = app.slave
if not new_slave:
new_slave = app.master
if new_master == new_slave:
if master:
new_slave = app.slave
if slave:
new_master = app.master
else:
# was a master only service
if not new_master:
new_master = app.master
repo_url = app.repo_url
branch = app.branch
if new_master == new_slave:
raise RuntimeError("Master and slave must be different")
if new_master not in self.nodes:
raise RuntimeError(
"Can't deploy to unknown master (node host: {})".format(
new_master
)
)
if new_slave is not None and new_slave not in self.nodes:
raise RuntimeError(
"Can't deploy using unknown slave (node host: {}) ".format(
new_slave
)
)
if ask_user:
print(
"You are going to move following app {} from "
"[master: {} - replicate: {}] to "
"[master: {} - replicate: {}]".format(
key,
app.master if app else None,
app.slave if app else None,
new_master,
new_slave
)
)
answer = util.get_input("Please confim by entering 'yes': ")
if answer.strip().lower() != 'yes':
print("Not confirmed, Aborting")
logger.warning("Not confirmed. Aborting")
return
self._deploy(
key,
repo_url,
branch,
new_master,
slave=new_slave,
no_wait=no_wait,
timeout=timeout,
update=update
)
def move_masters_from(
self,
node,
master=None,
no_wait=False,
timeout=DEFAULT_TIMEOUT,
ask_user=True
):
move_apps = []
for key, value in self.consul.kv.find('app/').items():
app = util.json2obj(value)
if app.master == node:
mstr = app.slave
if not mstr:
if not master:
raise RuntimeError(
"You must define a default master (--master) as "
"there are some services (at least {}) without "
"replicate (slave)".format(key)
)
if master not in self.nodes:
raise RuntimeError(
"The given default master hostname: {} is "
"unknown. Available nodes: {}".format(
master, self.nodes
)
)
if master == node:
raise RuntimeError(
"You must provide a different default master: {} "
"it must be different to the node that you want"
"clear: {}".format(
master, node
)
)
mstr = master
move_apps.append(
(
key,
app,
mstr,
app.master if app.slave else None,
)
)
if ask_user:
print("You are going to move following apps:")
for key, app, mstr, _ in move_apps:
print(" - from {} to {}, project: {}".format(
app.master,
mstr,
key
))
            answer = util.get_input("Please confirm by entering 'yes': ")
if answer.strip().lower() != 'yes':
print("Not confirmed, Aborting")
logger.warning("Not confirmed. Aborting")
return
for key, app, mstr, slave in move_apps:
self._deploy(
key,
app.repo_url,
app.branch,
mstr,
slave=slave,
no_wait=no_wait,
timeout=timeout
)
def inspect_node(
self,
node,
):
master_apps = []
for key, value in self.consul.kv.find('app/').items():
app = util.json2obj(value)
if app.master == node:
master_apps.append(key)
print("Master apps of node {node}:".format(node=node))
print("\n".join(sorted(master_apps)))
# communicate with consul
def _deploy(
self,
kv_key,
repo_url,
branch,
master,
slave=None,
no_wait=False,
timeout=DEFAULT_TIMEOUT,
event_consumed=None,
update=False
):
"""Deploy a service waiting the end end of deployment before carry on
"""
        def deploy_finished(kv_app_before, kv_app_after, *args, **kwargs):
            if kv_app_before and kv_app_after:
                return kv_app_after.deploy_date > kv_app_before.deploy_date
            return not kv_app_before and bool(kv_app_after)
if not event_consumed:
event_consumed = deploy_finished
self._fire_event(
kv_key,
'deploy',
json.dumps(
{
'repo': repo_url,
'branch': branch,
'master': master,
'slave': slave,
'update': update
}
),
no_wait,
event_consumed,
timeout
)
    # defined as a classmethod so it can easily be reused in unit tests
@classmethod
def migrate_finished(
cls, kv_app_before, kv_app_after, maintenance=None, self=None,
**kwargs
):
if maintenance:
self.was_maintenance = True
if self.was_maintenance and not maintenance:
return True
return False
def migrate(
self,
source_repo,
source_branch,
target_branch,
target_repo=None,
no_wait=False,
timeout=DEFAULT_TIMEOUT,
ask_user=True,
no_update=False
):
if not target_repo:
target_repo = source_repo
if target_branch in ['prod', 'production']:
raise RuntimeError(
"You can't migrate data to production branch using this script"
)
source_key, source_app = self.get_kv_application(
source_repo, source_branch
)
if not source_app:
raise RuntimeError(
"Source service (repo: {}, branch: {}) not found".format(
source_repo,
source_branch
)
)
target_key, target_app = self.get_kv_application(
target_repo, target_branch
)
if not target_app:
raise RuntimeError(
"Target service (repo: {}, branch: {}) not found".format(
target_repo,
target_branch
)
)
self.was_maintenance = False
if ask_user:
print(
"You are on the way to replace common docker volumes on "
" service {} by data from {}".format(
target_key, source_key
)
)
            answer = util.get_input("Please confirm by entering 'yes': ")
if answer.strip().lower() != 'yes':
print("Not confirmed, Aborting")
logger.warning("Not confirmed. Aborting")
return
self._fire_event(
target_key,
'migrate',
json.dumps(
{
'repo': source_app.repo_url,
'branch': source_app.branch,
'target': {
'repo': target_app.repo_url,
'branch': target_app.branch
},
'update': not no_update
}
),
no_wait,
Cluster.migrate_finished,
timeout
)
def _fire_event(
self,
kv_key,
event_name,
payload,
no_wait,
event_consumed,
timeout
):
app_before = util.json2obj(self.consul.kv.get(kv_key))
logger.info(
"Emit %s event for kv key: %s with following payload: %r",
event_name, kv_key, payload
)
event_id = self.consul.event.fire(
event_name, payload
)
start_date = datetime.now()
while not no_wait and not event_consumed(
app_before,
util.json2obj(self.consul.kv.get(kv_key)),
maintenance=self.consul.kv.get_record(
kv_key.replace("app/", "maintenance/")
),
self=self
):
time.sleep(1)
if (datetime.now() - start_date).seconds > timeout:
raise TimeoutError(
"Event (id: {}) was not processed in the expected time"
" ({}s),".format(event_id, timeout)
)
logger.info(
"Event %s takes %ss to consume",
event_name, (datetime.now() - start_date).seconds
)
return event_id
| 32.135667
| 79
| 0.456557
|
60ff8d214ced3c555fadfc2976c7fb03eb0e297b
| 800
|
py
|
Python
|
weather.py
|
mirosval/mLamp-Raspberry
|
68b51a85b65db52e40132ed9ec258c3564052f07
|
[
"MIT"
] | null | null | null |
weather.py
|
mirosval/mLamp-Raspberry
|
68b51a85b65db52e40132ed9ec258c3564052f07
|
[
"MIT"
] | null | null | null |
weather.py
|
mirosval/mLamp-Raspberry
|
68b51a85b65db52e40132ed9ec258c3564052f07
|
[
"MIT"
] | null | null | null |
from __future__ import print_function, division
import urllib.request
import json
import logging
import sys
logging.basicConfig(filename="weather.log", level=logging.DEBUG, format='%(asctime)s %(message)s')
# Here you can configure the location for which to retrieve the weather data.
base_url = "http://api.openweathermap.org/data/2.5/forecast/daily?q=Prague,cz&mode=json&units=metric&cnt=1"
try:
response = urllib.request.urlopen(base_url)
weather_forecast = json.loads(response.read().decode())
except Exception:
logging.error("Failed to connect to the weather server")
sys.exit(1)
forecast_temp = weather_forecast['list'][0]['temp']['day']
logging.info("Forecast Temp: {}".format(forecast_temp))
with open("daily_temp.txt", 'w') as f:
f.write(str(forecast_temp))
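# Usage note (inferred from the surrounding project, not stated in the file):
# this script is meant to run periodically, e.g. from cron, so that the lamp
# controller can read the latest forecast from daily_temp.txt.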
| 32
| 107
| 0.74875
|
c8e09f9ce3cd71e0c6cda652a8e313028237d413
| 4,611
|
py
|
Python
|
components/solax_x1/sensor.py
|
dwar/esphome-modbus-solax-x1
|
edf43772a4734d28f56840d69abac29ad7851155
|
[
"Apache-2.0"
] | 8
|
2021-11-14T21:16:52.000Z
|
2022-03-23T10:02:20.000Z
|
components/solax_x1/sensor.py
|
dwar/esphome-modbus-solax-x1
|
edf43772a4734d28f56840d69abac29ad7851155
|
[
"Apache-2.0"
] | 10
|
2021-07-09T13:03:18.000Z
|
2022-03-23T09:55:03.000Z
|
components/solax_x1/sensor.py
|
dwar/esphome-modbus-solax-x1
|
edf43772a4734d28f56840d69abac29ad7851155
|
[
"Apache-2.0"
] | 4
|
2021-09-13T11:36:28.000Z
|
2022-03-20T12:03:02.000Z
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import sensor
from esphome.const import (
CONF_MODE,
CONF_TEMPERATURE,
DEVICE_CLASS_CURRENT,
DEVICE_CLASS_EMPTY,
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_POWER,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_VOLTAGE,
ICON_COUNTER,
ICON_CURRENT_AC,
ICON_EMPTY,
ICON_TIMER,
STATE_CLASS_MEASUREMENT,
STATE_CLASS_TOTAL_INCREASING,
UNIT_AMPERE,
UNIT_CELSIUS,
UNIT_EMPTY,
UNIT_HERTZ,
UNIT_VOLT,
UNIT_WATT,
)
from . import CONF_SOLAX_X1_ID, SolaxX1
DEPENDENCIES = ["solax_x1"]
CONF_ENERGY_TODAY = "energy_today"
CONF_ENERGY_TOTAL = "energy_total"
CONF_DC1_CURRENT = "dc1_current"
CONF_DC1_VOLTAGE = "dc1_voltage"
CONF_DC2_CURRENT = "dc2_current"
CONF_DC2_VOLTAGE = "dc2_voltage"
CONF_AC_CURRENT = "ac_current"
CONF_AC_VOLTAGE = "ac_voltage"
CONF_AC_FREQUENCY = "ac_frequency"
CONF_AC_POWER = "ac_power"
CONF_RUNTIME_TOTAL = "runtime_total"
CONF_ERROR_BITS = "error_bits"
UNIT_HOURS = "h"
UNIT_KILO_WATT_HOURS = "kWh"
ICON_MODE = "mdi:heart-pulse"
ICON_ERROR_BITS = "mdi:alert-circle-outline"
SENSORS = [
CONF_ENERGY_TODAY,
CONF_ENERGY_TOTAL,
CONF_DC1_CURRENT,
CONF_DC1_VOLTAGE,
CONF_DC2_CURRENT,
CONF_DC2_VOLTAGE,
CONF_AC_CURRENT,
CONF_AC_VOLTAGE,
CONF_AC_FREQUENCY,
CONF_AC_POWER,
CONF_RUNTIME_TOTAL,
CONF_ERROR_BITS,
CONF_MODE,
CONF_TEMPERATURE,
]
CONFIG_SCHEMA = cv.Schema(
{
cv.GenerateID(CONF_SOLAX_X1_ID): cv.use_id(SolaxX1),
cv.Optional(CONF_ENERGY_TODAY): sensor.sensor_schema(
UNIT_KILO_WATT_HOURS,
ICON_COUNTER,
3,
DEVICE_CLASS_ENERGY,
STATE_CLASS_TOTAL_INCREASING,
),
cv.Optional(CONF_ENERGY_TOTAL): sensor.sensor_schema(
UNIT_KILO_WATT_HOURS,
ICON_COUNTER,
3,
DEVICE_CLASS_ENERGY,
STATE_CLASS_TOTAL_INCREASING,
),
cv.Optional(CONF_DC1_CURRENT): sensor.sensor_schema(
UNIT_AMPERE,
ICON_EMPTY,
1,
DEVICE_CLASS_CURRENT,
STATE_CLASS_MEASUREMENT,
),
cv.Optional(CONF_DC1_VOLTAGE): sensor.sensor_schema(
UNIT_VOLT,
ICON_EMPTY,
1,
DEVICE_CLASS_VOLTAGE,
STATE_CLASS_MEASUREMENT,
),
cv.Optional(CONF_DC2_CURRENT): sensor.sensor_schema(
UNIT_AMPERE,
ICON_EMPTY,
1,
DEVICE_CLASS_CURRENT,
STATE_CLASS_MEASUREMENT,
),
cv.Optional(CONF_DC2_VOLTAGE): sensor.sensor_schema(
UNIT_VOLT,
ICON_EMPTY,
1,
DEVICE_CLASS_VOLTAGE,
STATE_CLASS_MEASUREMENT,
),
cv.Optional(CONF_AC_CURRENT): sensor.sensor_schema(
UNIT_AMPERE,
ICON_EMPTY,
2,
DEVICE_CLASS_CURRENT,
STATE_CLASS_MEASUREMENT,
),
cv.Optional(CONF_AC_VOLTAGE): sensor.sensor_schema(
UNIT_VOLT,
ICON_EMPTY,
1,
DEVICE_CLASS_VOLTAGE,
STATE_CLASS_MEASUREMENT,
),
cv.Optional(CONF_AC_FREQUENCY): sensor.sensor_schema(
UNIT_HERTZ,
ICON_CURRENT_AC,
1,
DEVICE_CLASS_EMPTY,
STATE_CLASS_MEASUREMENT,
),
cv.Optional(CONF_AC_POWER): sensor.sensor_schema(
UNIT_WATT,
ICON_EMPTY,
0,
DEVICE_CLASS_POWER,
STATE_CLASS_MEASUREMENT,
),
cv.Optional(CONF_RUNTIME_TOTAL): sensor.sensor_schema(
UNIT_HOURS,
ICON_TIMER,
0,
DEVICE_CLASS_EMPTY,
STATE_CLASS_TOTAL_INCREASING,
),
cv.Optional(CONF_ERROR_BITS): sensor.sensor_schema(
UNIT_EMPTY, ICON_ERROR_BITS, 0, DEVICE_CLASS_EMPTY
),
cv.Optional(CONF_MODE): sensor.sensor_schema(
UNIT_EMPTY, ICON_MODE, 0, DEVICE_CLASS_EMPTY
),
cv.Optional(CONF_TEMPERATURE): sensor.sensor_schema(
UNIT_CELSIUS,
ICON_EMPTY,
1,
DEVICE_CLASS_TEMPERATURE,
STATE_CLASS_MEASUREMENT,
),
}
)
async def to_code(config):
hub = await cg.get_variable(config[CONF_SOLAX_X1_ID])
for key in SENSORS:
if key in config:
conf = config[key]
sens = await sensor.new_sensor(conf)
cg.add(getattr(hub, f"set_{key}_sensor")(sens))
| 26.964912
| 62
| 0.618521
|
abb5b8e0e16c2a4aa7e2e70a5716cd0d60eaf61d
| 550
|
py
|
Python
|
src/day17b.py
|
MKuranowski/AdventOfCode2021
|
55a1da3e410ff2ccb8a80f9b7ae97e6b1d759195
|
[
"WTFPL"
] | null | null | null |
src/day17b.py
|
MKuranowski/AdventOfCode2021
|
55a1da3e410ff2ccb8a80f9b7ae97e6b1d759195
|
[
"WTFPL"
] | null | null | null |
src/day17b.py
|
MKuranowski/AdventOfCode2021
|
55a1da3e410ff2ccb8a80f9b7ae97e6b1d759195
|
[
"WTFPL"
] | null | null | null |
from fileinput import FileInput
from day17a import BLTR, ShotResult, shoot
def brute_force_all(target: BLTR) -> int:
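    # Brute-force a heuristic velocity window: any vel_x beyond the target's
    # right edge overshoots on the first step (the target lies right of the
    # origin), while the vel_y bounds are a generous guess for typical inputs.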
count = 0
for vel_x in range(1, target.right+1):
for vel_y in range(-100, 500):
result, _ = shoot(vel_x, vel_y, target)
if result == ShotResult.WITHIN:
count += 1
elif result == ShotResult.OVERSHOT:
break
return count
if __name__ == "__main__":
target = BLTR.from_input(next(FileInput()).strip())
print(brute_force_all(target))
| 22
| 55
| 0.605455
|
26a3947cb3b8f5d2707912b5436ecf6c14a89a08
| 1,231
|
py
|
Python
|
irco/logging.py
|
GaretJax/irco
|
e5df3cf1a608dc813011a1ee7e920637e5bd155c
|
[
"MIT"
] | null | null | null |
irco/logging.py
|
GaretJax/irco
|
e5df3cf1a608dc813011a1ee7e920637e5bd155c
|
[
"MIT"
] | null | null | null |
irco/logging.py
|
GaretJax/irco
|
e5df3cf1a608dc813011a1ee7e920637e5bd155c
|
[
"MIT"
] | 1
|
2015-12-17T19:18:28.000Z
|
2015-12-17T19:18:28.000Z
|
from __future__ import absolute_import, print_function
import logging
import os
import sys
logging.basicConfig(
filename=os.environ.get('IRCO_LOGFILE', 'irco.log'),
level=logging.INFO,
)
from structlog import get_logger, configure
from structlog.stdlib import LoggerFactory
__all__ = ['get_logger']
configure(logger_factory=LoggerFactory())
from raven import Client
from irco.conf import settings
from irco import __version__
def make_excepthook(client):
def excepthook(*exc_info):
ident = client.get_ident(client.captureException(exc_info))
r = sys.__excepthook__(*exc_info)
print('-' * 80)
print(' This exception was logged remotely. Please use the following'
' ID when\n seeking support:', ident)
print('-' * 80)
return r
return excepthook
enabled = settings.getboolean('logging', 'sentry')
dsn = settings.get('logging', 'sentry_dsn')
if enabled:
sentry = Client(dsn)
sentry.tags_context({
'version': __version__,
})
if dsn:
sys.excepthook = make_excepthook(sentry)
else:
# Hide disabled sentry reporting
logging.getLogger('raven.base.Client').setLevel(logging.WARNING)
sentry = Client('')
| 23.226415
| 78
| 0.696182
|
7ce89c46f636fde71ee0a887ac7403a640c90ce5
| 1,781
|
py
|
Python
|
example_problems/tutorial/tiling_mxn-boards_with_1x2-boards/services/tell_if_tilable/tell_if_tilable_server.py
|
DottaPaperella/TALight
|
580322c3121c9acde9827f996fd4e39e31d93a6f
|
[
"MIT"
] | null | null | null |
example_problems/tutorial/tiling_mxn-boards_with_1x2-boards/services/tell_if_tilable/tell_if_tilable_server.py
|
DottaPaperella/TALight
|
580322c3121c9acde9827f996fd4e39e31d93a6f
|
[
"MIT"
] | null | null | null |
example_problems/tutorial/tiling_mxn-boards_with_1x2-boards/services/tell_if_tilable/tell_if_tilable_server.py
|
DottaPaperella/TALight
|
580322c3121c9acde9827f996fd4e39e31d93a6f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from sys import stderr, exit, argv
from random import randrange
#from TALinputs import TALinput
from multilanguage import Env, Lang, TALcolors
# METADATA OF THIS TAL_SERVICE:
problem="tiling_mxn-boards_with_1x2-boards"
service="is_tilable"
args_list = [
('m',int),
('n',int),
('my_conjecture',str),
('h',int),
('k',int),
('lang',str),
('ISATTY',bool),
]
ENV =Env(problem, service, args_list)
TAc =TALcolors(ENV)
LANG=Lang(ENV, TAc, lambda fstring: eval(f"f'{fstring}'"))
TAc.print(LANG.opening_msg, "green")
# START CODING YOUR SERVICE:
assert ENV['h']==1
assert ENV['k']==2
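# Reasoning: an m x n board is tilable by 1x2 dominoes exactly when m*n is
# even. Each domino covers two cells, so an odd area is impossible; if the
# area is even, at least one side is even and can be tiled along its length.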
print()
if (ENV['m'] * ENV['n']) % 2 == 1:
if ENV['my_conjecture'] == "yes":
TAc.NO()
print(LANG.render_feedback("FALSE-is-not-tilable", f"Contrary to what you have asserted, the {ENV['m']}x{ENV['n']}-grid is NOT tilable. If you are not convinced you can submit a tiling of that grid to the service 'check_my_tiling'."))
if ENV['my_conjecture'] == "no":
TAc.OK()
print(LANG.render_feedback("TRUE-is-not-tilable", f"You are perfecty right: the {ENV['m']}x{ENV['n']}-grid is NOT tilable."))
if (ENV['m'] * ENV['n']) % 2 == 0:
if ENV['my_conjecture'] == "yes":
TAc.OK()
print(LANG.render_feedback("TRUE-is-tilable", f"We agree on the fact that the {ENV['m']}x{ENV['n']}-grid is tilable. If you want to exhibit us a tiling for this grid you can submit it to the service 'check_my_tiling'."))
if ENV['my_conjecture'] == "no":
TAc.NO()
print(LANG.render_feedback("FALSE-is-tilable", f"No, the {ENV['m']}x{ENV['n']}-grid is tilable. If you can not believe a tiling of the {ENV['m']}x{ENV['n']}-grid exists try the service 'gimme_hints_on_a_tiling'."))
exit(0)
| 35.62
| 242
| 0.64009
|
77f3b8fa3a2964082bf118f74cab35e064849264
| 1,309
|
py
|
Python
|
Data Structures and Algorithms/Graphs/08. Kruskal.py
|
luckyrabbit85/Python
|
ed134fd70b4a7b84b183b87b85ad5190f54c9526
|
[
"MIT"
] | 1
|
2021-07-15T18:40:26.000Z
|
2021-07-15T18:40:26.000Z
|
Data Structures and Algorithms/Graphs/08. Kruskal.py
|
luckyrabbit85/Python
|
ed134fd70b4a7b84b183b87b85ad5190f54c9526
|
[
"MIT"
] | null | null | null |
Data Structures and Algorithms/Graphs/08. Kruskal.py
|
luckyrabbit85/Python
|
ed134fd70b4a7b84b183b87b85ad5190f54c9526
|
[
"MIT"
] | null | null | null |
import DisjointSet as dst
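# The local DisjointSet module (not shown here) is assumed to expose a
# DisjointSet(nodes) constructor plus find(x) and union(x, y) methods,
# i.e. a standard union-find over the node labels added below.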
class Graph:
def __init__(self, vertices):
self.V = vertices
self.graph = []
self.nodes = []
self.MST = []
def addEdge(self, s, d, w):
self.graph.append([s, d, w])
def addNode(self, value):
self.nodes.append(value)
    def printSolution(self):
        for s, d, w in self.MST:
            print("%s - %s: %s" % (s, d, w))
def kruskalAlgo(self):
i, e = 0, 0
ds = dst.DisjointSet(self.nodes)
self.graph = sorted(self.graph, key=lambda item: item[2])
while e < self.V - 1:
s, d, w = self.graph[i]
i += 1
x = ds.find(s)
y = ds.find(d)
if x != y:
e += 1
self.MST.append([s, d, w])
ds.union(x, y)
        self.printSolution()
g = Graph(5)
g.addNode("A")
g.addNode("B")
g.addNode("C")
g.addNode("D")
g.addNode("E")
g.addEdge("A", "B", 5)
g.addEdge("A", "C", 13)
g.addEdge("A", "E", 15)
g.addEdge("B", "A", 5)
g.addEdge("B", "C", 10)
g.addEdge("B", "D", 8)
g.addEdge("C", "A", 13)
g.addEdge("C", "B", 10)
g.addEdge("C", "E", 20)
g.addEdge("C", "D", 6)
g.addEdge("D", "B", 8)
g.addEdge("D", "C", 6)
g.addEdge("E", "A", 15)
g.addEdge("E", "C", 20)
g.kruskalAlgo()
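# Expected output, assuming DisjointSet behaves as standard union-find
# (the MST edges in selection order; total weight 34):
# A - B: 5
# C - D: 6
# B - D: 8
# A - E: 15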
| 22.186441
| 65
| 0.477464
|
38c2a8e9eeed9984e1511aa6d206786b3d573459
| 39,011
|
bzl
|
Python
|
tools/arm_compiler/cc_toolchain_config.bzl
|
colatkinson/bazel_arm_toolchain
|
01eaaf12f48c59a1d3569f480d1cc7bc472173bf
|
[
"Apache-2.0"
] | null | null | null |
tools/arm_compiler/cc_toolchain_config.bzl
|
colatkinson/bazel_arm_toolchain
|
01eaaf12f48c59a1d3569f480d1cc7bc472173bf
|
[
"Apache-2.0"
] | null | null | null |
tools/arm_compiler/cc_toolchain_config.bzl
|
colatkinson/bazel_arm_toolchain
|
01eaaf12f48c59a1d3569f480d1cc7bc472173bf
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Tests compiling using an external Linaro toolchain on a Linux machine
#
"""Implementation of a rule that configures a Linaro toolchain."""
load(
"@bazel_tools//tools/cpp:cc_toolchain_config_lib.bzl",
"action_config",
"feature",
"flag_group",
"flag_set",
"tool",
"tool_path",
"with_feature_set",
)
load("@bazel_tools//tools/build_defs/cc:action_names.bzl", "ACTION_NAMES")
def _impl(ctx):
if (ctx.attr.cpu == "armeabi-v7a"):
toolchain_identifier = "armeabi-v7a"
elif (ctx.attr.cpu == "armeabi-v6"):
toolchain_identifier = "armeabi-v6"
elif (ctx.attr.cpu == "k8"):
toolchain_identifier = "local"
else:
fail("Unreachable")
if (ctx.attr.cpu == "armeabi-v7a"):
host_system_name = "armeabi-v7a"
elif (ctx.attr.cpu == "armeabi-v6"):
host_system_name = "armeabi-v6"
elif (ctx.attr.cpu == "k8"):
host_system_name = "local"
else:
fail("Unreachable")
if (ctx.attr.cpu == "armeabi-v7a"):
target_system_name = "arm_a15"
elif (ctx.attr.cpu == "armeabi-v6"):
target_system_name = "arm_1176"
elif (ctx.attr.cpu == "k8"):
target_system_name = "local"
else:
fail("Unreachable")
if (ctx.attr.cpu == "armeabi-v7a"):
target_cpu = "armeabi-v7a"
elif (ctx.attr.cpu == "armeabi-v6"):
target_cpu = "armeabi-v6"
elif (ctx.attr.cpu == "k8"):
target_cpu = "k8"
else:
fail("Unreachable")
if (ctx.attr.cpu == "armeabi-v7a"):
target_libc = "glibc_2.19"
elif (ctx.attr.cpu == "armeabi-v6"):
target_libc = "glibc_2.19"
elif (ctx.attr.cpu == "k8"):
target_libc = "local"
else:
fail("Unreachable")
if (ctx.attr.cpu == "k8"):
compiler = "compiler"
elif (ctx.attr.cpu == "armeabi-v7a"):
compiler = "gcc"
elif (ctx.attr.cpu == "armeabi-v6"):
compiler = "gcc"
else:
fail("Unreachable")
if (ctx.attr.cpu == "armeabi-v7a"):
abi_version = "gcc"
elif (ctx.attr.cpu == "armeabi-v6"):
abi_version = "gcc"
elif (ctx.attr.cpu == "k8"):
abi_version = "local"
else:
fail("Unreachable")
if (ctx.attr.cpu == "armeabi-v7a"):
abi_libc_version = "glibc_2.19"
elif (ctx.attr.cpu == "armeabi-v6"):
abi_libc_version = "glibc_2.19"
elif (ctx.attr.cpu == "k8"):
abi_libc_version = "local"
else:
fail("Unreachable")
cc_target_os = None
builtin_sysroot = None
all_link_actions = [
ACTION_NAMES.cpp_link_executable,
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
]
if (ctx.attr.cpu == "armeabi-v7a"):
objcopy_embed_data_action = action_config(
action_name = "objcopy_embed_data",
enabled = True,
tools = [
tool(path = "linaro_linux_gcc/arm-linux-gnueabihf-objcopy"),
],
)
elif (ctx.attr.cpu == "armeabi-v6"):
objcopy_embed_data_action = action_config(
action_name = "objcopy_embed_data",
enabled = True,
tools = [
tool(path = "raspi_linux_gcc/arm-linux-gnueabihf-objcopy"),
],
)
elif (ctx.attr.cpu == "k8"):
objcopy_embed_data_action = action_config(
action_name = "objcopy_embed_data",
enabled = True,
tools = [tool(path = "/usr/bin/objcopy")],
)
else:
objcopy_embed_data_action = None
action_configs = [objcopy_embed_data_action]
if (ctx.attr.cpu == "k8"):
unfiltered_compile_flags_feature = feature(
name = "unfiltered_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = [
"-fno-canonical-system-headers",
"-Wno-builtin-macro-redefined",
"-D__DATE__=\"redacted\"",
"-D__TIMESTAMP__=\"redacted\"",
"-D__TIME__=\"redacted\"",
],
),
],
),
],
)
elif (ctx.attr.cpu == "armeabi-v7a" or ctx.attr.cpu == "armeabi-v6"):
unfiltered_compile_flags_feature = feature(
name = "unfiltered_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = [
"-no-canonical-prefixes",
"-Wno-builtin-macro-redefined",
"-D__DATE__=\"redacted\"",
"-D__TIMESTAMP__=\"redacted\"",
"-D__TIME__=\"redacted\"",
],
),
],
),
],
)
else:
unfiltered_compile_flags_feature = None
supports_dynamic_linker_feature = feature(name = "supports_dynamic_linker", enabled = True)
if (ctx.attr.cpu == "armeabi-v7a"):
default_compile_flags_feature = feature(
name = "default_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = [
"--sysroot=external/org_linaro_components_toolchain_gcc_5_3_1/arm-linux-gnueabihf/libc",
"-mfloat-abi=hard",
"-nostdinc",
"-isystem",
"external/org_linaro_components_toolchain_gcc_5_3_1/lib/gcc/arm-linux-gnueabihf/5.3.1/include",
"-isystem",
"external/org_linaro_components_toolchain_gcc_5_3_1/arm-linux-gnueabihf/libc/usr/include",
"-isystem",
"external/org_linaro_components_toolchain_gcc_5_3_1/lib/gcc/arm-linux-gnueabihf/5.3.1/include-fixed",
"-isystem",
"external/org_linaro_components_toolchain_gcc_5_3_1/arm-linux-gnueabihf/libc/usr/include",
"-U_FORTIFY_SOURCE",
"-fstack-protector",
"-fPIE",
"-fdiagnostics-color=always",
"-Wall",
"-Wunused-but-set-parameter",
"-Wno-free-nonheap-object",
"-fno-omit-frame-pointer",
],
),
],
),
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [flag_group(flags = ["-g"])],
with_features = [with_feature_set(features = ["dbg"])],
),
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = [
"-g0",
"-O2",
"-DNDEBUG",
"-ffunction-sections",
"-fdata-sections",
],
),
],
with_features = [with_feature_set(features = ["opt"])],
),
flag_set(
actions = [
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = [
"-std=c++11",
"-isystem",
"external/org_linaro_components_toolchain_gcc_5_3_1/arm-linux-gnueabihf/include/c++/5.3.1/arm-linux-gnueabihf",
"-isystem",
"external/org_linaro_components_toolchain_gcc_5_3_1/arm-linux-gnueabihf/include/c++/5.3.1",
"-isystem",
"external/org_linaro_components_toolchain_gcc_5_3_1/include/c++/5.3.1/arm-linux-gnueabihf",
"-isystem",
"external/org_linaro_components_toolchain_gcc_5_3_1/include/c++/5.3.1",
],
),
],
),
],
)
elif (ctx.attr.cpu == "armeabi-v6"):
default_compile_flags_feature = feature(
name = "default_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = [
"--sysroot=external/raspi_components_toolchain_gcc_4_8_3/arm-linux-gnueabihf/libc",
"-mfloat-abi=hard",
"-nostdinc",
"-isystem",
"external/raspi_components_toolchain_gcc_4_8_3/lib/gcc/arm-linux-gnueabihf/4.8.3/include",
"-isystem",
"external/raspi_components_toolchain_gcc_4_8_3/arm-linux-gnueabihf/libc/usr/include",
"-isystem",
"external/raspi_components_toolchain_gcc_4_8_3/arm-linux-gnueabihf/libc/usr/include/arm-linux-gnueabihf",
"-isystem",
"external/raspi_components_toolchain_gcc_4_8_3/lib/gcc/arm-linux-gnueabihf/4.8.3/include-fixed",
"-isystem",
"external/raspi_components_toolchain_gcc_4_8_3/arm-linux-gnueabihf/libc/usr/include",
"-U_FORTIFY_SOURCE",
"-fstack-protector",
"-fPIE",
# "-fdiagnostics-color=always",
"-Wall",
"-Wunused-but-set-parameter",
"-Wno-free-nonheap-object",
"-fno-omit-frame-pointer",
],
),
],
),
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [flag_group(flags = ["-g"])],
with_features = [with_feature_set(features = ["dbg"])],
),
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = [
"-g0",
"-O2",
"-DNDEBUG",
"-ffunction-sections",
"-fdata-sections",
],
),
],
with_features = [with_feature_set(features = ["opt"])],
),
flag_set(
actions = [
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = [
"-std=c++11",
"-isystem",
"external/raspi_components_toolchain_gcc_4_8_3/arm-linux-gnueabihf/include/c++/4.8.3/arm-linux-gnueabihf",
"-isystem",
"external/raspi_components_toolchain_gcc_4_8_3/arm-linux-gnueabihf/include/c++/4.8.3",
"-isystem",
"external/raspi_components_toolchain_gcc_4_8_3/include/c++/4.8.3/arm-linux-gnueabihf",
"-isystem",
"external/raspi_components_toolchain_gcc_4_8_3/include/c++/4.8.3",
],
),
],
),
],
)
elif (ctx.attr.cpu == "k8"):
default_compile_flags_feature = feature(
name = "default_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = [
"-U_FORTIFY_SOURCE",
"-D_FORTIFY_SOURCE=2",
"-fstack-protector",
"-Wall",
"-Wl,-z,-relro,-z,now",
"-Wunused-but-set-parameter",
"-Wno-free-nonheap-object",
"-fno-omit-frame-pointer",
],
),
],
),
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [flag_group(flags = ["-g"])],
with_features = [with_feature_set(features = ["dbg"])],
),
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = [
"-g0",
"-O2",
"-DNDEBUG",
"-ffunction-sections",
"-fdata-sections",
],
),
],
with_features = [with_feature_set(features = ["opt"])],
),
flag_set(
actions = [
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [flag_group(flags = ["-std=c++0x"])],
),
],
)
else:
default_compile_flags_feature = None
supports_pic_feature = feature(name = "supports_pic", enabled = True)
opt_feature = feature(name = "opt")
user_compile_flags_feature = feature(
name = "user_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [
flag_group(
flags = ["%{user_compile_flags}"],
iterate_over = "user_compile_flags",
expand_if_available = "user_compile_flags",
),
],
),
],
)
sysroot_feature = feature(
name = "sysroot",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
ACTION_NAMES.cpp_link_executable,
ACTION_NAMES.cpp_link_dynamic_library,
ACTION_NAMES.cpp_link_nodeps_dynamic_library,
],
flag_groups = [
flag_group(
flags = ["--sysroot=%{sysroot}"],
expand_if_available = "sysroot",
),
],
),
],
)
if (ctx.attr.cpu == "armeabi-v7a"):
default_link_flags_feature = feature(
name = "default_link_flags",
enabled = True,
flag_sets = [
flag_set(
actions = all_link_actions,
flag_groups = [
flag_group(
flags = [
"--sysroot=external/org_linaro_components_toolchain_gcc_5_3_1/arm-linux-gnueabihf/libc",
"-lstdc++",
"-Wl,-Bstatic",
"-latomic",
"-Wl,-Bdynamic",
"-lm",
"-lpthread",
"-Ltools/arm_compiler/linaro_linux_gcc/clang_more_libs",
"-Lexternal/org_linaro_components_toolchain_gcc_5_3_1/arm-linux-gnueabihf/lib",
"-Lexternal/org_linaro_components_toolchain_gcc_5_3_1/arm-linux-gnueabihf/libc/lib",
"-Lexternal/org_linaro_components_toolchain_gcc_5_3_1/arm-linux-gnueabihf/libc/usr/lib",
"-Bexternal/org_linaro_components_toolchain_gcc_5_3_1/arm-linux-gnueabihf/bin",
"-Wl,--dynamic-linker=/lib/ld-linux-armhf.so.3",
"-no-canonical-prefixes",
"-pie",
"-Wl,-z,relro,-z,now",
],
),
],
),
flag_set(
actions = all_link_actions,
flag_groups = [flag_group(flags = ["-Wl,--gc-sections"])],
with_features = [with_feature_set(features = ["opt"])],
),
],
)
elif (ctx.attr.cpu == "armeabi-v6"):
default_link_flags_feature = feature(
name = "default_link_flags",
enabled = True,
flag_sets = [
flag_set(
actions = all_link_actions,
flag_groups = [
flag_group(
flags = [
"--sysroot=external/raspi_components_toolchain_gcc_4_8_3/arm-linux-gnueabihf/libc",
"-static-libgcc",
"-static-libstdc++",
"-Wl,-Bstatic",
"-lstdc++",
"-latomic",
"-lm",
"-Wl,-Bdynamic",
"-lpthread",
"-Ltools/arm_compiler/linaro_linux_gcc/clang_more_libs",
"-Lexternal/raspi_components_toolchain_gcc_4_8_3/arm-linux-gnueabihf/lib",
"-Lexternal/raspi_components_toolchain_gcc_4_8_3/arm-linux-gnueabihf/libc/lib",
"-Lexternal/raspi_components_toolchain_gcc_4_8_3/arm-linux-gnueabihf/libc/usr/lib",
"-Bexternal/raspi_components_toolchain_gcc_4_8_3/arm-linux-gnueabihf/bin",
"-Wl,--dynamic-linker=/lib/ld-linux-armhf.so.3",
"-no-canonical-prefixes",
"-pie",
"-Wl,-z,relro,-z,now",
],
),
],
),
flag_set(
actions = all_link_actions,
flag_groups = [flag_group(flags = ["-Wl,--gc-sections"])],
with_features = [with_feature_set(features = ["opt"])],
),
],
)
elif (ctx.attr.cpu == "k8"):
default_link_flags_feature = feature(
name = "default_link_flags",
enabled = True,
flag_sets = [
flag_set(
actions = all_link_actions,
flag_groups = [
flag_group(
flags = [
"-lstdc++",
"-lm",
"-Wl,-no-as-needed",
"-pass-exit-codes",
],
),
],
),
flag_set(
actions = all_link_actions,
flag_groups = [flag_group(flags = ["-Wl,--gc-sections"])],
with_features = [with_feature_set(features = ["opt"])],
),
],
)
else:
default_link_flags_feature = None
objcopy_embed_flags_feature = feature(
name = "objcopy_embed_flags",
enabled = True,
flag_sets = [
flag_set(
actions = ["objcopy_embed_data"],
flag_groups = [flag_group(flags = ["-I", "binary"])],
),
],
)
dbg_feature = feature(name = "dbg")
if (ctx.attr.cpu == "k8"):
features = [
default_compile_flags_feature,
default_link_flags_feature,
supports_dynamic_linker_feature,
supports_pic_feature,
objcopy_embed_flags_feature,
opt_feature,
dbg_feature,
user_compile_flags_feature,
sysroot_feature,
unfiltered_compile_flags_feature,
]
elif (ctx.attr.cpu == "armeabi-v7a" or ctx.attr.cpu == "armeabi-v6"):
features = [
default_compile_flags_feature,
default_link_flags_feature,
supports_pic_feature,
objcopy_embed_flags_feature,
opt_feature,
dbg_feature,
user_compile_flags_feature,
sysroot_feature,
unfiltered_compile_flags_feature,
]
else:
fail("Unreachable")
if (ctx.attr.cpu == "armeabi-v7a"):
cxx_builtin_include_directories = [
"%package(@org_linaro_components_toolchain_gcc_5_3_1//include)%",
"%package(@org_linaro_components_toolchain_gcc_5_3_1//arm-linux-gnueabihf/libc/usr/include)%",
"%package(@org_linaro_components_toolchain_gcc_5_3_1//arm-linux-gnueabihf/libc/usr/lib/include)%",
"%package(@org_linaro_components_toolchain_gcc_5_3_1//arm-linux-gnueabihf/libc/lib/gcc/arm-linux-gnueabihf/5.3.1/include-fixed)%",
"%package(@org_linaro_components_toolchain_gcc_5_3_1//include)%/c++/5.3.1",
"%package(@org_linaro_components_toolchain_gcc_5_3_1//arm-linux-gnueabihf/libc/lib/gcc/arm-linux-gnueabihf/5.3.1/include)%",
"%package(@org_linaro_components_toolchain_gcc_5_3_1//arm-linux-gnueabihf/libc/lib/gcc/arm-linux-gnueabihf/5.3.1/include-fixed)%",
"%package(@org_linaro_components_toolchain_gcc_5_3_1//lib/gcc/arm-linux-gnueabihf/5.3.1/include)%",
"%package(@org_linaro_components_toolchain_gcc_5_3_1//lib/gcc/arm-linux-gnueabihf/5.3.1/include-fixed)%",
"%package(@org_linaro_components_toolchain_gcc_5_3_1//arm-linux-gnueabihf/include)%/c++/5.3.1",
]
elif (ctx.attr.cpu == "armeabi-v6"):
cxx_builtin_include_directories = [
"%package(@raspi_components_toolchain_gcc_4_8_3//include)%",
"%package(@raspi_components_toolchain_gcc_4_8_3//arm-linux-gnueabihf/libc/usr/include)%",
"%package(@raspi_components_toolchain_gcc_5_3_1//arm-linux-gnueabihf/libc/usr/include/arm-linux-gnueabihf)%",
"%package(@raspi_components_toolchain_gcc_4_8_3//arm-linux-gnueabihf/libc/usr/lib/include)%",
"%package(@raspi_components_toolchain_gcc_4_8_3//arm-linux-gnueabihf/libc/lib/gcc/arm-linux-gnueabihf/4.8.3/include-fixed)%",
"%package(@raspi_components_toolchain_gcc_4_8_3//include)%/c++/4.8.3",
"%package(@raspi_components_toolchain_gcc_4_8_3//arm-linux-gnueabihf/libc/lib/gcc/arm-linux-gnueabihf/4.8.3/include)%",
"%package(@raspi_components_toolchain_gcc_4_8_3//arm-linux-gnueabihf/libc/lib/gcc/arm-linux-gnueabihf/4.8.3/include-fixed)%",
"%package(@raspi_components_toolchain_gcc_4_8_3//lib/gcc/arm-linux-gnueabihf/4.8.3/include)%",
"%package(@raspi_components_toolchain_gcc_4_8_3//lib/gcc/arm-linux-gnueabihf/4.8.3/include-fixed)%",
"%package(@raspi_components_toolchain_gcc_4_8_3//arm-linux-gnueabihf/include)%/c++/4.8.3",
]
elif (ctx.attr.cpu == "k8"):
cxx_builtin_include_directories = [
"/usr/include/c++/4.8",
"/usr/include/x86_64-linux-gnu/c++/4.8",
"/usr/include/c++/4.8/backward",
"/usr/lib/gcc/x86_64-linux-gnu/4.8/include",
"/usr/local/include",
"/usr/lib/gcc/x86_64-linux-gnu/4.8/include-fixed",
"/usr/include/x86_64-linux-gnu",
"/usr/include",
]
else:
fail("Unreachable")
artifact_name_patterns = []
make_variables = []
if (ctx.attr.cpu == "armeabi-v7a"):
tool_paths = [
tool_path(
name = "ar",
path = "linaro_linux_gcc/arm-linux-gnueabihf-ar",
),
tool_path(
name = "compat-ld",
path = "linaro_linux_gcc/arm-linux-gnueabihf-ld",
),
tool_path(
name = "cpp",
path = "linaro_linux_gcc/arm-linux-gnueabihf-gcc",
),
tool_path(
name = "dwp",
path = "linaro_linux_gcc/arm-linux-gnueabihf-dwp",
),
tool_path(
name = "gcc",
path = "linaro_linux_gcc/arm-linux-gnueabihf-gcc",
),
tool_path(
name = "gcov",
path = "arm-frc-linux-gnueabi/arm-frc-linux-gnueabi-gcov-4.9",
),
tool_path(
name = "ld",
path = "linaro_linux_gcc/arm-linux-gnueabihf-ld",
),
tool_path(
name = "nm",
path = "linaro_linux_gcc/arm-linux-gnueabihf-nm",
),
tool_path(
name = "objcopy",
path = "linaro_linux_gcc/arm-linux-gnueabihf-objcopy",
),
tool_path(
name = "objdump",
path = "linaro_linux_gcc/arm-linux-gnueabihf-objdump",
),
tool_path(
name = "strip",
path = "linaro_linux_gcc/arm-linux-gnueabihf-strip",
),
]
elif (ctx.attr.cpu == "armeabi-v6"):
tool_paths = [
tool_path(
name = "ar",
path = "raspi_linux_gcc/arm-linux-gnueabihf-ar",
),
tool_path(
name = "compat-ld",
path = "raspi_linux_gcc/arm-linux-gnueabihf-ld",
),
tool_path(
name = "cpp",
path = "raspi_linux_gcc/arm-linux-gnueabihf-gcc",
),
tool_path(
name = "dwp",
path = "raspi_linux_gcc/arm-linux-gnueabihf-dwp",
),
tool_path(
name = "gcc",
path = "raspi_linux_gcc/arm-linux-gnueabihf-gcc",
),
tool_path(
name = "gcov",
path = "arm-frc-linux-gnueabi/arm-frc-linux-gnueabi-gcov-4.9",
),
tool_path(
name = "ld",
path = "raspi_linux_gcc/arm-linux-gnueabihf-ld",
),
tool_path(
name = "nm",
path = "raspi_linux_gcc/arm-linux-gnueabihf-nm",
),
tool_path(
name = "objcopy",
path = "raspi_linux_gcc/arm-linux-gnueabihf-objcopy",
),
tool_path(
name = "objdump",
path = "raspi_linux_gcc/arm-linux-gnueabihf-objdump",
),
tool_path(
name = "strip",
path = "raspi_linux_gcc/arm-linux-gnueabihf-strip",
),
]
elif (ctx.attr.cpu == "k8"):
tool_paths = [
tool_path(name = "ar", path = "/usr/bin/ar"),
tool_path(name = "cpp", path = "/usr/bin/cpp"),
tool_path(name = "dwp", path = "/usr/bin/dwp"),
tool_path(name = "gcc", path = "/usr/bin/gcc"),
tool_path(name = "gcov", path = "/usr/bin/gcov"),
tool_path(name = "ld", path = "/usr/bin/ld"),
tool_path(name = "nm", path = "/usr/bin/nm"),
tool_path(name = "objcopy", path = "/usr/bin/objcopy"),
tool_path(name = "objdump", path = "/usr/bin/objdump"),
tool_path(name = "strip", path = "/usr/bin/strip"),
]
else:
fail("Unreachable")
out = ctx.actions.declare_file(ctx.label.name)
ctx.actions.write(out, "Fake executable")
return [
cc_common.create_cc_toolchain_config_info(
ctx = ctx,
features = features,
action_configs = action_configs,
artifact_name_patterns = artifact_name_patterns,
cxx_builtin_include_directories = cxx_builtin_include_directories,
toolchain_identifier = toolchain_identifier,
host_system_name = host_system_name,
target_system_name = target_system_name,
target_cpu = target_cpu,
target_libc = target_libc,
compiler = compiler,
abi_version = abi_version,
abi_libc_version = abi_libc_version,
tool_paths = tool_paths,
make_variables = make_variables,
builtin_sysroot = builtin_sysroot,
cc_target_os = cc_target_os,
),
DefaultInfo(
executable = out,
),
]
cc_toolchain_config = rule(
implementation = _impl,
attrs = {
"cpu": attr.string(mandatory = True, values = ["armeabi-v7a", "armeabi-v6", "k8"]),
},
provides = [CcToolchainConfigInfo],
executable = True,
)
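# Illustrative usage from a BUILD file (a sketch; the target name is an
# assumption, not taken from this repository):
# cc_toolchain_config(
#     name = "armeabi_v7a_toolchain_config",
#     cpu = "armeabi-v7a",
# )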
| 41.947312
| 143
| 0.460614
|
e6cfaabb38cd42b18945c37513d0fc168dc82a60
| 1,901
|
py
|
Python
|
DAE.py
|
brunnovicente/DeepSelfLabeled
|
d598d3e3bda360288dc73400206d43bf2d2ad0db
|
[
"Apache-2.0"
] | null | null | null |
DAE.py
|
brunnovicente/DeepSelfLabeled
|
d598d3e3bda360288dc73400206d43bf2d2ad0db
|
[
"Apache-2.0"
] | null | null | null |
DAE.py
|
brunnovicente/DeepSelfLabeled
|
d598d3e3bda360288dc73400206d43bf2d2ad0db
|
[
"Apache-2.0"
] | null | null | null |
import sys
from time import time
import numpy as np
import pandas as pd
import keras.backend as K
from keras.initializers import RandomNormal
from keras.engine.topology import Layer, InputSpec
from keras.models import Model, Sequential, load_model
from keras.layers import Dense, Dropout, Input
from keras.optimizers import SGD
from sklearn.preprocessing import normalize
from keras.callbacks import LearningRateScheduler
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.decomposition import PCA
from numpy import linalg
from sklearn.cluster import KMeans
from sklearn.metrics import normalized_mutual_info_score, adjusted_rand_score
from keras.utils import np_utils
class DeepAutoEncoder(object):
def __init__(self, dim, k, epocas=100, lote = 256):
self.epocas = epocas
self.dim = dim
self.k = k
        self.lote = lote
input_img = Input((dim,))
#encoded = Dense(32, activation='relu')(input_img)
#drop = Dropout(0.2)(encoded)
#encoded = Dense(10, activation='relu')(encoded)
#drop = Dropout(0.2)(encoded)
#encoded = Dense(32, activation='relu')(encoded)
Z = Dense(10, activation='relu')(input_img)
#decoded = Dense(32, activation='relu')(Z)
#drop = Dropout(0.2)(decoded)
#decoded = Dense(64, activation='relu')(decoded)
#drop = Dropout(0.2)(decoded)
#decoded = Dense(128, activation='relu')(decoded)
decoded = Dense(dim, activation='sigmoid')(Z)
self.encoder = Model(input_img, Z)
self.autoencoder = Model(input_img, decoded)
self.autoencoder.compile(loss='mse', optimizer='adadelta')
self.autoencoder.summary()
def fit(self, X):
self.autoencoder.fit(X,X, epochs=self.epocas, batch_size=self.lote, shuffle=True)
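# Illustrative usage (a sketch; variable names are assumptions):
# dae = DeepAutoEncoder(dim=784, k=10)
# dae.fit(X)                          # X: array of shape (n_samples, 784)
# Z = dae.encoder.predict(X)          # 10-dimensional latent codes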
| 36.557692
| 89
| 0.6707
|
2bdaa3401dfa43a9a537d9a2fd8164704f7f17a4
| 2,231
|
py
|
Python
|
test/Interactive/option-n.py
|
moroten/scons
|
20927b42ed4f0cb87f51287fa3b4b6cf915afcf8
|
[
"MIT"
] | 1,403
|
2017-11-23T14:24:01.000Z
|
2022-03-30T20:59:39.000Z
|
test/Interactive/option-n.py
|
moroten/scons
|
20927b42ed4f0cb87f51287fa3b4b6cf915afcf8
|
[
"MIT"
] | 3,708
|
2017-11-27T13:47:12.000Z
|
2022-03-29T17:21:17.000Z
|
test/Interactive/option-n.py
|
moroten/scons
|
20927b42ed4f0cb87f51287fa3b4b6cf915afcf8
|
[
"MIT"
] | 281
|
2017-12-01T23:48:38.000Z
|
2022-03-31T15:25:44.000Z
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that the -n option, specified on the build command, reports
what would be built but doesn't actually build anything.
"""
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """\
Command('foo.out', 'foo.in', Copy('$TARGET', '$SOURCE'))
Command('1', [], Touch('$TARGET'))
Command('2', [], Touch('$TARGET'))
""")
test.write('foo.in', "foo.in\n")
scons = test.start(arguments = '-Q --interactive')
scons.send("build -n foo.out\n")
scons.send("build 1\n")
test.wait_for(test.workpath('1'), popen=scons)
test.must_not_exist(test.workpath('foo.out'))
scons.send("build foo.out\n")
scons.send("build 2\n")
test.wait_for(test.workpath('2'), popen=scons)
test.must_match(test.workpath('foo.out'), "foo.in\n")
expect_stdout = """\
scons>>> Copy("foo.out", "foo.in")
scons>>> Touch("1")
scons>>> Copy("foo.out", "foo.in")
scons>>> Touch("2")
scons>>>
"""
test.finish(scons, stdout = expect_stdout)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 25.94186
| 73
| 0.722546
|
cde9fc5a5110df611d0781ce0e33eed4e85194bd
| 3,401
|
py
|
Python
|
cifar10-pytorch/models/densenet.py
|
PingjunChen/pytorch-study
|
2bc05f3a310d4bf4f618b0a5adfc684a81f75efa
|
[
"Apache-2.0"
] | 4
|
2018-06-25T07:06:54.000Z
|
2018-09-21T20:39:37.000Z
|
cifar10-pytorch/models/densenet.py
|
PingjunChen/pytorch_study
|
2bc05f3a310d4bf4f618b0a5adfc684a81f75efa
|
[
"Apache-2.0"
] | null | null | null |
cifar10-pytorch/models/densenet.py
|
PingjunChen/pytorch_study
|
2bc05f3a310d4bf4f618b0a5adfc684a81f75efa
|
[
"Apache-2.0"
] | 4
|
2018-06-25T07:06:48.000Z
|
2020-05-04T20:08:55.000Z
|
# -*- coding: utf-8 -*-
import os, sys, pdb
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
def __init__(self, in_planes, growth_rate):
super(Bottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, 4*growth_rate, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(4*growth_rate)
self.conv2 = nn.Conv2d(4*growth_rate, growth_rate, kernel_size=3, padding=1, bias=False)
def forward(self, x):
out = self.conv1(F.relu(self.bn1(x)))
out = self.conv2(F.relu(self.bn2(out)))
out = torch.cat([out,x], 1)
return out
class Transition(nn.Module):
def __init__(self, in_planes, out_planes):
super(Transition, self).__init__()
self.bn = nn.BatchNorm2d(in_planes)
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)
def forward(self, x):
out = self.conv(F.relu(self.bn(x)))
out = F.avg_pool2d(out, 2)
return out
class DenseNet(nn.Module):
def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10):
super(DenseNet, self).__init__()
self.growth_rate = growth_rate
num_planes = 2*growth_rate
self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)
self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])
num_planes += nblocks[0]*growth_rate
out_planes = int(math.floor(num_planes*reduction))
self.trans1 = Transition(num_planes, out_planes)
num_planes = out_planes
self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
num_planes += nblocks[1]*growth_rate
out_planes = int(math.floor(num_planes*reduction))
self.trans2 = Transition(num_planes, out_planes)
num_planes = out_planes
self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
num_planes += nblocks[2]*growth_rate
out_planes = int(math.floor(num_planes*reduction))
self.trans3 = Transition(num_planes, out_planes)
num_planes = out_planes
self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
num_planes += nblocks[3]*growth_rate
self.bn = nn.BatchNorm2d(num_planes)
self.linear = nn.Linear(num_planes, num_classes)
def _make_dense_layers(self, block, in_planes, nblock):
layers = []
for i in range(nblock):
layers.append(block(in_planes, self.growth_rate))
in_planes += self.growth_rate
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.trans1(self.dense1(out))
out = self.trans2(self.dense2(out))
out = self.trans3(self.dense3(out))
out = self.dense4(out)
out = F.avg_pool2d(F.relu(self.bn(out)), 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def DenseNet121(num_classes=10):
return DenseNet(Bottleneck, [6,12,24,16], growth_rate=32, num_classes=num_classes)
def DenseNet169(num_classes=10):
return DenseNet(Bottleneck, [6,12,32,32], growth_rate=32, num_classes=num_classes)
def DenseNet201(num_classes=10):
return DenseNet(Bottleneck, [6,12,48,32], growth_rate=32, num_classes=num_classes)
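# Minimal smoke test (an illustrative sketch, not part of the original file):
# net = DenseNet121()
# out = net(torch.randn(1, 3, 32, 32))  # a CIFAR-10-sized input
# assert out.shape == (1, 10)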
| 35.427083
| 96
| 0.65863
|
240e50804cf2e490b5b78700346c0dc6782c21a4
| 1,810
|
py
|
Python
|
invisible_skin.py
|
Dilshad737/Simple-Skin-Detection
|
f1f827c2d4d1354064e2cc73993b8b92d656e3c6
|
[
"MIT"
] | 1
|
2020-10-23T18:50:30.000Z
|
2020-10-23T18:50:30.000Z
|
invisible_skin.py
|
Dilshad737/Simple-Skin-Detection
|
f1f827c2d4d1354064e2cc73993b8b92d656e3c6
|
[
"MIT"
] | null | null | null |
invisible_skin.py
|
Dilshad737/Simple-Skin-Detection
|
f1f827c2d4d1354064e2cc73993b8b92d656e3c6
|
[
"MIT"
] | null | null | null |
import imutils
import numpy as np
import argparse
import cv2
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the (optional) video file")
args = vars(ap.parse_args())
# define the upper and lower boundaries of the HSV pixel.
# you can also use the rgb pixel scaling for this.
lower = np.array([0, 10, 60], dtype="uint8")
upper = np.array([20, 150, 255], dtype="uint8")
# if a video path was not supplied, grab the reference
if not args.get("video", False):
camera = cv2.VideoCapture(0)
else:
    camera = cv2.VideoCapture(args["video"])
while True:
(grabbed, frame) = camera.read()
# if we are viewing a video and we did not grab a frame, then we have reached the end of the video
if args.get("video") and not grabbed:
break
frame = imutils.resize(frame, width=400)
converted = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
skinMask = cv2.inRange(converted, lower, upper)
# apply a series of erosions and dilations to the mask
# using an elliptical kernel
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
skinMask = cv2.erode(skinMask, kernel, iterations=2)
skinMask = cv2.dilate(skinMask, kernel, iterations=2)
# blur the mask to help remove noise, then apply the
# mask to the frame
skinMask = cv2.GaussianBlur(skinMask, (3, 3), 0)
skin = cv2.bitwise_and(frame, frame, mask=skinMask)
# show the skin in the image along with the mask
cv2.imshow("images", np.hstack([frame, skin]))
# if the 'q' key is pressed, stop the loop
if cv2.waitKey(1) & 0xFF == ord("q"):
break
# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
| 34.807692
| 103
| 0.672928
|
1f61b42b54c67446060321cdb93bb3a9431f3081
| 1,544
|
py
|
Python
|
organization/migrations/0001_initial.py
|
H0neyBadger/pmapi
|
d34dad32170e53f49e14611f5bfbfcb4eb7b8d4d
|
[
"MIT"
] | null | null | null |
organization/migrations/0001_initial.py
|
H0neyBadger/pmapi
|
d34dad32170e53f49e14611f5bfbfcb4eb7b8d4d
|
[
"MIT"
] | 1
|
2017-09-07T09:15:07.000Z
|
2017-09-07T09:15:07.000Z
|
organization/migrations/0001_initial.py
|
H0neyBadger/cmdb
|
d34dad32170e53f49e14611f5bfbfcb4eb7b8d4d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-25 17:04
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='Team',
fields=[
('group_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='auth.Group')),
('snow_name', models.CharField(max_length=100)),
('email', models.CharField(help_text='E-mail distribution list', max_length=100)),
('phone', models.CharField(max_length=100)),
('ciso', models.ForeignKey(help_text='chief information security officer', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='ciso', to=settings.AUTH_USER_MODEL)),
('manager', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='manager', to=settings.AUTH_USER_MODEL)),
('technical_contact', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='technical_contact', to=settings.AUTH_USER_MODEL)),
],
bases=('auth.group',),
),
]
| 45.411765
| 198
| 0.674223
|
e864983e7d1d27b0d8ce5846e0a38c0fb3b4223d
| 4,017
|
py
|
Python
|
wholesomebot/training/__main__.py
|
bnb32/wholesome_bot
|
44af9b71f3064e2feb7bdac68a71bf803b544b75
|
[
"MIT"
] | null | null | null |
wholesomebot/training/__main__.py
|
bnb32/wholesome_bot
|
44af9b71f3064e2feb7bdac68a71bf803b544b75
|
[
"MIT"
] | null | null | null |
wholesomebot/training/__main__.py
|
bnb32/wholesome_bot
|
44af9b71f3064e2feb7bdac68a71bf803b544b75
|
[
"MIT"
] | null | null | null |
import wholesomebot.environment.settings as cfg
from wholesomebot.misc import get_logger
from wholesomebot.training import get_last_log, clean_log, vectorize_and_train
import argparse
import os
import glob
logger = get_logger()
parser=argparse.ArgumentParser(description="Update model with new data")
parser.add_argument('-infile', type=str, help='Input file for training and/or classification.')
parser.add_argument('-just_clean', default=False, action='store_true', help='Just process infile for future training.')
parser.add_argument('-append', default=False, action='store_true', help='Append from input file to existing classification dataset.')
parser.add_argument('-train', default=False, action='store_true')
parser.add_argument('-rerun', default=False, action='store_true', help='Rebuild classification dataset from scratch using -source <source>.')
parser.add_argument('-update', default=False, action='store_true',help='Append from the most recent log file.')
parser.add_argument('-no_running_check', default=False, action='store_true', help='Use model to check messages that meet the lower probability threshold defined by environment variables and which may have been missed.')
parser.add_argument('-review_decisions', default=False, action='store_true', help='Reclassify all decisions made by the bot.')
parser.add_argument('-source', default='chatty', choices=['chatty','logs','input'])
parser.add_argument('-from_date', default=None)
args=parser.parse_args()
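# Example invocations, inferred from the flags defined above; since this file
# is wholesomebot/training/__main__.py it runs via python -m, and the paths
# used are illustrative placeholders:
#   python -m wholesomebot.training -update -source chatty
#   python -m wholesomebot.training -infile chat.log -just_clean
#   python -m wholesomebot.training -rerun -source chatty -train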
PWD=os.getcwd()
LOG_FILES=glob.glob(f'{cfg.CHATTY_DIR}/*#{cfg.CHAN}.log')
RAW_LOG=f'{cfg.CHATTY_DIR}/{cfg.CHAN}.log'
TMP=f'{cfg.DATA_DIR}/clean_tmp.txt'
T_IN=f'{cfg.DATA_DIR}/{cfg.CHAN}_data.csv'
T_OUT=f'{cfg.DATA_DIR}/{cfg.CHAN}_data_trim.csv'
if (not args.append and
not args.update and
not args.just_clean):
T_IN=args.infile
if args.review_decisions:
T_IN=f'{cfg.DATA_DIR}/{cfg.CHAN}_decisions.csv'
if args.update:
args.infile = get_last_log(from_chatty=(args.source=='chatty'),
from_logs=(args.source=='logs'),
from_date=args.from_date)
args.append=args.train=True
if args.just_clean:
clean_log(args.infile,TMP,clean=True,running_check=(not args.no_running_check))
logger.info(f'Created {TMP}')
if args.append:
clean_log(args.infile,TMP,clean=True,running_check=(not args.no_running_check))
os.system(f'tail -n +2 {TMP} >> {T_IN}')
logger.info(f'Appended {TMP} to {T_IN}')
elif args.rerun:
if args.source=='chatty':
logger.info(f'Building raw log: {RAW_LOG}')
os.system(f'rm -f {RAW_LOG}')
for log_file in LOG_FILES:
os.system(f'cat {log_file} >> {RAW_LOG}')
clean_log(RAW_LOG,TMP,clean=True,running_check=(not args.no_running_check))
os.system(f'cp {TMP} {T_IN}')
logger.info(f'Created {T_IN}')
else:
clean_log(args.infile,TMP,clean=True,running_check=(not args.no_running_check))
os.system(f'cp {TMP} {T_IN}')
logger.info(f'Created {T_IN}')
elif args.review_decisions:
if args.source=='chatty':
logger.info(f'Building raw log: {RAW_LOG}')
os.system(f'rm -f {RAW_LOG}')
for log_file in LOG_FILES:
os.system(f'cat {log_file} >> {RAW_LOG}')
clean_log(RAW_LOG,TMP,clean=True,
running_check=(not args.no_running_check),
review_decisions=args.review_decisions)
os.system(f'cp {TMP} {T_IN}')
logger.info(f'Created {T_IN}')
else:
clean_log(args.infile,TMP,clean=True,
running_check=(not args.no_running_check),
review_decisions=args.review_decisions)
os.system(f'cp {TMP} {T_IN}')
logger.info(f'Created {T_IN}')
if args.train:
clean_log(T_IN,T_OUT,trim=True,running_check=(not args.no_running_check))
vectorize_and_train(model_type='linearsvm',vec_type='tfidf',data_file=T_OUT)
#vectorize_and_train(model_type='linearsvm',vec_type='hash',data_file=T_OUT)
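The heavy lifting is delegated to vectorize_and_train(model_type='linearsvm', vec_type='tfidf', ...), whose implementation lives elsewhere in wholesomebot.training. A minimal sketch of what such a function might do, assuming scikit-learn and a CSV with hypothetical 'text' and 'label' columns:

import joblib
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC

def vectorize_and_train_sketch(data_file, model_out='model.joblib'):
    df = pd.read_csv(data_file)            # column names are assumptions
    pipeline = Pipeline([
        ('tfidf', TfidfVectorizer()),      # vec_type='tfidf'
        ('clf', LinearSVC()),              # model_type='linearsvm'
    ])
    pipeline.fit(df['text'], df['label'])
    joblib.dump(pipeline, model_out)       # persist the fitted pipeline
    return pipeline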
| 44.633333
| 212
| 0.699527
|
a6157eb5e69cf1b6356a9c8823271ddd8958df44
| 1,589
|
py
|
Python
|
tests/core/plugins/test_workflow.py
|
lokijuhy/renku-python
|
0bfceafa4e6b4750439ab0ed20c61b0a6ba03a1f
|
[
"Apache-2.0"
] | 26
|
2018-06-04T15:21:50.000Z
|
2022-02-11T17:31:24.000Z
|
tests/core/plugins/test_workflow.py
|
lokijuhy/renku-python
|
0bfceafa4e6b4750439ab0ed20c61b0a6ba03a1f
|
[
"Apache-2.0"
] | 1,655
|
2018-05-17T22:07:50.000Z
|
2022-03-31T21:22:01.000Z
|
tests/core/plugins/test_workflow.py
|
lokijuhy/renku-python
|
0bfceafa4e6b4750439ab0ed20c61b0a6ba03a1f
|
[
"Apache-2.0"
] | 19
|
2018-05-18T14:12:25.000Z
|
2022-03-30T19:51:35.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2017-2021 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test plugins for the ``workflow`` command."""
from renku.cli import cli
from renku.core.plugins import pluginmanager as pluginmanager
def test_renku_workflow_exporter_hook(monkeypatch, dummy_workflow_exporter_hook, runner, project):
"""Tests that the renku workflow export plugin hook on ``Plan`` is called."""
pm = pluginmanager.get_plugin_manager()
pm.register(dummy_workflow_exporter_hook)
with monkeypatch.context() as m:
m.setattr(pluginmanager, "get_plugin_manager", lambda: pm)
cmd = ["echo", "test"]
run_name = "run_1"
result = runner.invoke(cli, ["run", "--no-output", "--name", run_name] + cmd)
assert 0 == result.exit_code
result = runner.invoke(cli, ["workflow", "export", "--format", "dummy", run_name])
assert "dummy" in result.output
| 40.74359
| 98
| 0.717432
|
5bf6b2e6123f8076b44992c64bd0cd13854b9605
| 104
|
py
|
Python
|
pacotes/calc/__init__.py
|
C3As/COD3R-Curso-Python
|
13e778108388e290da433db991838c307750a337
|
[
"MIT"
] | null | null | null |
pacotes/calc/__init__.py
|
C3As/COD3R-Curso-Python
|
13e778108388e290da433db991838c307750a337
|
[
"MIT"
] | null | null | null |
pacotes/calc/__init__.py
|
C3As/COD3R-Curso-Python
|
13e778108388e290da433db991838c307750a337
|
[
"MIT"
] | null | null | null |
from pacote1.modulo1 import soma
from pacote2.modulo1 import subtracao
__all__ = ['soma', 'subtracao']
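A usage sketch, assuming the calc package and the pacote1/pacote2 packages it imports from are importable on sys.path:

from calc import soma, subtracao

print(soma(2, 3))        # re-exported names listed in __all__
print(subtracao(5, 2))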
| 20.8
| 37
| 0.778846
|
77ad576e9e35aeda7ebb4d7ac44cff71f0d1f533
| 10,751
|
py
|
Python
|
pytorch/nnutils_pytorch/adaptive_avgpool_2d_test.py
|
jpuigcerver/nnutils
|
3167fdeb834b036709fbb34a11e439c451a758ca
|
[
"MIT"
] | 4
|
2018-11-06T17:34:20.000Z
|
2020-12-03T13:55:29.000Z
|
pytorch/nnutils_pytorch/adaptive_avgpool_2d_test.py
|
carmocca/nnutils
|
3167fdeb834b036709fbb34a11e439c451a758ca
|
[
"MIT"
] | 4
|
2020-02-06T09:56:02.000Z
|
2020-12-03T12:59:35.000Z
|
pytorch/nnutils_pytorch/adaptive_avgpool_2d_test.py
|
carmocca/nnutils
|
3167fdeb834b036709fbb34a11e439c451a758ca
|
[
"MIT"
] | 6
|
2018-07-24T13:00:44.000Z
|
2020-12-03T13:55:34.000Z
|
from __future__ import absolute_import
import numpy as np
import torch
import unittest
from nnutils_pytorch import adaptive_avgpool_2d
from torch.nn.functional import adaptive_avg_pool2d as torch_adaptive_avg_pool2d
class AdaptiveAvgpool2dTest(unittest.TestCase):
def setUp(self):
self._s = torch.LongTensor([[3, 4], [2, 8]])
self._x = torch.Tensor(
[
# Img 1 (3 x 4)
[
[1, 2, 3, 4, 99, 99, 99, 99],
[5, 6, 7, 8, 99, 99, 99, 99],
[9, 10, 11, 12, 99, 99, 99, 99],
],
# Img 2 (2 x 8)
[
[1, 2, 3, 4, 5, 6, 7, 8],
[9, 10, 11, 12, 13, 14, 15, 16],
[99, 99, 99, 99, 99, 99, 99, 99],
],
]
).resize_(2, 1, 3, 8)
self._dy = torch.Tensor(
[
# Output gradient w.r.t Image 1
[[3, 6, 9, 12]],
# Output gradient w.r.t. Image 2
[[8, 12, 16, 20]],
]
).resize_(2, 1, 1, 4)
self._dy_fixed_height = torch.Tensor(
[
# Output gradient w.r.t. Image 1
[[3, 6, 9, 12, 0, 0, 0, 0]],
# Output gradient w.r.t. Image 2
[[6, 8, 10, 12, 14, 16, 18, 20]],
]
).resize_(2, 1, 1, 8)
self._dy_fixed_width = torch.Tensor(
[
# Output gradient w.r.t. Image 1
[[2, 4], [6, 8], [10, 12]],
# Output gradient w.r.t. Image 2
[[4, 8], [12, 16], [0, 0]],
]
).resize_(2, 1, 3, 2)
self._expect_y = torch.Tensor(
[
# Expected output 1
[[5, 6, 7, 8]],
# Expected output 2
[[5.5, 7.5, 9.5, 11.5]],
]
).resize_(2, 1, 1, 4)
self._expect_y_fixed_height = torch.Tensor(
[
# Expected output 1
[[5, 6, 7, 8, 0, 0, 0, 0]],
# Expected output 2
[[5, 6, 7, 8, 9, 10, 11, 12]],
]
).resize_(2, 1, 1, 8)
self._expect_y_fixed_width = torch.Tensor(
[
# Expected output 1
[[1.5, 3.5], [5.5, 7.5], [9.5, 11.5]],
# Expected output 2
[[2.5, 6.5], [10.5, 14.5], [0, 0]],
]
).resize_(2, 1, 3, 2)
self._expect_dx = torch.Tensor(
[
# Input gradient w.r.t. Image 1
[
[1, 2, 3, 4, 0, 0, 0, 0],
[1, 2, 3, 4, 0, 0, 0, 0],
[1, 2, 3, 4, 0, 0, 0, 0],
],
# Input gradient w.r.t. Image 2
[
[2, 2, 3, 3, 4, 4, 5, 5],
[2, 2, 3, 3, 4, 4, 5, 5],
[0, 0, 0, 0, 0, 0, 0, 0],
],
]
).resize_(2, 1, 3, 8)
self._expect_dx_fixed_height = torch.Tensor(
[
# Input gradient w.r.t. Image 1
[
[1, 2, 3, 4, 0, 0, 0, 0],
[1, 2, 3, 4, 0, 0, 0, 0],
[1, 2, 3, 4, 0, 0, 0, 0],
],
# Input gradient w.r.t. Image 2
[
[3, 4, 5, 6, 7, 8, 9, 10],
[3, 4, 5, 6, 7, 8, 9, 10],
[0, 0, 0, 0, 0, 0, 0, 0],
],
]
).resize_(2, 1, 3, 8)
self._expect_dx_fixed_width = torch.Tensor(
[
# Input gradient w.r.t. Image 1
[
[1, 1, 2, 2, 0, 0, 0, 0],
[3, 3, 4, 4, 0, 0, 0, 0],
[5, 5, 6, 6, 0, 0, 0, 0],
],
# Input gradient w.r.t. Image 2
[
[1, 1, 1, 1, 2, 2, 2, 2],
[3, 3, 3, 3, 4, 4, 4, 4],
[0, 0, 0, 0, 0, 0, 0, 0],
],
]
).resize_(2, 1, 3, 8)
def convert(self, cuda, dtype):
self._x = self._x.type(dtype)
self._dy = self._dy.type(dtype)
self._dy_fixed_height = self._dy_fixed_height.type(dtype)
self._dy_fixed_width = self._dy_fixed_width.type(dtype)
self._expect_y = self._expect_y.type(dtype)
self._expect_y_fixed_height = self._expect_y_fixed_height.type(dtype)
self._expect_y_fixed_width = self._expect_y_fixed_width.type(dtype)
self._expect_dx = self._expect_dx.type(dtype)
self._expect_dx_fixed_height = self._expect_dx_fixed_height.type(dtype)
self._expect_dx_fixed_width = self._expect_dx_fixed_width.type(dtype)
if cuda:
self._x = self._x.cuda()
self._s = self._s.cuda()
self._dy = self._dy.cuda()
self._dy_fixed_height = self._dy_fixed_height.cuda()
self._dy_fixed_width = self._dy_fixed_width.cuda()
else:
self._x = self._x.cpu()
self._s = self._s.cpu()
self._dy = self._dy.cpu()
self._dy_fixed_height = self._dy_fixed_height.cpu()
self._dy_fixed_width = self._dy_fixed_width.cpu()
def run_base(self, cuda, ttype):
self.convert(cuda, ttype)
x = self._x.detach().requires_grad_()
xs = self._s.detach()
y = adaptive_avgpool_2d(x, output_sizes=(1, 4), batch_sizes=xs)
y.backward(self._dy, retain_graph=True)
np.testing.assert_array_almost_equal(y.data.cpu(), self._expect_y)
np.testing.assert_array_almost_equal(x.grad.data.cpu(), self._expect_dx)
def run_fixed_height(self, cuda, ttype):
self.convert(cuda, ttype)
x = self._x.detach().requires_grad_()
xs = self._s.detach()
y = adaptive_avgpool_2d(x, output_sizes=(1, None), batch_sizes=xs)
y.backward(self._dy_fixed_height, retain_graph=True)
np.testing.assert_array_almost_equal(y.data.cpu(), self._expect_y_fixed_height)
np.testing.assert_array_almost_equal(
x.grad.data.cpu(), self._expect_dx_fixed_height
)
def run_fixed_width(self, cuda, ttype):
self.convert(cuda, ttype)
x = self._x.detach().requires_grad_()
xs = self._s.detach()
y = adaptive_avgpool_2d(x, output_sizes=(None, 2), batch_sizes=xs)
y.backward(self._dy_fixed_width, retain_graph=True)
np.testing.assert_array_almost_equal(y.data.cpu(), self._expect_y_fixed_width)
np.testing.assert_array_almost_equal(
x.grad.data.cpu(), self._expect_dx_fixed_width
)
@staticmethod
def run_compare_reference_smaller_output(cuda, ttype):
x3 = torch.randn(2, 3, 10, 15).type(ttype)
xs3 = torch.LongTensor([[4, 5], [8, 6]])
x1 = x3[0, :, :4, :5].clone().view(1, 3, 4, 5)
x2 = x3[1, :, :8, :6].clone().view(1, 3, 8, 6)
if cuda:
x1 = x1.cuda()
x2 = x2.cuda()
x3 = x3.cuda()
xs3 = xs3.cuda()
else:
x1 = x1.cpu()
x2 = x2.cpu()
x3 = x3.cpu()
xs3 = xs3.cpu()
x1 = x1.requires_grad_()
x2 = x2.requires_grad_()
x3 = x3.requires_grad_()
# Compare forward
y1 = torch_adaptive_avg_pool2d(x1, output_size=(2, 3))
y2 = torch_adaptive_avg_pool2d(x2, output_size=(2, 3))
y3 = adaptive_avgpool_2d(x3, output_sizes=(2, 3), batch_sizes=xs3)
np.testing.assert_almost_equal(
y3.data.cpu().numpy(), torch.cat([y1, y2]).data.cpu().numpy()
)
# Compare backward
        dx1, dx2 = torch.autograd.grad(y1.sum() + y2.sum(), [x1, x2])
        (dx3,) = torch.autograd.grad(y3.sum(), [x3])
ref = dx3.clone().zero_()
ref[0, :, :4, :5] = dx1.data
ref[1, :, :8, :6] = dx2.data
np.testing.assert_almost_equal(dx3.data.cpu().numpy(), ref.data.cpu().numpy())
@staticmethod
def run_compare_reference_larger_output(cuda, ttype):
x3 = torch.randn(2, 3, 10, 15).type(ttype)
xs3 = torch.LongTensor([[4, 5], [8, 6]])
x1 = x3[0, :, :4, :5].clone().view(1, 3, 4, 5)
x2 = x3[1, :, :8, :6].clone().view(1, 3, 8, 6)
if cuda:
x1 = x1.cuda()
x2 = x2.cuda()
x3 = x3.cuda()
xs3 = xs3.cuda()
else:
x1 = x1.cpu()
x2 = x2.cpu()
x3 = x3.cpu()
xs3 = xs3.cpu()
x1 = x1.requires_grad_()
x2 = x2.requires_grad_()
x3 = x3.requires_grad_()
# Compare forward
y1 = torch_adaptive_avg_pool2d(x1, output_size=(20, 25))
y2 = torch_adaptive_avg_pool2d(x2, output_size=(20, 25))
y3 = adaptive_avgpool_2d(x3, output_sizes=(20, 25), batch_sizes=xs3)
np.testing.assert_almost_equal(
y3.data.cpu().numpy(), torch.cat([y1, y2]).data.cpu().numpy()
)
# Compare backward
        dx1, dx2 = torch.autograd.grad(y1.sum() + y2.sum(), [x1, x2])
        (dx3,) = torch.autograd.grad(y3.sum(), [x3])
ref = dx3.clone().zero_()
ref[0, :, :4, :5] = dx1.data
ref[1, :, :8, :6] = dx2.data
np.testing.assert_almost_equal(dx3.data.cpu().numpy(), ref.data.cpu().numpy())
# Register tests for different types, and different devices.
types = [("torch.FloatTensor", "f32"), ("torch.DoubleTensor", "f64")]
devices = [("cpu", False)]
if torch.cuda.is_available():
devices += [("gpu", True)]
for ttype, dtype in types:
    for device, use_cuda in devices:
        # Bind the loop variables as default arguments: a bare closure would
        # capture them late, so every generated test would otherwise run with
        # the last (device, dtype) combination instead of its own.
        setattr(
            AdaptiveAvgpool2dTest,
            "test_%s_%s" % (device, dtype),
            lambda self, c=use_cuda, t=ttype: self.run_base(c, t),
        )
        setattr(
            AdaptiveAvgpool2dTest,
            "test_fixed_height_%s_%s" % (device, dtype),
            lambda self, c=use_cuda, t=ttype: self.run_fixed_height(c, t),
        )
        setattr(
            AdaptiveAvgpool2dTest,
            "test_fixed_width_%s_%s" % (device, dtype),
            lambda self, c=use_cuda, t=ttype: self.run_fixed_width(c, t),
        )
        setattr(
            AdaptiveAvgpool2dTest,
            "test_compare_to_reference_smaller_output_%s_%s" % (device, dtype),
            lambda self, c=use_cuda, t=ttype: self.run_compare_reference_smaller_output(c, t),
        )
        setattr(
            AdaptiveAvgpool2dTest,
            "test_compare_to_reference_larger_output_%s_%s" % (device, dtype),
            lambda self, c=use_cuda, t=ttype: self.run_compare_reference_larger_output(c, t),
        )
if __name__ == "__main__":
unittest.main()
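A standalone usage sketch of the operator under test, mirroring the call signature exercised above; batch_sizes carries the valid (height, width) of each padded sample so the pooling ignores the padding:

import torch
from nnutils_pytorch import adaptive_avgpool_2d

x = torch.randn(2, 1, 3, 8)                 # padded batch: N x C x H x W
sizes = torch.LongTensor([[3, 4], [2, 8]])  # real (h, w) of each sample
y = adaptive_avgpool_2d(x, output_sizes=(1, 4), batch_sizes=sizes)
print(y.shape)                              # torch.Size([2, 1, 1, 4])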
| 36.198653
| 87
| 0.485164
|